1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 /* All Rights Reserved */
23
24
25 /*
26 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
27 * Copyright 2015, Joyent, Inc. All rights reserved.
28 */
29
30 #include <sys/types.h>
31 #include <sys/sysmacros.h>
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/signal.h>
35 #include <sys/stat.h>
36 #include <sys/proc.h>
37 #include <sys/cred.h>
38 #include <sys/user.h>
39 #include <sys/vnode.h>
40 #include <sys/file.h>
41 #include <sys/stream.h>
42 #include <sys/strsubr.h>
43 #include <sys/stropts.h>
44 #include <sys/tihdr.h>
45 #include <sys/var.h>
46 #include <sys/poll.h>
47 #include <sys/termio.h>
48 #include <sys/ttold.h>
49 #include <sys/systm.h>
50 #include <sys/uio.h>
51 #include <sys/cmn_err.h>
52 #include <sys/sad.h>
53 #include <sys/netstack.h>
54 #include <sys/priocntl.h>
55 #include <sys/jioctl.h>
56 #include <sys/procset.h>
57 #include <sys/session.h>
58 #include <sys/kmem.h>
59 #include <sys/filio.h>
60 #include <sys/vtrace.h>
61 #include <sys/debug.h>
62 #include <sys/strredir.h>
63 #include <sys/fs/fifonode.h>
64 #include <sys/fs/snode.h>
65 #include <sys/strlog.h>
66 #include <sys/strsun.h>
67 #include <sys/project.h>
68 #include <sys/kbio.h>
69 #include <sys/msio.h>
70 #include <sys/tty.h>
71 #include <sys/ptyvar.h>
72 #include <sys/vuid_event.h>
73 #include <sys/modctl.h>
74 #include <sys/sunddi.h>
75 #include <sys/sunldi_impl.h>
76 #include <sys/autoconf.h>
77 #include <sys/policy.h>
78 #include <sys/dld.h>
79 #include <sys/zone.h>
80 #include <sys/limits.h>
81 #include <c2/audit.h>
82
83 /*
84 * This define helps improve the readability of streams code while
85 * still maintaining a very old streams performance enhancement. The
86 * performance enhancement basically involved having all callers
87 * of straccess() perform the first check that straccess() will do
88 * locally before actually calling straccess(). (Thereby reducing
89 * the number of unnecessary calls to straccess().)
90 */
91 #define i_straccess(x, y) ((stp->sd_sidp == NULL) ? 0 : \
92 (stp->sd_vnode->v_type == VFIFO) ? 0 : \
93 straccess((x), (y)))
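
/*
 * A typical call site (see strread() further down in this file): the
 * caller already holds sd_lock and names its stream head pointer "stp",
 * which the macro body relies on directly:
 *
 *	mutex_enter(&stp->sd_lock);
 *	if ((error = i_straccess(stp, JCREAD)) != 0) {
 *		mutex_exit(&stp->sd_lock);
 *		return (error);
 *	}
 */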
94
95 /*
96 * what is mblk_pull_len?
97 *
98 * If a streams message consists of many short mblks,
99 * a performance degradation occurs from copyout overhead.
100 * To decrease the per mblk overhead, messages that are
101 * likely to consist of many small mblks are pulled up into
102 * one continuous chunk of memory.
103 *
104 * To avoid the processing overhead of examining every
105 * mblk, a quick heuristic is used. If the first mblk in
106 * the message is shorter than mblk_pull_len, it is likely
107 * that the rest of the mblks in the message will be short as well.
108 *
109 * This heuristic was decided upon after performance tests
110 * indicated that anything more complex slowed down the main
111 * code path.
112 */
113 #define MBLK_PULL_LEN 64
114 uint32_t mblk_pull_len = MBLK_PULL_LEN;
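
/*
 * Illustrative sketch of how this threshold is applied by the read-side
 * consumers (the actual call sites live elsewhere in the stream head
 * implementation): if the leading mblk is short, the whole message is
 * coalesced into one contiguous chunk before copyout.
 *
 *	if (MBLKL(bp) < mblk_pull_len)
 *		(void) pullupmsg(bp, -1);
 */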
115
116 /*
117 * The sgttyb_handling flag controls the handling of the old BSD
118 * TIOCGETP, TIOCSETP, and TIOCSETN ioctls as follows:
119 *
120 * 0 - Emit no warnings at all and retain old, broken behavior.
121 * 1 - Emit no warnings and silently handle new semantics.
122 * 2 - Send cmn_err(CE_NOTE) when either TIOCSETP or TIOCSETN is used
123 * (once per system invocation). Handle with new semantics.
124 * 3 - Send SIGSYS when any TIOCGETP, TIOCSETP, or TIOCSETN call is
125 * made (so that offenders drop core and are easy to debug).
126 *
127 * The "new semantics" are that TIOCGETP returns B38400 for
128 * sg_[io]speed if the corresponding value is over B38400, and that
129 * TIOCSET[PN] accept B38400 in these cases to mean "retain current
130 * bit rate."
131 */
132 int sgttyb_handling = 1;
133 static boolean_t sgttyb_complaint;
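
/*
 * Hedged sketch of the level-2 behavior described above (the real ioctl
 * handling is further down in this file; the exact wording of the notice
 * is illustrative only):
 *
 *	if (sgttyb_handling == 2 && !sgttyb_complaint) {
 *		sgttyb_complaint = B_TRUE;
 *		cmn_err(CE_NOTE,
 *		    "application used obsolete TIOCSETP/TIOCSETN ioctl");
 *	}
 */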
134
135 /* don't push drcompat module by default on Style-2 streams */
136 static int push_drcompat = 0;
137
138 /*
139 * id value used to distinguish between different ioctl messages
140 */
141 static uint32_t ioc_id;
142
143 static void putback(struct stdata *, queue_t *, mblk_t *, int);
144 static void strcleanall(struct vnode *);
145 static int strwsrv(queue_t *);
146 static int strdocmd(struct stdata *, struct strcmd *, cred_t *);
147
148 /*
149 * qinit and module_info structures for stream head read and write queues
150 */
151 struct module_info strm_info = { 0, "strrhead", 0, INFPSZ, STRHIGH, STRLOW };
152 struct module_info stwm_info = { 0, "strwhead", 0, 0, 0, 0 };
153 struct qinit strdata = { strrput, NULL, NULL, NULL, NULL, &strm_info };
154 struct qinit stwdata = { NULL, strwsrv, NULL, NULL, NULL, &stwm_info };
155 struct module_info fiform_info = { 0, "fifostrrhead", 0, PIPE_BUF, FIFOHIWAT,
156 FIFOLOWAT };
157 struct module_info fifowm_info = { 0, "fifostrwhead", 0, 0, 0, 0 };
158 struct qinit fifo_strdata = { strrput, NULL, NULL, NULL, NULL, &fiform_info };
159 struct qinit fifo_stwdata = { NULL, strwsrv, NULL, NULL, NULL, &fifowm_info };
160
161 extern kmutex_t strresources; /* protects global resources */
162 extern kmutex_t muxifier; /* single-threads multiplexor creation */
163
164 static boolean_t msghasdata(mblk_t *bp);
165 #define msgnodata(bp) (!msghasdata(bp))
166
167 /*
168 * Stream head locking notes:
169 * There are four monitors associated with the stream head:
170 * 1. v_stream monitor: in stropen() and strclose() v_lock
171 * is held while the association of vnode and stream
172 * head is established or tested for.
173 * 2. open/close/push/pop monitor: sd_lock is held while each
174 * thread bids for exclusive access to this monitor
175 * for opening or closing a stream. In addition, this
176 * monitor is entered during pushes and pops. This
177 * guarantees that during plumbing operations there
178 * is only one thread trying to change the plumbing.
179 * Any other threads present in the stream are only
180 * using the plumbing.
181 * 3. read/write monitor: in the case of read, a thread holds
182 * sd_lock while trying to get data from the stream
183 * head queue. if there is none to fulfill a read
184 * request, it sets RSLEEP and calls cv_wait_sig() down
185 * in strwaitq() to await the arrival of new data.
186 * when new data arrives in strrput(), sd_lock is acquired
187 * before testing for RSLEEP and calling cv_broadcast().
188 * the behavior of strwrite(), strwsrv(), and WSLEEP
189 * mirror this.
190 * 4. ioctl monitor: sd_lock is gotten to ensure that only one
191 * thread is doing an ioctl at a time.
192 */
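
/*
 * Sketch of the read-side handshake from (3) above, as realized by the
 * code in this file: a reader that finds no data sets RSLEEP and sleeps
 * on q_wait under sd_lock (via strwaitq(), called from strread()), and
 * strrput() later performs, in effect, under the same lock:
 *
 *	if (stp->sd_flag & RSLEEP) {
 *		stp->sd_flag &= ~RSLEEP;
 *		cv_broadcast(&q->q_wait);
 *	}
 */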
193
194 static int
195 push_mod(queue_t *qp, dev_t *devp, struct stdata *stp, const char *name,
196 int anchor, cred_t *crp, uint_t anchor_zoneid)
197 {
198 int error;
199 fmodsw_impl_t *fp;
200
201 if (stp->sd_flag & (STRHUP|STRDERR|STWRERR)) {
202 error = (stp->sd_flag & STRHUP) ? ENXIO : EIO;
203 return (error);
204 }
205 if (stp->sd_pushcnt >= nstrpush) {
206 return (EINVAL);
207 }
208
209 if ((fp = fmodsw_find(name, FMODSW_HOLD | FMODSW_LOAD)) == NULL) {
210 stp->sd_flag |= STREOPENFAIL;
211 return (EINVAL);
212 }
213
214 /*
215 * push new module and call its open routine via qattach
216 */
217 if ((error = qattach(qp, devp, 0, crp, fp, B_FALSE)) != 0)
218 return (error);
219
220 /*
221 * Check to see if caller wants a STREAMS anchor
222 * put at this place in the stream, and add if so.
223 */
224 mutex_enter(&stp->sd_lock);
225 if (anchor == stp->sd_pushcnt) {
226 stp->sd_anchor = stp->sd_pushcnt;
227 stp->sd_anchorzone = anchor_zoneid;
228 }
229 mutex_exit(&stp->sd_lock);
230
231 return (0);
232 }
233
234 /*
235 * Open a stream device.
236 */
237 int
238 stropen(vnode_t *vp, dev_t *devp, int flag, cred_t *crp)
239 {
240 struct stdata *stp;
241 queue_t *qp;
242 int s;
243 dev_t dummydev, savedev;
244 struct autopush *ap;
245 struct dlautopush dlap;
246 int error = 0;
247 ssize_t rmin, rmax;
248 int cloneopen;
249 queue_t *brq;
250 major_t major;
251 str_stack_t *ss;
252 zoneid_t zoneid;
253 uint_t anchor;
254
255 /*
256 * If the stream already exists, wait for any open in progress
257 * to complete, then call the open function of each module and
258 * driver in the stream. Otherwise create the stream.
259 */
260 TRACE_1(TR_FAC_STREAMS_FR, TR_STROPEN, "stropen:%p", vp);
261 retry:
262 mutex_enter(&vp->v_lock);
263 if ((stp = vp->v_stream) != NULL) {
264
265 /*
266 * A stream to the device already exists due to another
267 * open; below we wait if that open is still in progress.
268 */
269 mutex_exit(&vp->v_lock);
270
271 if (STRMATED(stp)) {
272 struct stdata *strmatep = stp->sd_mate;
273
274 STRLOCKMATES(stp);
275 if (strmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
276 if (flag & (FNDELAY|FNONBLOCK)) {
277 error = EAGAIN;
278 mutex_exit(&strmatep->sd_lock);
279 goto ckreturn;
280 }
281 mutex_exit(&stp->sd_lock);
282 if (!cv_wait_sig(&strmatep->sd_monitor,
283 &strmatep->sd_lock)) {
284 error = EINTR;
285 mutex_exit(&strmatep->sd_lock);
286 mutex_enter(&stp->sd_lock);
287 goto ckreturn;
288 }
289 mutex_exit(&strmatep->sd_lock);
290 goto retry;
291 }
292 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
293 if (flag & (FNDELAY|FNONBLOCK)) {
294 error = EAGAIN;
295 mutex_exit(&strmatep->sd_lock);
296 goto ckreturn;
297 }
298 mutex_exit(&strmatep->sd_lock);
299 if (!cv_wait_sig(&stp->sd_monitor,
300 &stp->sd_lock)) {
301 error = EINTR;
302 goto ckreturn;
303 }
304 mutex_exit(&stp->sd_lock);
305 goto retry;
306 }
307
308 if (stp->sd_flag & (STRDERR|STWRERR)) {
309 error = EIO;
310 mutex_exit(&strmatep->sd_lock);
311 goto ckreturn;
312 }
313
314 stp->sd_flag |= STWOPEN;
315 STRUNLOCKMATES(stp);
316 } else {
317 mutex_enter(&stp->sd_lock);
318 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
319 if (flag & (FNDELAY|FNONBLOCK)) {
320 error = EAGAIN;
321 goto ckreturn;
322 }
323 if (!cv_wait_sig(&stp->sd_monitor,
324 &stp->sd_lock)) {
325 error = EINTR;
326 goto ckreturn;
327 }
328 mutex_exit(&stp->sd_lock);
329 goto retry; /* could be clone! */
330 }
331
332 if (stp->sd_flag & (STRDERR|STWRERR)) {
333 error = EIO;
334 goto ckreturn;
335 }
336
337 stp->sd_flag |= STWOPEN;
338 mutex_exit(&stp->sd_lock);
339 }
340
341 /*
342 * Open all modules and devices downstream to notify
343 * that another user is streaming. For modules, set the
344 * last argument to MODOPEN and do not pass any open flags.
345 * Ignore dummydev since this is not the first open.
346 */
347 claimstr(stp->sd_wrq);
348 qp = stp->sd_wrq;
349 while (_SAMESTR(qp)) {
350 qp = qp->q_next;
351 if ((error = qreopen(_RD(qp), devp, flag, crp)) != 0)
352 break;
353 }
354 releasestr(stp->sd_wrq);
355 mutex_enter(&stp->sd_lock);
356 stp->sd_flag &= ~(STRHUP|STWOPEN|STRDERR|STWRERR);
357 stp->sd_rerror = 0;
358 stp->sd_werror = 0;
359 ckreturn:
360 cv_broadcast(&stp->sd_monitor);
361 mutex_exit(&stp->sd_lock);
362 return (error);
363 }
364
365 /*
366 * This vnode isn't streaming. SPECFS already
367 * checked for multiple vnodes pointing to the
368 * same stream, so create a stream to the driver.
369 */
370 qp = allocq();
371 stp = shalloc(qp);
372
373 /*
374 * Initialize stream head. shalloc() has given us
375 * exclusive access, and we have the vnode locked;
376 * we can do whatever we want with stp.
377 */
378 stp->sd_flag = STWOPEN;
379 stp->sd_siglist = NULL;
380 stp->sd_pollist.ph_list = NULL;
381 stp->sd_sigflags = 0;
382 stp->sd_mark = NULL;
383 stp->sd_closetime = STRTIMOUT;
384 stp->sd_sidp = NULL;
385 stp->sd_pgidp = NULL;
386 stp->sd_vnode = vp;
387 stp->sd_rerror = 0;
388 stp->sd_werror = 0;
389 stp->sd_wroff = 0;
390 stp->sd_tail = 0;
391 stp->sd_iocblk = NULL;
392 stp->sd_cmdblk = NULL;
393 stp->sd_pushcnt = 0;
394 stp->sd_qn_minpsz = 0;
395 stp->sd_qn_maxpsz = INFPSZ - 1; /* used to check for initialization */
396 stp->sd_maxblk = INFPSZ;
397 qp->q_ptr = _WR(qp)->q_ptr = stp;
398 STREAM(qp) = STREAM(_WR(qp)) = stp;
399 vp->v_stream = stp;
400 mutex_exit(&vp->v_lock);
401 if (vp->v_type == VFIFO) {
402 stp->sd_flag |= OLDNDELAY;
403 /*
404 * This means that, for both pipes and fifos,
405 * strwrite will send SIGPIPE if the other
406 * end is closed. For putmsg it depends
407 * on whether it is an XPG4_2 application
408 * or not.
409 */
410 stp->sd_wput_opt = SW_SIGPIPE;
411
412 /* setq might sleep in kmem_alloc - avoid holding locks. */
413 setq(qp, &fifo_strdata, &fifo_stwdata, NULL, QMTSAFE,
414 SQ_CI|SQ_CO, B_FALSE);
415
416 set_qend(qp);
417 stp->sd_strtab = fifo_getinfo();
418 _WR(qp)->q_nfsrv = _WR(qp);
419 qp->q_nfsrv = qp;
420 /*
421 * Wake up others that are waiting for stream to be created.
422 */
423 mutex_enter(&stp->sd_lock);
424 /*
425 * Nothing has been pushed on the stream yet, so the
426 * optimized stream head packet sizes are just those
427 * of the read queue.
428 */
429 stp->sd_qn_minpsz = qp->q_minpsz;
430 stp->sd_qn_maxpsz = qp->q_maxpsz;
431 stp->sd_flag &= ~STWOPEN;
432 goto fifo_opendone;
433 }
434 /* setq might sleep in kmem_alloc - avoid holding locks. */
435 setq(qp, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_FALSE);
436
437 set_qend(qp);
438
439 /*
440 * Open driver and create stream to it (via qattach).
441 */
442 savedev = *devp;
443 cloneopen = (getmajor(*devp) == clone_major);
444 if ((error = qattach(qp, devp, flag, crp, NULL, B_FALSE)) != 0) {
445 mutex_enter(&vp->v_lock);
446 vp->v_stream = NULL;
447 mutex_exit(&vp->v_lock);
448 mutex_enter(&stp->sd_lock);
449 cv_broadcast(&stp->sd_monitor);
450 mutex_exit(&stp->sd_lock);
451 freeq(_RD(qp));
452 shfree(stp);
453 return (error);
454 }
455 /*
456 * Set sd_strtab after open in order to handle clonable drivers
457 */
458 stp->sd_strtab = STREAMSTAB(getmajor(*devp));
459
460 /*
461 * Historical note: dummydev used to be set prior to the initial
462 * open (via qattach above), which made the value seen
463 * inconsistent between an I_PUSH and an autopush of a module.
464 */
465 dummydev = *devp;
466
467 /*
468 * For clone open of old style (Q not associated) network driver,
469 * push DRMODNAME module to handle DL_ATTACH/DL_DETACH
470 */
471 brq = _RD(_WR(qp)->q_next);
472 major = getmajor(*devp);
473 if (push_drcompat && cloneopen && NETWORK_DRV(major) &&
474 ((brq->q_flag & _QASSOCIATED) == 0)) {
475 if (push_mod(qp, &dummydev, stp, DRMODNAME, 0, crp, 0) != 0)
476 cmn_err(CE_WARN, "cannot push " DRMODNAME
477 " streams module");
478 }
479
480 if (!NETWORK_DRV(major)) {
481 savedev = *devp;
482 } else {
483 /*
484 * For network devices, process differently based on the
485 * return value from dld_autopush():
486 *
487 * 0: the passed-in device points to a GLDv3 datalink with
488 * per-link autopush configuration; use that configuration
489 * and ignore any per-driver autopush configuration.
490 *
491 * 1: the passed-in device points to a physical GLDv3
492 * datalink without per-link autopush configuration. The
493 * passed in device was changed to refer to the actual
494 * physical device (if it's not already); we use that new
495 * device to look up any per-driver autopush configuration.
496 *
497 * -1: neither of the above cases applied; use the initial
498 * device to look up any per-driver autopush configuration.
499 */
500 switch (dld_autopush(&savedev, &dlap)) {
501 case 0:
502 zoneid = crgetzoneid(crp);
503 for (s = 0; s < dlap.dap_npush; s++) {
504 error = push_mod(qp, &dummydev, stp,
505 dlap.dap_aplist[s], dlap.dap_anchor, crp,
506 zoneid);
507 if (error != 0)
508 break;
509 }
510 goto opendone;
511 case 1:
512 break;
513 case -1:
514 savedev = *devp;
515 break;
516 }
517 }
518 /*
519 * Find the autopush configuration based on "savedev". Start with the
520 * global zone. If not found check in the local zone.
521 */
522 zoneid = GLOBAL_ZONEID;
523 retryap:
524 ss = netstack_find_by_stackid(zoneid_to_netstackid(zoneid))->
525 netstack_str;
526 if ((ap = sad_ap_find_by_dev(savedev, ss)) == NULL) {
527 netstack_rele(ss->ss_netstack);
528 if (zoneid == GLOBAL_ZONEID) {
529 /*
530 * None found. Also look in the zone's autopush table.
531 */
532 zoneid = crgetzoneid(crp);
533 if (zoneid != GLOBAL_ZONEID)
534 goto retryap;
535 }
536 goto opendone;
537 }
538 anchor = ap->ap_anchor;
539 zoneid = crgetzoneid(crp);
540 for (s = 0; s < ap->ap_npush; s++) {
541 error = push_mod(qp, &dummydev, stp, ap->ap_list[s],
542 anchor, crp, zoneid);
543 if (error != 0)
544 break;
545 }
546 sad_ap_rele(ap, ss);
547 netstack_rele(ss->ss_netstack);
548
549 opendone:
550
551 /*
552 * let specfs know that open failed part way through
553 */
554 if (error) {
555 mutex_enter(&stp->sd_lock);
556 stp->sd_flag |= STREOPENFAIL;
557 mutex_exit(&stp->sd_lock);
558 }
559
560 /*
561 * Wake up others that are waiting for stream to be created.
562 */
563 mutex_enter(&stp->sd_lock);
564 stp->sd_flag &= ~STWOPEN;
565
566 /*
567 * As a performance optimization, we cache the values of
568 * q_minpsz and q_maxpsz of the module below the stream
569 * head in the stream head.
570 */
571 mutex_enter(QLOCK(stp->sd_wrq->q_next));
572 rmin = stp->sd_wrq->q_next->q_minpsz;
573 rmax = stp->sd_wrq->q_next->q_maxpsz;
574 mutex_exit(QLOCK(stp->sd_wrq->q_next));
575
576 /* do this processing here as a performance concern */
577 if (strmsgsz != 0) {
578 if (rmax == INFPSZ)
579 rmax = strmsgsz;
580 else
581 rmax = MIN(strmsgsz, rmax);
582 }
583
584 mutex_enter(QLOCK(stp->sd_wrq));
585 stp->sd_qn_minpsz = rmin;
586 stp->sd_qn_maxpsz = rmax;
587 mutex_exit(QLOCK(stp->sd_wrq));
588
589 fifo_opendone:
590 cv_broadcast(&stp->sd_monitor);
591 mutex_exit(&stp->sd_lock);
592 return (error);
593 }
594
595 static int strsink(queue_t *, mblk_t *);
596 static struct qinit deadrend = {
597 strsink, NULL, NULL, NULL, NULL, &strm_info, NULL
598 };
599 static struct qinit deadwend = {
600 NULL, NULL, NULL, NULL, NULL, &stwm_info, NULL
601 };
602
603 /*
604 * Close a stream.
605 * This is called from closef() on the last close of an open stream.
606 * Strclean() will already have removed the siglist and pollist
607 * information, so all that remains is to remove all multiplexor links
608 * for the stream, pop all the modules (and the driver), and free the
609 * stream structure.
610 */
611
612 int
613 strclose(struct vnode *vp, int flag, cred_t *crp)
614 {
615 struct stdata *stp;
616 queue_t *qp;
617 int rval;
618 int freestp = 1;
619 queue_t *rmq;
620
621 TRACE_1(TR_FAC_STREAMS_FR,
622 TR_STRCLOSE, "strclose:%p", vp);
623 ASSERT(vp->v_stream);
624
625 stp = vp->v_stream;
626 ASSERT(!(stp->sd_flag & STPLEX));
627 qp = stp->sd_wrq;
628
629 /*
630 * Needed so that strpoll will return non-zero for this fd.
631 * Note that with POLLNOERR STRHUP does still cause POLLHUP.
632 */
633 mutex_enter(&stp->sd_lock);
634 stp->sd_flag |= STRHUP;
635 mutex_exit(&stp->sd_lock);
636
637 /*
638 * If the registered process or process group did not have an
639 * open instance of this stream then strclean would not be
640 * called. Thus at the time of closing all remaining siglist entries
641 * are removed.
642 */
643 if (stp->sd_siglist != NULL)
644 strcleanall(vp);
645
646 ASSERT(stp->sd_siglist == NULL);
647 ASSERT(stp->sd_sigflags == 0);
648
649 if (STRMATED(stp)) {
650 struct stdata *strmatep = stp->sd_mate;
651 int waited = 1;
652
653 STRLOCKMATES(stp);
654 while (waited) {
655 waited = 0;
656 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
657 mutex_exit(&strmatep->sd_lock);
658 cv_wait(&stp->sd_monitor, &stp->sd_lock);
659 mutex_exit(&stp->sd_lock);
660 STRLOCKMATES(stp);
661 waited = 1;
662 }
663 while (strmatep->sd_flag &
664 (STWOPEN|STRCLOSE|STRPLUMB)) {
665 mutex_exit(&stp->sd_lock);
666 cv_wait(&strmatep->sd_monitor,
667 &strmatep->sd_lock);
668 mutex_exit(&strmatep->sd_lock);
669 STRLOCKMATES(stp);
670 waited = 1;
671 }
672 }
673 stp->sd_flag |= STRCLOSE;
674 STRUNLOCKMATES(stp);
675 } else {
676 mutex_enter(&stp->sd_lock);
677 stp->sd_flag |= STRCLOSE;
678 mutex_exit(&stp->sd_lock);
679 }
680
681 ASSERT(qp->q_first == NULL); /* No more delayed write */
682
683 /* Check if an I_LINK was ever done on this stream */
684 if (stp->sd_flag & STRHASLINKS) {
685 netstack_t *ns;
686 str_stack_t *ss;
687
688 ns = netstack_find_by_cred(crp);
689 ASSERT(ns != NULL);
690 ss = ns->netstack_str;
691 ASSERT(ss != NULL);
692
693 (void) munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval, ss);
694 netstack_rele(ss->ss_netstack);
695 }
696
697 while (_SAMESTR(qp)) {
698 /*
699 * Holding sd_lock prevents q_next from changing in
700 * this stream.
701 */
702 mutex_enter(&stp->sd_lock);
703 if (!(flag & (FNDELAY|FNONBLOCK)) && (stp->sd_closetime > 0)) {
704
705 /*
706 * sleep until awakened by strwsrv() or timeout
707 */
708 for (;;) {
709 mutex_enter(QLOCK(qp->q_next));
710 if (!(qp->q_next->q_mblkcnt)) {
711 mutex_exit(QLOCK(qp->q_next));
712 break;
713 }
714 stp->sd_flag |= WSLEEP;
715
716 /* ensure strwsrv gets enabled */
717 qp->q_next->q_flag |= QWANTW;
718 mutex_exit(QLOCK(qp->q_next));
719 /* get out if we timed out or recv'd a signal */
720 if (str_cv_wait(&qp->q_wait, &stp->sd_lock,
721 stp->sd_closetime, 0) <= 0) {
722 break;
723 }
724 }
725 stp->sd_flag &= ~WSLEEP;
726 }
727 mutex_exit(&stp->sd_lock);
728
729 rmq = qp->q_next;
730 if (rmq->q_flag & QISDRV) {
731 ASSERT(!_SAMESTR(rmq));
732 wait_sq_svc(_RD(qp)->q_syncq);
733 }
734
735 qdetach(_RD(rmq), 1, flag, crp, B_FALSE);
736 }
737
738 /*
739 * Since we call pollwakeup in close() now, the poll list should
740 * be empty in most cases. The only exception is the layered devices
741 * (e.g. the console drivers with redirection modules pushed on top
742 * of them). We have to do this after calling qdetach() because
743 * the redirection module won't have torn down the console
744 * redirection until after qdetach() has been invoked.
745 */
746 if (stp->sd_pollist.ph_list != NULL) {
747 pollwakeup(&stp->sd_pollist, POLLERR);
748 pollhead_clean(&stp->sd_pollist);
749 }
750 ASSERT(stp->sd_pollist.ph_list == NULL);
751 ASSERT(stp->sd_sidp == NULL);
752 ASSERT(stp->sd_pgidp == NULL);
753
754 /* Prevent qenable from re-enabling the stream head queue */
755 disable_svc(_RD(qp));
756
757 /*
758 * Wait until service procedure of each queue is
759 * run, if QINSERVICE is set.
760 */
761 wait_svc(_RD(qp));
762
763 /*
764 * Now, flush both queues.
765 */
766 flushq(_RD(qp), FLUSHALL);
767 flushq(qp, FLUSHALL);
768
769 /*
770 * If the write queue of the stream head is pointing to a
771 * read queue, we have a twisted stream. If the read queue
772 * is alive, convert the stream head queues into a dead end.
773 * If the read queue is dead, free the dead pair.
774 */
775 if (qp->q_next && !_SAMESTR(qp)) {
776 if (qp->q_next->q_qinfo == &deadrend) { /* half-closed pipe */
777 flushq(qp->q_next, FLUSHALL); /* ensure no message */
778 shfree(qp->q_next->q_stream);
779 freeq(qp->q_next);
780 freeq(_RD(qp));
781 } else if (qp->q_next == _RD(qp)) { /* fifo */
782 freeq(_RD(qp));
783 } else { /* pipe */
784 freestp = 0;
785 /*
786 * The q_info pointers are never accessed when
787 * SQLOCK is held.
788 */
789 ASSERT(qp->q_syncq == _RD(qp)->q_syncq);
790 mutex_enter(SQLOCK(qp->q_syncq));
791 qp->q_qinfo = &deadwend;
792 _RD(qp)->q_qinfo = &deadrend;
793 mutex_exit(SQLOCK(qp->q_syncq));
794 }
795 } else {
796 freeq(_RD(qp)); /* free stream head queue pair */
797 }
798
799 mutex_enter(&vp->v_lock);
800 if (stp->sd_iocblk) {
801 if (stp->sd_iocblk != (mblk_t *)-1) {
802 freemsg(stp->sd_iocblk);
803 }
804 stp->sd_iocblk = NULL;
805 }
806 stp->sd_vnode = NULL;
807 vp->v_stream = NULL;
808 mutex_exit(&vp->v_lock);
809 mutex_enter(&stp->sd_lock);
810 freemsg(stp->sd_cmdblk);
811 stp->sd_cmdblk = NULL;
812 stp->sd_flag &= ~STRCLOSE;
813 cv_broadcast(&stp->sd_monitor);
814 mutex_exit(&stp->sd_lock);
815
816 if (freestp)
817 shfree(stp);
818 return (0);
819 }
820
821 static int
822 strsink(queue_t *q, mblk_t *bp)
823 {
824 struct copyresp *resp;
825
826 switch (bp->b_datap->db_type) {
827 case M_FLUSH:
828 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) {
829 *bp->b_rptr &= ~FLUSHR;
830 bp->b_flag |= MSGNOLOOP;
831 /*
832 * Protect against the driver passing up
833 * messages after it has done a qprocsoff.
834 */
835 if (_OTHERQ(q)->q_next == NULL)
836 freemsg(bp);
837 else
838 qreply(q, bp);
839 } else {
840 freemsg(bp);
841 }
842 break;
843
844 case M_COPYIN:
845 case M_COPYOUT:
846 if (bp->b_cont) {
847 freemsg(bp->b_cont);
848 bp->b_cont = NULL;
849 }
850 bp->b_datap->db_type = M_IOCDATA;
851 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
852 resp = (struct copyresp *)bp->b_rptr;
853 resp->cp_rval = (caddr_t)1; /* failure */
854 /*
855 * Protect against the driver passing up
856 * messages after it has done a qprocsoff.
857 */
858 if (_OTHERQ(q)->q_next == NULL)
859 freemsg(bp);
860 else
861 qreply(q, bp);
862 break;
863
864 case M_IOCTL:
865 if (bp->b_cont) {
866 freemsg(bp->b_cont);
867 bp->b_cont = NULL;
868 }
869 bp->b_datap->db_type = M_IOCNAK;
870 /*
871 * Protect against the driver passing up
872 * messages after it has done a qprocsoff.
873 */
874 if (_OTHERQ(q)->q_next == NULL)
875 freemsg(bp);
876 else
877 qreply(q, bp);
878 break;
879
880 default:
881 freemsg(bp);
882 break;
883 }
884
885 return (0);
886 }
887
888 /*
889 * Clean up after a process when it closes a stream. This is called
890 * from closef for all closes, whereas strclose is called only for the
891 * last close on a stream. The siglist is scanned for entries for the
892 * current process, and these are removed.
893 */
894 void
895 strclean(struct vnode *vp)
896 {
897 strsig_t *ssp, *pssp, *tssp;
898 stdata_t *stp;
899 int update = 0;
900
901 TRACE_1(TR_FAC_STREAMS_FR,
902 TR_STRCLEAN, "strclean:%p", vp);
903 stp = vp->v_stream;
904 pssp = NULL;
905 mutex_enter(&stp->sd_lock);
906 ssp = stp->sd_siglist;
907 while (ssp) {
908 if (ssp->ss_pidp == curproc->p_pidp) {
909 tssp = ssp->ss_next;
910 if (pssp)
911 pssp->ss_next = tssp;
912 else
913 stp->sd_siglist = tssp;
914 mutex_enter(&pidlock);
915 PID_RELE(ssp->ss_pidp);
916 mutex_exit(&pidlock);
917 kmem_free(ssp, sizeof (strsig_t));
918 update = 1;
919 ssp = tssp;
920 } else {
921 pssp = ssp;
922 ssp = ssp->ss_next;
923 }
924 }
925 if (update) {
926 stp->sd_sigflags = 0;
927 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
928 stp->sd_sigflags |= ssp->ss_events;
929 }
930 mutex_exit(&stp->sd_lock);
931 }
932
933 /*
934 * Used on the last close to remove any remaining items on the siglist.
935 * These could be present on the siglist due to I_ESETSIG calls that
936 * use process groups or processes that do not have an open file descriptor
937 * for this stream (such entries would not be removed by strclean).
938 */
939 static void
940 strcleanall(struct vnode *vp)
941 {
942 strsig_t *ssp, *nssp;
943 stdata_t *stp;
944
945 stp = vp->v_stream;
946 mutex_enter(&stp->sd_lock);
947 ssp = stp->sd_siglist;
948 stp->sd_siglist = NULL;
949 while (ssp) {
950 nssp = ssp->ss_next;
951 mutex_enter(&pidlock);
952 PID_RELE(ssp->ss_pidp);
953 mutex_exit(&pidlock);
954 kmem_free(ssp, sizeof (strsig_t));
955 ssp = nssp;
956 }
957 stp->sd_sigflags = 0;
958 mutex_exit(&stp->sd_lock);
959 }
960
961 /*
962 * Retrieve the next message from the logical stream head read queue
963 * using either rwnext (if sync stream) or getq_noenab.
964 * It is the caller's responsibility to call qbackenable after
965 * it is finished with the message. The caller should not call
966 * qbackenable until after any putback calls to avoid spurious backenabling.
967 */
968 mblk_t *
969 strget(struct stdata *stp, queue_t *q, struct uio *uiop, int first,
970 int *errorp)
971 {
972 mblk_t *bp;
973 int error;
974 ssize_t rbytes = 0;
975
976 /* Holding sd_lock prevents the read queue from changing */
977 ASSERT(MUTEX_HELD(&stp->sd_lock));
978
979 if (uiop != NULL && stp->sd_struiordq != NULL &&
980 q->q_first == NULL &&
981 (!first || (stp->sd_wakeq & RSLEEP))) {
982 /*
983 * Stream supports rwnext() for the read side.
984 * If this is the first time we're called by e.g. strread
985 * only do the downcall if there is a deferred wakeup
986 * (registered in sd_wakeq).
987 */
988 struiod_t uiod;
989 struct iovec buf[IOV_MAX_STACK];
990 int iovlen = 0;
991
992 if (first)
993 stp->sd_wakeq &= ~RSLEEP;
994
995 if (uiop->uio_iovcnt > IOV_MAX_STACK) {
996 iovlen = uiop->uio_iovcnt * sizeof (iovec_t);
997 uiod.d_iov = kmem_alloc(iovlen, KM_SLEEP);
998 } else {
999 uiod.d_iov = buf;
1000 }
1001
1002 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, uiop->uio_iovcnt);
1003 uiod.d_mp = 0;
1004 /*
1005 * Mark that a thread is in rwnext on the read side
1006 * to prevent strrput from nacking ioctls immediately.
1007 * When the last concurrent rwnext returns
1008 * the ioctls are nack'ed.
1009 */
1010 ASSERT(MUTEX_HELD(&stp->sd_lock));
1011 stp->sd_struiodnak++;
1012 /*
1013 * Note: rwnext will drop sd_lock.
1014 */
1015 error = rwnext(q, &uiod);
1016 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
1017 mutex_enter(&stp->sd_lock);
1018 stp->sd_struiodnak--;
1019 while (stp->sd_struiodnak == 0 &&
1020 ((bp = stp->sd_struionak) != NULL)) {
1021 stp->sd_struionak = bp->b_next;
1022 bp->b_next = NULL;
1023 bp->b_datap->db_type = M_IOCNAK;
1024 /*
1025 * Protect against the driver passing up
1026 * messages after it has done a qprocsoff.
1027 */
1028 if (_OTHERQ(q)->q_next == NULL)
1029 freemsg(bp);
1030 else {
1031 mutex_exit(&stp->sd_lock);
1032 qreply(q, bp);
1033 mutex_enter(&stp->sd_lock);
1034 }
1035 }
1036 ASSERT(MUTEX_HELD(&stp->sd_lock));
1037 if (error == 0 || error == EWOULDBLOCK) {
1038 if ((bp = uiod.d_mp) != NULL) {
1039 *errorp = 0;
1040 ASSERT(MUTEX_HELD(&stp->sd_lock));
1041 if (iovlen != 0)
1042 kmem_free(uiod.d_iov, iovlen);
1043 return (bp);
1044 }
1045 error = 0;
1046 } else if (error == EINVAL) {
1047 /*
1048 * The stream plumbing must have
1049 * changed while we were away, so
1050 * just turn off rwnext()s.
1051 */
1052 error = 0;
1053 } else if (error == EBUSY) {
1054 /*
1055 * The module might have data in transit using putnext
1056 * Fall back on waiting + getq.
1057 */
1058 error = 0;
1059 } else {
1060 *errorp = error;
1061 ASSERT(MUTEX_HELD(&stp->sd_lock));
1062 if (iovlen != 0)
1063 kmem_free(uiod.d_iov, iovlen);
1064 return (NULL);
1065 }
1066
1067 if (iovlen != 0)
1068 kmem_free(uiod.d_iov, iovlen);
1069
1070 /*
1071 * Try a getq in case a rwnext() generated mblk
1072 * has bubbled up via strrput().
1073 */
1074 }
1075 *errorp = 0;
1076 ASSERT(MUTEX_HELD(&stp->sd_lock));
1077
1078 /*
1079 * If we have a valid uio, try to use it as a guide for how
1080 * many bytes to retrieve from the queue via getq_noenab().
1081 * Doing this can avoid unnecessary counting of overlong
1082 * messages in putback(). We currently only do this for sockets
1083 * and only if there is no sd_rputdatafunc hook.
1084 *
1085 * The sd_rputdatafunc hook transforms the entire message
1086 * before any bytes in it can be given to a client. So, rbytes
1087 * must be 0 if there is a hook.
1088 */
1089 if ((uiop != NULL) && (stp->sd_vnode->v_type == VSOCK) &&
1090 (stp->sd_rputdatafunc == NULL))
1091 rbytes = uiop->uio_resid;
1092
1093 return (getq_noenab(q, rbytes));
1094 }
1095
1096 /*
1097 * Copy out the message pointed to by `bp' into the uio pointed to by `uiop'.
1098 * If the message does not fit in the uio the remainder of it is returned;
1099 * otherwise NULL is returned. Any embedded zero-length mblk_t's are
1100 * consumed, even if uio_resid reaches zero. On error, `*errorp' is set to
1101 * the error code, the message is consumed, and NULL is returned.
1102 */
1103 static mblk_t *
1104 struiocopyout(mblk_t *bp, struct uio *uiop, int *errorp)
1105 {
1106 int error;
1107 ptrdiff_t n;
1108 mblk_t *nbp;
1109
1110 ASSERT(bp->b_wptr >= bp->b_rptr);
1111
1112 do {
1113 if ((n = MIN(uiop->uio_resid, MBLKL(bp))) != 0) {
1114 ASSERT(n > 0);
1115
1116 error = uiomove(bp->b_rptr, n, UIO_READ, uiop);
1117 if (error != 0) {
1118 freemsg(bp);
1119 *errorp = error;
1120 return (NULL);
1121 }
1122 }
1123
1124 bp->b_rptr += n;
1125 while (bp != NULL && (bp->b_rptr >= bp->b_wptr)) {
1126 nbp = bp;
1127 bp = bp->b_cont;
1128 freeb(nbp);
1129 }
1130 } while (bp != NULL && uiop->uio_resid > 0);
1131
1132 *errorp = 0;
1133 return (bp);
1134 }
1135
1136 /*
1137 * Read a stream according to the mode flags in sd_flag:
1138 *
1139 * (default mode) - Byte stream, msg boundaries are ignored
1140 * RD_MSGDIS (msg discard) - Read on msg boundaries and throw away
1141 * any data remaining in msg
1142 * RD_MSGNODIS (msg non-discard) - Read on msg boundaries and put back
1143 * any remaining data on head of read queue
1144 *
1145 * Consume readable messages on the front of the queue until
1146 * ttolwp(curthread)->lwp_count
1147 * is satisfied, the readable messages are exhausted, or a message
1148 * boundary is reached in a message mode. If no data was read and
1149 * the stream was not opened with the NDELAY flag, block until data arrives.
1150 * Otherwise return the data read and update the count.
1151 *
1152 * In default mode a 0 length message signifies end-of-file and terminates
1153 * a read in progress. The 0 length message is removed from the queue
1154 * only if it is the only message read (no data is read).
1155 *
1156 * An attempt to read an M_PROTO or M_PCPROTO message results in an
1157 * EBADMSG error return, unless either RD_PROTDAT or RD_PROTDIS are set.
1158 * If RD_PROTDAT is set, M_PROTO and M_PCPROTO messages are read as data.
1159 * If RD_PROTDIS is set, the M_PROTO and M_PCPROTO parts of the message
1160 * are unlinked from any M_DATA blocks in the message, the protos are
1161 * thrown away, and the data is read.
1162 */
1163 /* ARGSUSED */
1164 int
1165 strread(struct vnode *vp, struct uio *uiop, cred_t *crp)
1166 {
1167 struct stdata *stp;
1168 mblk_t *bp, *nbp;
1169 queue_t *q;
1170 int error = 0;
1171 uint_t old_sd_flag;
1172 int first;
1173 char rflg;
1174 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
1175 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
1176 short delim;
1177 unsigned char pri = 0;
1178 char waitflag;
1179 unsigned char type;
1180
1181 TRACE_1(TR_FAC_STREAMS_FR,
1182 TR_STRREAD_ENTER, "strread:%p", vp);
1183 ASSERT(vp->v_stream);
1184 stp = vp->v_stream;
1185
1186 mutex_enter(&stp->sd_lock);
1187
1188 if ((error = i_straccess(stp, JCREAD)) != 0) {
1189 mutex_exit(&stp->sd_lock);
1190 return (error);
1191 }
1192
1193 if (stp->sd_flag & (STRDERR|STPLEX)) {
1194 error = strgeterr(stp, STRDERR|STPLEX, 0);
1195 if (error != 0) {
1196 mutex_exit(&stp->sd_lock);
1197 return (error);
1198 }
1199 }
1200
1201 /*
1202 * Loop terminates when uiop->uio_resid == 0.
1203 */
1204 rflg = 0;
1205 waitflag = READWAIT;
1206 q = _RD(stp->sd_wrq);
1207 for (;;) {
1208 ASSERT(MUTEX_HELD(&stp->sd_lock));
1209 old_sd_flag = stp->sd_flag;
1210 mark = 0;
1211 delim = 0;
1212 first = 1;
1213 while ((bp = strget(stp, q, uiop, first, &error)) == NULL) {
1214 int done = 0;
1215
1216 ASSERT(MUTEX_HELD(&stp->sd_lock));
1217
1218 if (error != 0)
1219 goto oops;
1220
1221 if (stp->sd_flag & (STRHUP|STREOF)) {
1222 goto oops;
1223 }
1224 if (rflg && !(stp->sd_flag & STRDELIM)) {
1225 goto oops;
1226 }
1227 /*
1228 * If a read(fd,buf,0) has been done, there is no
1229 * need to sleep. We always have zero bytes to
1230 * return.
1231 */
1232 if (uiop->uio_resid == 0) {
1233 goto oops;
1234 }
1235
1236 qbackenable(q, 0);
1237
1238 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_WAIT,
1239 "strread calls strwaitq:%p, %p, %p",
1240 vp, uiop, crp);
1241 if ((error = strwaitq(stp, waitflag, uiop->uio_resid,
1242 uiop->uio_fmode, -1, &done)) != 0 || done) {
1243 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_DONE,
1244 "strread error or done:%p, %p, %p",
1245 vp, uiop, crp);
1246 if ((uiop->uio_fmode & FNDELAY) &&
1247 (stp->sd_flag & OLDNDELAY) &&
1248 (error == EAGAIN))
1249 error = 0;
1250 goto oops;
1251 }
1252 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_AWAKE,
1253 "strread awakes:%p, %p, %p", vp, uiop, crp);
1254 if ((error = i_straccess(stp, JCREAD)) != 0) {
1255 goto oops;
1256 }
1257 first = 0;
1258 }
1259
1260 ASSERT(MUTEX_HELD(&stp->sd_lock));
1261 ASSERT(bp);
1262 pri = bp->b_band;
1263 /*
1264 * Extract any mark information. If the message is not
1265 * completely consumed this information will be put in the mblk
1266 * that is putback.
1267 * If MSGMARKNEXT is set and the message is completely consumed
1268 * the STRATMARK flag will be set below. Likewise, if
1269 * MSGNOTMARKNEXT is set and the message is
1270 * completely consumed STRNOTATMARK will be set.
1271 *
1272 * For some unknown reason strread only breaks the read at the
1273 * last mark.
1274 */
1275 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
1276 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
1277 (MSGMARKNEXT|MSGNOTMARKNEXT));
1278 if (mark != 0 && bp == stp->sd_mark) {
1279 if (rflg) {
1280 putback(stp, q, bp, pri);
1281 goto oops;
1282 }
1283 mark |= _LASTMARK;
1284 stp->sd_mark = NULL;
1285 }
1286 if ((stp->sd_flag & STRDELIM) && (bp->b_flag & MSGDELIM))
1287 delim = 1;
1288 mutex_exit(&stp->sd_lock);
1289
1290 if (STREAM_NEEDSERVICE(stp))
1291 stream_runservice(stp);
1292
1293 type = bp->b_datap->db_type;
1294
1295 switch (type) {
1296
1297 case M_DATA:
1298 ismdata:
1299 if (msgnodata(bp)) {
1300 if (mark || delim) {
1301 freemsg(bp);
1302 } else if (rflg) {
1303
1304 /*
1305 * If data has already been read, put the
1306 * zero-length message back on the queue;
1307 * otherwise free the msg and return 0.
1308 */
1309 bp->b_band = pri;
1310 mutex_enter(&stp->sd_lock);
1311 putback(stp, q, bp, pri);
1312 mutex_exit(&stp->sd_lock);
1313 } else {
1314 freemsg(bp);
1315 }
1316 error = 0;
1317 goto oops1;
1318 }
1319
1320 rflg = 1;
1321 waitflag |= NOINTR;
1322 bp = struiocopyout(bp, uiop, &error);
1323 if (error != 0)
1324 goto oops1;
1325
1326 mutex_enter(&stp->sd_lock);
1327 if (bp) {
1328 /*
1329 * Have remaining data in message.
1330 * Free msg if in discard mode.
1331 */
1332 if (stp->sd_read_opt & RD_MSGDIS) {
1333 freemsg(bp);
1334 } else {
1335 bp->b_band = pri;
1336 if ((mark & _LASTMARK) &&
1337 (stp->sd_mark == NULL))
1338 stp->sd_mark = bp;
1339 bp->b_flag |= mark & ~_LASTMARK;
1340 if (delim)
1341 bp->b_flag |= MSGDELIM;
1342 if (msgnodata(bp))
1343 freemsg(bp);
1344 else
1345 putback(stp, q, bp, pri);
1346 }
1347 } else {
1348 /*
1349 * Consumed the complete message.
1350 * Move the MSG*MARKNEXT information
1351 * to the stream head just in case
1352 * the read queue becomes empty.
1353 *
1354 * If the stream head was at the mark
1355 * (STRATMARK) before we dropped sd_lock above
1356 * and some data was consumed then we have
1357 * moved past the mark thus STRATMARK is
1358 * cleared. However, if a message arrived in
1359 * strrput during the copyout above causing
1360 * STRATMARK to be set we can not clear that
1361 * flag.
1362 */
1363 if (mark &
1364 (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
1365 if (mark & MSGMARKNEXT) {
1366 stp->sd_flag &= ~STRNOTATMARK;
1367 stp->sd_flag |= STRATMARK;
1368 } else if (mark & MSGNOTMARKNEXT) {
1369 stp->sd_flag &= ~STRATMARK;
1370 stp->sd_flag |= STRNOTATMARK;
1371 } else {
1372 stp->sd_flag &=
1373 ~(STRATMARK|STRNOTATMARK);
1374 }
1375 } else if (rflg && (old_sd_flag & STRATMARK)) {
1376 stp->sd_flag &= ~STRATMARK;
1377 }
1378 }
1379
1380 /*
1381 * Check for signal messages at the front of the read
1382 * queue and generate the signal(s) if appropriate.
1383 * The only signal that can be on queue is M_SIG at
1384 * this point.
1385 */
1386 while ((((bp = q->q_first)) != NULL) &&
1387 (bp->b_datap->db_type == M_SIG)) {
1388 bp = getq_noenab(q, 0);
1389 /*
1390 * sd_lock is held so the content of the
1391 * read queue can not change.
1392 */
1393 ASSERT(bp != NULL && DB_TYPE(bp) == M_SIG);
1394 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
1395 mutex_exit(&stp->sd_lock);
1396 freemsg(bp);
1397 if (STREAM_NEEDSERVICE(stp))
1398 stream_runservice(stp);
1399 mutex_enter(&stp->sd_lock);
1400 }
1401
1402 if ((uiop->uio_resid == 0) || (mark & _LASTMARK) ||
1403 delim ||
1404 (stp->sd_read_opt & (RD_MSGDIS|RD_MSGNODIS))) {
1405 goto oops;
1406 }
1407 continue;
1408
1409 case M_SIG:
1410 strsignal(stp, *bp->b_rptr, (int32_t)bp->b_band);
1411 freemsg(bp);
1412 mutex_enter(&stp->sd_lock);
1413 continue;
1414
1415 case M_PROTO:
1416 case M_PCPROTO:
1417 /*
1418 * Only data messages are readable.
1419 * Any others generate an error, unless
1420 * RD_PROTDIS or RD_PROTDAT is set.
1421 */
1422 if (stp->sd_read_opt & RD_PROTDAT) {
1423 for (nbp = bp; nbp; nbp = nbp->b_next) {
1424 if ((nbp->b_datap->db_type ==
1425 M_PROTO) ||
1426 (nbp->b_datap->db_type ==
1427 M_PCPROTO)) {
1428 nbp->b_datap->db_type = M_DATA;
1429 } else {
1430 break;
1431 }
1432 }
1433 /*
1434 * clear stream head hi pri flag based on
1435 * first message
1436 */
1437 if (type == M_PCPROTO) {
1438 mutex_enter(&stp->sd_lock);
1439 stp->sd_flag &= ~STRPRI;
1440 mutex_exit(&stp->sd_lock);
1441 }
1442 goto ismdata;
1443 } else if (stp->sd_read_opt & RD_PROTDIS) {
1444 /*
1445 * discard non-data messages
1446 */
1447 while (bp &&
1448 ((bp->b_datap->db_type == M_PROTO) ||
1449 (bp->b_datap->db_type == M_PCPROTO))) {
1450 nbp = unlinkb(bp);
1451 freeb(bp);
1452 bp = nbp;
1453 }
1454 /*
1455 * clear stream head hi pri flag based on
1456 * first message
1457 */
1458 if (type == M_PCPROTO) {
1459 mutex_enter(&stp->sd_lock);
1460 stp->sd_flag &= ~STRPRI;
1461 mutex_exit(&stp->sd_lock);
1462 }
1463 if (bp) {
1464 bp->b_band = pri;
1465 goto ismdata;
1466 } else {
1467 break;
1468 }
1469 }
1470 /* FALLTHRU */
1471 case M_PASSFP:
1472 if ((bp->b_datap->db_type == M_PASSFP) &&
1473 (stp->sd_read_opt & RD_PROTDIS)) {
1474 freemsg(bp);
1475 break;
1476 }
1477 mutex_enter(&stp->sd_lock);
1478 putback(stp, q, bp, pri);
1479 mutex_exit(&stp->sd_lock);
1480 if (rflg == 0)
1481 error = EBADMSG;
1482 goto oops1;
1483
1484 default:
1485 /*
1486 * Garbage on stream head read queue.
1487 */
1488 cmn_err(CE_WARN, "bad %x found at stream head\n",
1489 bp->b_datap->db_type);
1490 freemsg(bp);
1491 goto oops1;
1492 }
1493 mutex_enter(&stp->sd_lock);
1494 }
1495 oops:
1496 mutex_exit(&stp->sd_lock);
1497 oops1:
1498 qbackenable(q, pri);
1499 return (error);
1500 #undef _LASTMARK
1501 }
1502
1503 /*
1504 * Default processing of M_PROTO/M_PCPROTO messages.
1505 * Determine which wakeups and signals are needed.
1506 * This can be replaced by a user-specified procedure for kernel users
1507 * of STREAMS.
1508 */
1509 /* ARGSUSED */
1510 mblk_t *
1511 strrput_proto(vnode_t *vp, mblk_t *mp,
1512 strwakeup_t *wakeups, strsigset_t *firstmsgsigs,
1513 strsigset_t *allmsgsigs, strpollset_t *pollwakeups)
1514 {
1515 *wakeups = RSLEEP;
1516 *allmsgsigs = 0;
1517
1518 switch (mp->b_datap->db_type) {
1519 case M_PROTO:
1520 if (mp->b_band == 0) {
1521 *firstmsgsigs = S_INPUT | S_RDNORM;
1522 *pollwakeups = POLLIN | POLLRDNORM;
1523 } else {
1524 *firstmsgsigs = S_INPUT | S_RDBAND;
1525 *pollwakeups = POLLIN | POLLRDBAND;
1526 }
1527 break;
1528 case M_PCPROTO:
1529 *firstmsgsigs = S_HIPRI;
1530 *pollwakeups = POLLPRI;
1531 break;
1532 }
1533 return (mp);
1534 }
1535
1536 /*
1537 * Default processing of everything but M_DATA, M_PROTO, M_PCPROTO and
1538 * M_PASSFP messages.
1539 * Determine which wakeups and signals are needed.
1540 * This can be replaced by a user-specified procedure for kernel users
1541 * of STREAMS.
1542 */
1543 /* ARGSUSED */
1544 mblk_t *
1545 strrput_misc(vnode_t *vp, mblk_t *mp,
1546 strwakeup_t *wakeups, strsigset_t *firstmsgsigs,
1547 strsigset_t *allmsgsigs, strpollset_t *pollwakeups)
1548 {
1549 *wakeups = 0;
1550 *firstmsgsigs = 0;
1551 *allmsgsigs = 0;
1552 *pollwakeups = 0;
1553 return (mp);
1554 }
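
/*
 * Kernel consumers of STREAMS (sockfs, for instance) can replace these
 * defaults with their own sd_rprotofunc/sd_rmiscfunc. A hedged sketch of
 * what such a replacement looks like; the name my_rput_proto is purely
 * illustrative, and the interface used to install it (assumed here to be
 * strsetrputhooks() in strsubr.c) should be treated as an assumption:
 *
 *	static mblk_t *
 *	my_rput_proto(vnode_t *vp, mblk_t *mp, strwakeup_t *wakeups,
 *	    strsigset_t *firstmsgsigs, strsigset_t *allmsgsigs,
 *	    strpollset_t *pollwakeups)
 *	{
 *		*wakeups = RSLEEP;
 *		*firstmsgsigs = *allmsgsigs = 0;
 *		*pollwakeups = POLLIN | POLLRDNORM;
 *		return (mp);
 *	}
 */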
1555
1556 /*
1557 * Stream read put procedure. Called from downstream driver/module
1558 * with messages for the stream head. Data, protocol, and in-stream
1559 * signal messages are placed on the queue; others are handled directly.
1560 */
1561 int
1562 strrput(queue_t *q, mblk_t *bp)
1563 {
1564 struct stdata *stp;
1565 ulong_t rput_opt;
1566 strwakeup_t wakeups;
1567 strsigset_t firstmsgsigs; /* Signals if first message on queue */
1568 strsigset_t allmsgsigs; /* Signals for all messages */
1569 strsigset_t signals; /* Signals events to generate */
1570 strpollset_t pollwakeups;
1571 mblk_t *nextbp;
1572 uchar_t band = 0;
1573 int hipri_sig;
1574
1575 stp = (struct stdata *)q->q_ptr;
1576 /*
1577 * Use rput_opt for optimized access to the SR_ flags except
1578 * SR_POLLIN. That flag has to be checked under sd_lock since it
1579 * is modified by strpoll().
1580 */
1581 rput_opt = stp->sd_rput_opt;
1582
1583 ASSERT(qclaimed(q));
1584 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_ENTER,
1585 "strrput called with message type:q %p bp %p", q, bp);
1586
1587 /*
1588 * Perform initial processing and pass to the parameterized functions.
1589 */
1590 ASSERT(bp->b_next == NULL);
1591
1592 switch (bp->b_datap->db_type) {
1593 case M_DATA:
1594 /*
1595 * sockfs is the only consumer of STREOF and when it is set,
1596 * it implies that the receiver is not interested in receiving
1597 * any more data, hence the mblk is freed to prevent unnecessary
1598 * message queueing at the stream head.
1599 */
1600 if (stp->sd_flag == STREOF) {
1601 freemsg(bp);
1602 return (0);
1603 }
1604 if ((rput_opt & SR_IGN_ZEROLEN) &&
1605 bp->b_rptr == bp->b_wptr && msgnodata(bp)) {
1606 /*
1607 * Ignore zero-length M_DATA messages. These might be
1608 * generated by some transports.
1609 * The zero-length M_DATA messages, even if they
1610 * are ignored, should still affect the atmark tracking and
1611 * should wake up a thread sleeping in strwaitmark.
1612 */
1613 mutex_enter(&stp->sd_lock);
1614 if (bp->b_flag & MSGMARKNEXT) {
1615 /*
1616 * Record the position of the mark either
1617 * in q_last or in STRATMARK.
1618 */
1619 if (q->q_last != NULL) {
1620 q->q_last->b_flag &= ~MSGNOTMARKNEXT;
1621 q->q_last->b_flag |= MSGMARKNEXT;
1622 } else {
1623 stp->sd_flag &= ~STRNOTATMARK;
1624 stp->sd_flag |= STRATMARK;
1625 }
1626 } else if (bp->b_flag & MSGNOTMARKNEXT) {
1627 /*
1628 * Record that this is not the position of
1629 * the mark either in q_last or in
1630 * STRNOTATMARK.
1631 */
1632 if (q->q_last != NULL) {
1633 q->q_last->b_flag &= ~MSGMARKNEXT;
1634 q->q_last->b_flag |= MSGNOTMARKNEXT;
1635 } else {
1636 stp->sd_flag &= ~STRATMARK;
1637 stp->sd_flag |= STRNOTATMARK;
1638 }
1639 }
1640 if (stp->sd_flag & RSLEEP) {
1641 stp->sd_flag &= ~RSLEEP;
1642 cv_broadcast(&q->q_wait);
1643 }
1644 mutex_exit(&stp->sd_lock);
1645 freemsg(bp);
1646 return (0);
1647 }
1648 wakeups = RSLEEP;
1649 if (bp->b_band == 0) {
1650 firstmsgsigs = S_INPUT | S_RDNORM;
1651 pollwakeups = POLLIN | POLLRDNORM;
1652 } else {
1653 firstmsgsigs = S_INPUT | S_RDBAND;
1654 pollwakeups = POLLIN | POLLRDBAND;
1655 }
1656 if (rput_opt & SR_SIGALLDATA)
1657 allmsgsigs = firstmsgsigs;
1658 else
1659 allmsgsigs = 0;
1660
1661 mutex_enter(&stp->sd_lock);
1662 if ((rput_opt & SR_CONSOL_DATA) &&
1663 (q->q_last != NULL) &&
1664 (bp->b_flag & (MSGMARK|MSGDELIM)) == 0) {
1665 /*
1666 * Consolidate an M_DATA message onto an M_DATA,
1667 * M_PROTO, or M_PCPROTO by merging it with q_last.
1668 * The consolidation does not take place if
1669 * the old message is marked with either of the
1670 * marks or the delim flag or if the new
1671 * message is marked with MSGMARK. The MSGMARK
1672 * check is needed to handle the odd semantics of
1673 * MSGMARK where essentially the whole message
1674 * is to be treated as marked.
1675 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from the
1676 * new message to the front of the b_cont chain.
1677 */
1678 mblk_t *lbp = q->q_last;
1679 unsigned char db_type = lbp->b_datap->db_type;
1680
1681 if ((db_type == M_DATA || db_type == M_PROTO ||
1682 db_type == M_PCPROTO) &&
1683 !(lbp->b_flag & (MSGDELIM|MSGMARK|MSGMARKNEXT))) {
1684 rmvq_noenab(q, lbp);
1685 /*
1686 * The first message in the b_cont list
1687 * tracks MSGMARKNEXT and MSGNOTMARKNEXT.
1688 * We need to handle the case where we
1689 * are appending:
1690 *
1691 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
1692 * 2) a MSGMARKNEXT to a plain message.
1693 * 3) a MSGNOTMARKNEXT to a plain message
1694 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
1695 * message.
1696 *
1697 * Thus we never append a MSGMARKNEXT or
1698 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
1699 */
1700 if (bp->b_flag & MSGMARKNEXT) {
1701 lbp->b_flag |= MSGMARKNEXT;
1702 lbp->b_flag &= ~MSGNOTMARKNEXT;
1703 bp->b_flag &= ~MSGMARKNEXT;
1704 } else if (bp->b_flag & MSGNOTMARKNEXT) {
1705 lbp->b_flag |= MSGNOTMARKNEXT;
1706 bp->b_flag &= ~MSGNOTMARKNEXT;
1707 }
1708
1709 linkb(lbp, bp);
1710 bp = lbp;
1711 /*
1712 * The new message logically isn't the first
1713 * even though the q_first check below thinks
1714 * it is. Clear the firstmsgsigs to make it
1715 * not appear to be first.
1716 */
1717 firstmsgsigs = 0;
1718 }
1719 }
1720 break;
1721
1722 case M_PASSFP:
1723 wakeups = RSLEEP;
1724 allmsgsigs = 0;
1725 if (bp->b_band == 0) {
1726 firstmsgsigs = S_INPUT | S_RDNORM;
1727 pollwakeups = POLLIN | POLLRDNORM;
1728 } else {
1729 firstmsgsigs = S_INPUT | S_RDBAND;
1730 pollwakeups = POLLIN | POLLRDBAND;
1731 }
1732 mutex_enter(&stp->sd_lock);
1733 break;
1734
1735 case M_PROTO:
1736 case M_PCPROTO:
1737 ASSERT(stp->sd_rprotofunc != NULL);
1738 bp = (stp->sd_rprotofunc)(stp->sd_vnode, bp,
1739 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups);
1740 #define ALLSIG (S_INPUT|S_HIPRI|S_OUTPUT|S_MSG|S_ERROR|S_HANGUP|S_RDNORM|\
1741 S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)
1742 #define ALLPOLL (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLWRNORM|POLLRDBAND|\
1743 POLLWRBAND)
1744
1745 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0);
1746 ASSERT((firstmsgsigs & ~ALLSIG) == 0);
1747 ASSERT((allmsgsigs & ~ALLSIG) == 0);
1748 ASSERT((pollwakeups & ~ALLPOLL) == 0);
1749
1750 mutex_enter(&stp->sd_lock);
1751 break;
1752
1753 default:
1754 ASSERT(stp->sd_rmiscfunc != NULL);
1755 bp = (stp->sd_rmiscfunc)(stp->sd_vnode, bp,
1756 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups);
1757 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0);
1758 ASSERT((firstmsgsigs & ~ALLSIG) == 0);
1759 ASSERT((allmsgsigs & ~ALLSIG) == 0);
1760 ASSERT((pollwakeups & ~ALLPOLL) == 0);
1761 #undef ALLSIG
1762 #undef ALLPOLL
1763 mutex_enter(&stp->sd_lock);
1764 break;
1765 }
1766 ASSERT(MUTEX_HELD(&stp->sd_lock));
1767
1768 /* By default generate superset of signals */
1769 signals = (firstmsgsigs | allmsgsigs);
1770
1771 /*
1772 * The proto and misc functions can return multiple messages
1773 * as a b_next chain. Such messages are processed separately.
1774 */
1775 one_more:
1776 hipri_sig = 0;
1777 if (bp == NULL) {
1778 nextbp = NULL;
1779 } else {
1780 nextbp = bp->b_next;
1781 bp->b_next = NULL;
1782
1783 switch (bp->b_datap->db_type) {
1784 case M_PCPROTO:
1785 /*
1786 * Only one priority protocol message is allowed at the
1787 * stream head at a time.
1788 */
1789 if (stp->sd_flag & STRPRI) {
1790 TRACE_0(TR_FAC_STREAMS_FR, TR_STRRPUT_PROTERR,
1791 "M_PCPROTO already at head");
1792 freemsg(bp);
1793 mutex_exit(&stp->sd_lock);
1794 goto done;
1795 }
1796 stp->sd_flag |= STRPRI;
1797 hipri_sig = 1;
1798 /* FALLTHRU */
1799 case M_DATA:
1800 case M_PROTO:
1801 case M_PASSFP:
1802 band = bp->b_band;
1803 /*
1804 * Marking doesn't work well when messages
1805 * are marked in more than one band. We only
1806 * remember the last message received, even if
1807 * it is placed on the queue ahead of other
1808 * marked messages.
1809 */
1810 if (bp->b_flag & MSGMARK)
1811 stp->sd_mark = bp;
1812 (void) putq(q, bp);
1813
1814 /*
1815 * If message is a PCPROTO message, always use
1816 * firstmsgsigs to determine if a signal should be
1817 * sent as strrput is the only place to send
1818 * signals for PCPROTO. Other messages are based on
1819 * the STRGETINPROG flag. The flag determines if
1820 * strrput or (k)strgetmsg will be responsible for
1821 * sending the signals, in the firstmsgsigs case.
1822 */
1823 if ((hipri_sig == 1) ||
1824 (((stp->sd_flag & STRGETINPROG) == 0) &&
1825 (q->q_first == bp)))
1826 signals = (firstmsgsigs | allmsgsigs);
1827 else
1828 signals = allmsgsigs;
1829 break;
1830
1831 default:
1832 mutex_exit(&stp->sd_lock);
1833 (void) strrput_nondata(q, bp);
1834 mutex_enter(&stp->sd_lock);
1835 break;
1836 }
1837 }
1838 ASSERT(MUTEX_HELD(&stp->sd_lock));
1839 /*
1840 * Wake sleeping read/getmsg and cancel deferred wakeup
1841 */
1842 if (wakeups & RSLEEP)
1843 stp->sd_wakeq &= ~RSLEEP;
1844
1845 wakeups &= stp->sd_flag;
1846 if (wakeups & RSLEEP) {
1847 stp->sd_flag &= ~RSLEEP;
1848 cv_broadcast(&q->q_wait);
1849 }
1850 if (wakeups & WSLEEP) {
1851 stp->sd_flag &= ~WSLEEP;
1852 cv_broadcast(&_WR(q)->q_wait);
1853 }
1854
1855 if (pollwakeups != 0) {
1856 if (pollwakeups == (POLLIN | POLLRDNORM)) {
1857 /*
1858 * Can't use rput_opt since it was not
1859 * read when sd_lock was held and SR_POLLIN is changed
1860 * by strpoll() under sd_lock.
1861 */
1862 if (!(stp->sd_rput_opt & SR_POLLIN))
1863 goto no_pollwake;
1864 stp->sd_rput_opt &= ~SR_POLLIN;
1865 }
1866 mutex_exit(&stp->sd_lock);
1867 pollwakeup(&stp->sd_pollist, pollwakeups);
1868 mutex_enter(&stp->sd_lock);
1869 }
1870 no_pollwake:
1871
1872 /*
1873 * strsendsig can handle multiple signals with a
1874 * single call.
1875 */
1876 if (stp->sd_sigflags & signals)
1877 strsendsig(stp->sd_siglist, signals, band, 0);
1878 mutex_exit(&stp->sd_lock);
1879
1880
1881 done:
1882 if (nextbp == NULL)
1883 return (0);
1884
1885 /*
1886 * Any signals were handled the first time.
1887 * Wakeups and pollwakeups are redone to avoid any race
1888 * conditions, since not all of the messages are queued
1889 * until the last message has been processed by strrput.
1890 */
1891 bp = nextbp;
1892 signals = firstmsgsigs = allmsgsigs = 0;
1893 mutex_enter(&stp->sd_lock);
1894 goto one_more;
1895 }
1896
1897 static void
1898 log_dupioc(queue_t *rq, mblk_t *bp)
1899 {
1900 queue_t *wq, *qp;
1901 char *modnames, *mnp, *dname;
1902 size_t maxmodstr;
1903 boolean_t islast;
1904
1905 /*
1906 * Allocate a buffer large enough to hold the names of nstrpush modules
1907 * and one driver, with spaces between them and a NUL terminator. If we can't
1908 * get memory, then we'll just log the driver name.
1909 */
1910 maxmodstr = nstrpush * (FMNAMESZ + 1);
1911 mnp = modnames = kmem_alloc(maxmodstr, KM_NOSLEEP);
1912
1913 /* march down write side to print log message down to the driver */
1914 wq = WR(rq);
1915
1916 /* make sure q_next doesn't shift around while we're grabbing data */
1917 claimstr(wq);
1918 qp = wq->q_next;
1919 do {
1920 dname = Q2NAME(qp);
1921 islast = !SAMESTR(qp) || qp->q_next == NULL;
1922 if (modnames == NULL) {
1923 /*
1924 * If we don't have memory, then get the driver name in
1925 * the log where we can see it. Note that memory
1926 * pressure is a possible cause of these sorts of bugs.
1927 */
1928 if (islast) {
1929 modnames = dname;
1930 maxmodstr = 0;
1931 }
1932 } else {
1933 mnp += snprintf(mnp, FMNAMESZ + 1, "%s", dname);
1934 if (!islast)
1935 *mnp++ = ' ';
1936 }
1937 qp = qp->q_next;
1938 } while (!islast);
1939 releasestr(wq);
1940 /* Cannot happen unless stream head is corrupt. */
1941 ASSERT(modnames != NULL);
1942 (void) strlog(rq->q_qinfo->qi_minfo->mi_idnum, 0, 1,
1943 SL_CONSOLE|SL_TRACE|SL_ERROR,
1944 "Warning: stream %p received duplicate %X M_IOC%s; module list: %s",
1945 rq->q_ptr, ((struct iocblk *)bp->b_rptr)->ioc_cmd,
1946 (DB_TYPE(bp) == M_IOCACK ? "ACK" : "NAK"), modnames);
1947 if (maxmodstr != 0)
1948 kmem_free(modnames, maxmodstr);
1949 }
1950
1951 int
1952 strrput_nondata(queue_t *q, mblk_t *bp)
1953 {
1954 struct stdata *stp;
1955 struct iocblk *iocbp;
1956 struct stroptions *sop;
1957 struct copyreq *reqp;
1958 struct copyresp *resp;
1959 unsigned char bpri;
1960 unsigned char flushed_already = 0;
1961
1962 stp = (struct stdata *)q->q_ptr;
1963
1964 ASSERT(!(stp->sd_flag & STPLEX));
1965 ASSERT(qclaimed(q));
1966
1967 switch (bp->b_datap->db_type) {
1968 case M_ERROR:
1969 /*
1970 * An error has occurred downstream; the errno is in the first
1971 * bytes of the message.
1972 */
1973 if ((bp->b_wptr - bp->b_rptr) == 2) { /* New flavor */
1974 unsigned char rw = 0;
1975
1976 mutex_enter(&stp->sd_lock);
1977 if (*bp->b_rptr != NOERROR) { /* read error */
1978 if (*bp->b_rptr != 0) {
1979 if (stp->sd_flag & STRDERR)
1980 flushed_already |= FLUSHR;
1981 stp->sd_flag |= STRDERR;
1982 rw |= FLUSHR;
1983 } else {
1984 stp->sd_flag &= ~STRDERR;
1985 }
1986 stp->sd_rerror = *bp->b_rptr;
1987 }
1988 bp->b_rptr++;
1989 if (*bp->b_rptr != NOERROR) { /* write error */
1990 if (*bp->b_rptr != 0) {
1991 if (stp->sd_flag & STWRERR)
1992 flushed_already |= FLUSHW;
1993 stp->sd_flag |= STWRERR;
1994 rw |= FLUSHW;
1995 } else {
1996 stp->sd_flag &= ~STWRERR;
1997 }
1998 stp->sd_werror = *bp->b_rptr;
1999 }
2000 if (rw) {
2001 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_WAKE,
2002 "strrput cv_broadcast:q %p, bp %p",
2003 q, bp);
2004 cv_broadcast(&q->q_wait); /* readers */
2005 cv_broadcast(&_WR(q)->q_wait); /* writers */
2006 cv_broadcast(&stp->sd_monitor); /* ioctllers */
2007
2008 mutex_exit(&stp->sd_lock);
2009 pollwakeup(&stp->sd_pollist, POLLERR);
2010 mutex_enter(&stp->sd_lock);
2011
2012 if (stp->sd_sigflags & S_ERROR)
2013 strsendsig(stp->sd_siglist, S_ERROR, 0,
2014 ((rw & FLUSHR) ? stp->sd_rerror :
2015 stp->sd_werror));
2016 mutex_exit(&stp->sd_lock);
2017 /*
2018 * Send the M_FLUSH only
2019 * for the first M_ERROR
2020 * message on the stream
2021 */
2022 if (flushed_already == rw) {
2023 freemsg(bp);
2024 return (0);
2025 }
2026
2027 bp->b_datap->db_type = M_FLUSH;
2028 *bp->b_rptr = rw;
2029 bp->b_wptr = bp->b_rptr + 1;
2030 /*
2031 * Protect against the driver
2032 * passing up messages after
2033 * it has done a qprocsoff
2034 */
2035 if (_OTHERQ(q)->q_next == NULL)
2036 freemsg(bp);
2037 else
2038 qreply(q, bp);
2039 return (0);
2040 } else
2041 mutex_exit(&stp->sd_lock);
2042 } else if (*bp->b_rptr != 0) { /* Old flavor */
2043 if (stp->sd_flag & (STRDERR|STWRERR))
2044 flushed_already = FLUSHRW;
2045 mutex_enter(&stp->sd_lock);
2046 stp->sd_flag |= (STRDERR|STWRERR);
2047 stp->sd_rerror = *bp->b_rptr;
2048 stp->sd_werror = *bp->b_rptr;
2049 TRACE_2(TR_FAC_STREAMS_FR,
2050 TR_STRRPUT_WAKE2,
2051 "strrput wakeup #2:q %p, bp %p", q, bp);
2052 cv_broadcast(&q->q_wait); /* the readers */
2053 cv_broadcast(&_WR(q)->q_wait); /* the writers */
2054 cv_broadcast(&stp->sd_monitor); /* ioctllers */
2055
2056 mutex_exit(&stp->sd_lock);
2057 pollwakeup(&stp->sd_pollist, POLLERR);
2058 mutex_enter(&stp->sd_lock);
2059
2060 if (stp->sd_sigflags & S_ERROR)
2061 strsendsig(stp->sd_siglist, S_ERROR, 0,
2062 (stp->sd_werror ? stp->sd_werror :
2063 stp->sd_rerror));
2064 mutex_exit(&stp->sd_lock);
2065
2066 /*
2067 * Send the M_FLUSH only
2068 * for the first M_ERROR
2069 * message on the stream
2070 */
2071 if (flushed_already != FLUSHRW) {
2072 bp->b_datap->db_type = M_FLUSH;
2073 *bp->b_rptr = FLUSHRW;
2074 /*
2075 * Protect against the driver passing up
2076 * messages after it has done a
2077 * qprocsoff.
2078 */
2079 if (_OTHERQ(q)->q_next == NULL)
2080 freemsg(bp);
2081 else
2082 qreply(q, bp);
2083 return (0);
2084 }
2085 }
2086 freemsg(bp);
2087 return (0);
2088
2089 case M_HANGUP:
2090
2091 freemsg(bp);
2092 mutex_enter(&stp->sd_lock);
2093 stp->sd_werror = ENXIO;
2094 stp->sd_flag |= STRHUP;
2095 stp->sd_flag &= ~(WSLEEP|RSLEEP);
2096
2097 /*
2098 * send signal if controlling tty
2099 */
2100
2101 if (stp->sd_sidp) {
2102 prsignal(stp->sd_sidp, SIGHUP);
2103 if (stp->sd_sidp != stp->sd_pgidp)
2104 pgsignal(stp->sd_pgidp, SIGTSTP);
2105 }
2106
2107 /*
2108 * wake up read, write, and exception pollers and
2109 * reset wakeup mechanism.
2110 */
2111 cv_broadcast(&q->q_wait); /* the readers */
2112 cv_broadcast(&_WR(q)->q_wait); /* the writers */
2113 cv_broadcast(&stp->sd_monitor); /* the ioctllers */
2114 strhup(stp);
2115 mutex_exit(&stp->sd_lock);
2116 return (0);
2117
2118 case M_UNHANGUP:
2119 freemsg(bp);
2120 mutex_enter(&stp->sd_lock);
2121 stp->sd_werror = 0;
2122 stp->sd_flag &= ~STRHUP;
2123 mutex_exit(&stp->sd_lock);
2124 return (0);
2125
2126 case M_SIG:
2127 /*
2128 * Someone downstream wants to post a signal. The
2129 * signal to post is contained in the first byte of the
2130 * message. If the message would go on the front of
2131 * the queue, send a signal to the process group
2132 * (if not SIGPOLL) or to the siglist processes
2133 * (SIGPOLL). If something is already on the queue,
2134 * OR if we are delivering a delayed suspend (*sigh*
2135 * another "tty" hack) and there's no one sleeping already,
2136 * just enqueue the message.
2137 */
2138 mutex_enter(&stp->sd_lock);
2139 if (q->q_first || (*bp->b_rptr == SIGTSTP &&
2140 !(stp->sd_flag & RSLEEP))) {
2141 (void) putq(q, bp);
2142 mutex_exit(&stp->sd_lock);
2143 return (0);
2144 }
2145 mutex_exit(&stp->sd_lock);
2146 /* FALLTHRU */
2147
2148 case M_PCSIG:
2149 /*
2150 * Don't enqueue, just post the signal.
2151 */
2152 strsignal(stp, *bp->b_rptr, 0L);
2153 freemsg(bp);
2154 return (0);
2155
2156 case M_CMD:
2157 if (MBLKL(bp) != sizeof (cmdblk_t)) {
2158 freemsg(bp);
2159 return (0);
2160 }
2161
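/*
 * If a thread is blocked waiting for an M_CMD reply (STRCMDWAIT),
 * hand it the command block and wake it up; otherwise there is no
 * consumer, so just drop the message.
 */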
2162 mutex_enter(&stp->sd_lock);
2163 if (stp->sd_flag & STRCMDWAIT) {
2164 ASSERT(stp->sd_cmdblk == NULL);
2165 stp->sd_cmdblk = bp;
2166 cv_broadcast(&stp->sd_monitor);
2167 mutex_exit(&stp->sd_lock);
2168 } else {
2169 mutex_exit(&stp->sd_lock);
2170 freemsg(bp);
2171 }
2172 return (0);
2173
2174 case M_FLUSH:
2175 /*
2176 * Flush queues. The indication of which queues to flush
2177 * is in the first byte of the message. If the read queue
2178 * is specified, then flush it. If FLUSHBAND is set, just
2179 * flush the band specified by the second byte of the message.
2180 *
2181 * If a module has issued an M_SETOPTS to not flush high
2182 * priority messages off of the stream head, then pass this
2183 * flag into the flushq code to preserve such messages.
2184 */
2185
2186 if (*bp->b_rptr & FLUSHR) {
2187 mutex_enter(&stp->sd_lock);
2188 if (*bp->b_rptr & FLUSHBAND) {
2189 ASSERT((bp->b_wptr - bp->b_rptr) >= 2);
2190 flushband(q, *(bp->b_rptr + 1), FLUSHALL);
2191 } else
2192 flushq_common(q, FLUSHALL,
2193 stp->sd_read_opt & RFLUSHPCPROT);
2194 if ((q->q_first == NULL) ||
2195 (q->q_first->b_datap->db_type < QPCTL))
2196 stp->sd_flag &= ~STRPRI;
2197 else {
2198 ASSERT(stp->sd_flag & STRPRI);
2199 }
2200 mutex_exit(&stp->sd_lock);
2201 }
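/*
 * Reflect the flush back down the write side, unless it has
 * already made one loop; MSGNOLOOP keeps twisted streams from
 * ping-ponging the same M_FLUSH forever.
 */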
2202 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) {
2203 *bp->b_rptr &= ~FLUSHR;
2204 bp->b_flag |= MSGNOLOOP;
2205 /*
2206 * Protect against the driver passing up
2207 * messages after it has done a qprocsoff.
2208 */
2209 if (_OTHERQ(q)->q_next == NULL)
2210 freemsg(bp);
2211 else
2212 qreply(q, bp);
2213 return (0);
2214 }
2215 freemsg(bp);
2216 return (0);
2217
2218 case M_IOCACK:
2219 case M_IOCNAK:
2220 iocbp = (struct iocblk *)bp->b_rptr;
2221 /*
2222 * If not waiting for ACK or NAK then just free msg.
2223 * If incorrect id sequence number then just free msg.
2224 * If already have ACK or NAK for user then this is a
2225 * duplicate, display a warning and free the msg.
2226 */
2227 mutex_enter(&stp->sd_lock);
2228 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk ||
2229 (stp->sd_iocid != iocbp->ioc_id)) {
2230 /*
2231 * If the ACK/NAK is a dup, display a message.
2232 * A dup is when sd_iocid == ioc_id and
2233 * sd_iocblk == <valid ptr> or -1 (the former
2234 * is when an ioctl reply has been put on the stream
2235 * head but has not yet been consumed; the
2236 * latter is when it has been consumed).
2237 */
2238 if ((stp->sd_iocid == iocbp->ioc_id) &&
2239 (stp->sd_iocblk != NULL)) {
2240 log_dupioc(q, bp);
2241 }
2242 freemsg(bp);
2243 mutex_exit(&stp->sd_lock);
2244 return (0);
2245 }
2246
2247 /*
2248 * Assign ACK or NAK to user and wake up.
2249 */
2250 stp->sd_iocblk = bp;
2251 cv_broadcast(&stp->sd_monitor);
2252 mutex_exit(&stp->sd_lock);
2253 return (0);
2254
2255 case M_COPYIN:
2256 case M_COPYOUT:
2257 reqp = (struct copyreq *)bp->b_rptr;
2258
2259 /*
2260 * If not waiting for ACK or NAK then just fail request.
2261 * If already have ACK, NAK, or copy request, then just
2262 * fail request.
2263 * If incorrect id sequence number then just fail request.
2264 */
2265 mutex_enter(&stp->sd_lock);
2266 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk ||
2267 (stp->sd_iocid != reqp->cq_id)) {
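/*
 * Turn the unexpected copy request around as a failed
 * M_IOCDATA so the sender's ioctl state machine can clean up.
 */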
2268 if (bp->b_cont) {
2269 freemsg(bp->b_cont);
2270 bp->b_cont = NULL;
2271 }
2272 bp->b_datap->db_type = M_IOCDATA;
2273 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
2274 resp = (struct copyresp *)bp->b_rptr;
2275 resp->cp_rval = (caddr_t)1; /* failure */
2276 mutex_exit(&stp->sd_lock);
2277 putnext(stp->sd_wrq, bp);
2278 return (0);
2279 }
2280
2281 /*
2282 * Assign copy request to user and wake up.
2283 */
2284 stp->sd_iocblk = bp;
2285 cv_broadcast(&stp->sd_monitor);
2286 mutex_exit(&stp->sd_lock);
2287 return (0);
2288
2289 case M_SETOPTS:
2290 /*
2291 * Set stream head options (read option, write offset,
2292 * min/max packet size, and/or high/low water marks for
2293 * the read side only).
2294 */
2295
2296 bpri = 0;
2297 sop = (struct stroptions *)bp->b_rptr;
2298 mutex_enter(&stp->sd_lock);
2299 if (sop->so_flags & SO_READOPT) {
2300 switch (sop->so_readopt & RMODEMASK) {
2301 case RNORM:
2302 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS);
2303 break;
2304
2305 case RMSGD:
2306 stp->sd_read_opt =
2307 ((stp->sd_read_opt & ~RD_MSGNODIS) |
2308 RD_MSGDIS);
2309 break;
2310
2311 case RMSGN:
2312 stp->sd_read_opt =
2313 ((stp->sd_read_opt & ~RD_MSGDIS) |
2314 RD_MSGNODIS);
2315 break;
2316 }
2317 switch (sop->so_readopt & RPROTMASK) {
2318 case RPROTNORM:
2319 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS);
2320 break;
2321
2322 case RPROTDAT:
2323 stp->sd_read_opt =
2324 ((stp->sd_read_opt & ~RD_PROTDIS) |
2325 RD_PROTDAT);
2326 break;
2327
2328 case RPROTDIS:
2329 stp->sd_read_opt =
2330 ((stp->sd_read_opt & ~RD_PROTDAT) |
2331 RD_PROTDIS);
2332 break;
2333 }
2334 switch (sop->so_readopt & RFLUSHMASK) {
2335 case RFLUSHPCPROT:
2336 /*
2337 * This sets the stream head to NOT flush
2338 * M_PCPROTO messages.
2339 */
2340 stp->sd_read_opt |= RFLUSHPCPROT;
2341 break;
2342 }
2343 }
2344 if (sop->so_flags & SO_ERROPT) {
2345 switch (sop->so_erropt & RERRMASK) {
2346 case RERRNORM:
2347 stp->sd_flag &= ~STRDERRNONPERSIST;
2348 break;
2349 case RERRNONPERSIST:
2350 stp->sd_flag |= STRDERRNONPERSIST;
2351 break;
2352 }
2353 switch (sop->so_erropt & WERRMASK) {
2354 case WERRNORM:
2355 stp->sd_flag &= ~STWRERRNONPERSIST;
2356 break;
2357 case WERRNONPERSIST:
2358 stp->sd_flag |= STWRERRNONPERSIST;
2359 break;
2360 }
2361 }
2362 if (sop->so_flags & SO_COPYOPT) {
2363 if (sop->so_copyopt & ZCVMSAFE) {
2364 stp->sd_copyflag |= STZCVMSAFE;
2365 stp->sd_copyflag &= ~STZCVMUNSAFE;
2366 } else if (sop->so_copyopt & ZCVMUNSAFE) {
2367 stp->sd_copyflag |= STZCVMUNSAFE;
2368 stp->sd_copyflag &= ~STZCVMSAFE;
2369 }
2370
2371 if (sop->so_copyopt & COPYCACHED) {
2372 stp->sd_copyflag |= STRCOPYCACHED;
2373 }
2374 }
2375 if (sop->so_flags & SO_WROFF)
2376 stp->sd_wroff = sop->so_wroff;
2377 if (sop->so_flags & SO_TAIL)
2378 stp->sd_tail = sop->so_tail;
2379 if (sop->so_flags & SO_MINPSZ)
2380 q->q_minpsz = sop->so_minpsz;
2381 if (sop->so_flags & SO_MAXPSZ)
2382 q->q_maxpsz = sop->so_maxpsz;
2383 if (sop->so_flags & SO_MAXBLK)
2384 stp->sd_maxblk = sop->so_maxblk;
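/*
 * High and low water marks may apply to a single band (SO_BAND)
 * or to the queue as a whole; remember the band so it can be
 * backenabled below.
 */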
2385 if (sop->so_flags & SO_HIWAT) {
2386 if (sop->so_flags & SO_BAND) {
2387 if (strqset(q, QHIWAT,
2388 sop->so_band, sop->so_hiwat)) {
2389 cmn_err(CE_WARN, "strrput: could not "
2390 "allocate qband\n");
2391 } else {
2392 bpri = sop->so_band;
2393 }
2394 } else {
2395 q->q_hiwat = sop->so_hiwat;
2396 }
2397 }
2398 if (sop->so_flags & SO_LOWAT) {
2399 if (sop->so_flags & SO_BAND) {
2400 if (strqset(q, QLOWAT,
2401 sop->so_band, sop->so_lowat)) {
2402 cmn_err(CE_WARN, "strrput: could not "
2403 "allocate qband\n");
2404 } else {
2405 bpri = sop->so_band;
2406 }
2407 } else {
2408 q->q_lowat = sop->so_lowat;
2409 }
2410 }
2411 if (sop->so_flags & SO_MREADON)
2412 stp->sd_flag |= SNDMREAD;
2413 if (sop->so_flags & SO_MREADOFF)
2414 stp->sd_flag &= ~SNDMREAD;
2415 if (sop->so_flags & SO_NDELON)
2416 stp->sd_flag |= OLDNDELAY;
2417 if (sop->so_flags & SO_NDELOFF)
2418 stp->sd_flag &= ~OLDNDELAY;
2419 if (sop->so_flags & SO_ISTTY)
2420 stp->sd_flag |= STRISTTY;
2421 if (sop->so_flags & SO_ISNTTY)
2422 stp->sd_flag &= ~STRISTTY;
2423 if (sop->so_flags & SO_TOSTOP)
2424 stp->sd_flag |= STRTOSTOP;
2425 if (sop->so_flags & SO_TONSTOP)
2426 stp->sd_flag &= ~STRTOSTOP;
2427 if (sop->so_flags & SO_DELIM)
2428 stp->sd_flag |= STRDELIM;
2429 if (sop->so_flags & SO_NODELIM)
2430 stp->sd_flag &= ~STRDELIM;
2431
2432 mutex_exit(&stp->sd_lock);
2433 freemsg(bp);
2434
2435 /* Check backenable in case the water marks changed */
2436 qbackenable(q, bpri);
2437 return (0);
2438
2439 /*
2440 * The following set of cases deals with situations where two stream
2441 * heads are connected to each other (twisted streams). These messages
2442 * have no meaning at the stream head.
2443 */
2444 case M_BREAK:
2445 case M_CTL:
2446 case M_DELAY:
2447 case M_START:
2448 case M_STOP:
2449 case M_IOCDATA:
2450 case M_STARTI:
2451 case M_STOPI:
2452 freemsg(bp);
2453 return (0);
2454
2455 case M_IOCTL:
2456 /*
2457 * Always NAK this condition; an M_IOCTL arriving
2458 * at the stream head makes no sense.
2459 * If there are one or more threads in the read-side
2460 * rwnext, we have to defer the NAK until those threads
2461 * return (in strget).
2462 */
2463 mutex_enter(&stp->sd_lock);
2464 if (stp->sd_struiodnak != 0) {
2465 /*
2466 * Defer the NAK at the stream head. Queue it at the
2467 * end of the list.
2468 */
2469 mblk_t *mp = stp->sd_struionak;
2470
2471 while (mp && mp->b_next)
2472 mp = mp->b_next;
2473 if (mp)
2474 mp->b_next = bp;
2475 else
2476 stp->sd_struionak = bp;
2477 bp->b_next = NULL;
2478 mutex_exit(&stp->sd_lock);
2479 return (0);
2480 }
2481 mutex_exit(&stp->sd_lock);
2482
2483 bp->b_datap->db_type = M_IOCNAK;
2484 /*
2485 * Protect against the driver passing up
2486 * messages after it has done a qprocsoff.
2487 */
2488 if (_OTHERQ(q)->q_next == NULL)
2489 freemsg(bp);
2490 else
2491 qreply(q, bp);
2492 return (0);
2493
2494 default:
2495 #ifdef DEBUG
2496 cmn_err(CE_WARN,
2497 "bad message type %x received at stream head\n",
2498 bp->b_datap->db_type);
2499 #endif
2500 freemsg(bp);
2501 return (0);
2502 }
2503
2504 /* NOTREACHED */
2505 }
2506
2507 /*
2508 * Check if the stream pointed to by `stp' can be written to, and return an
2509 * error code if not. If `eiohup' is set, then return EIO if STRHUP is set.
2510 * If `sigpipeok' is set and the SW_SIGPIPE option is enabled on the stream,
2511 * then always return EPIPE and send a SIGPIPE to the invoking thread.
2512 */
2513 static int
2514 strwriteable(struct stdata *stp, boolean_t eiohup, boolean_t sigpipeok)
2515 {
2516 int error;
2517
2518 ASSERT(MUTEX_HELD(&stp->sd_lock));
2519
2520 /*
2521 * For modem support, POSIX states that on writes, EIO should
2522 * be returned if the stream has been hung up.
2523 */
2524 if (eiohup && (stp->sd_flag & (STPLEX|STRHUP)) == STRHUP)
2525 error = EIO;
2526 else
2527 error = strgeterr(stp, STRHUP|STPLEX|STWRERR, 0);
2528
2529 if (error != 0) {
2530 if (!(stp->sd_flag & STPLEX) &&
2531 (stp->sd_wput_opt & SW_SIGPIPE) && sigpipeok) {
2532 tsignal(curthread, SIGPIPE);
2533 error = EPIPE;
2534 }
2535 }
2536
2537 return (error);
2538 }
2539
2540 /*
2541 * Copyin and send data down a stream.
2542 * The caller will allocate and copyin any control part that precedes the
2543 * message and pass that in as mctl.
2544 *
2545 * Caller should *not* hold sd_lock.
2546 * When EWOULDBLOCK is returned the caller has to redo the canputnext
2547 * under sd_lock in order to avoid missing a backenabling wakeup.
2548 *
2549 * Use iosize = -1 to not send any M_DATA. iosize = 0 sends zero-length M_DATA.
2550 *
2551 * Set MSG_IGNFLOW in flags to ignore flow control for hipri messages.
2552 * For sync streams we can only ignore flow control by reverting to using
2553 * putnext.
2554 *
2555 * If sd_maxblk is less than *iosize this routine might return without
2556 * transferring all of *iosize. In all cases, on return *iosize will contain
2557 * the amount of data that was transferred.
2558 */
2559 static int
2560 strput(struct stdata *stp, mblk_t *mctl, struct uio *uiop, ssize_t *iosize,
2561 int b_flag, int pri, int flags)
2562 {
2563 struiod_t uiod;
2564 struct iovec buf[IOV_MAX_STACK];
2565 int iovlen = 0;
2566 mblk_t *mp;
2567 queue_t *wqp = stp->sd_wrq;
2568 int error = 0;
2569 ssize_t count = *iosize;
2570
2571 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
2572
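/*
 * If the write side supports synchronous streams (sd_struiowrq)
 * and there is data to send, postpone the copyin so it can be
 * done via rwnext()/struioget() below.
 */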
2573 if (uiop != NULL && count >= 0)
2574 flags |= stp->sd_struiowrq ? STRUIO_POSTPONE : 0;
2575
2576 if (!(flags & STRUIO_POSTPONE)) {
2577 /*
2578 * Use regular canputnext, strmakedata, putnext sequence.
2579 */
2580 if (pri == 0) {
2581 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) {
2582 freemsg(mctl);
2583 return (EWOULDBLOCK);
2584 }
2585 } else {
2586 if (!(flags & MSG_IGNFLOW) && !bcanputnext(wqp, pri)) {
2587 freemsg(mctl);
2588 return (EWOULDBLOCK);
2589 }
2590 }
2591
2592 if ((error = strmakedata(iosize, uiop, stp, flags,
2593 &mp)) != 0) {
2594 freemsg(mctl);
2595 /*
2596 * Change the return code to ENOMEM so that an
2597 * allocation failure is not confused with
2598 * flow control, which also uses EAGAIN.
2599 */
2600
2601 if (error == EAGAIN)
2602 return (ENOMEM);
2603 else
2604 return (error);
2605 }
2606 if (mctl != NULL) {
2607 if (mctl->b_cont == NULL)
2608 mctl->b_cont = mp;
2609 else if (mp != NULL)
2610 linkb(mctl, mp);
2611 mp = mctl;
2612 } else if (mp == NULL)
2613 return (0);
2614
2615 mp->b_flag |= b_flag;
2616 mp->b_band = (uchar_t)pri;
2617
2618 if (flags & MSG_IGNFLOW) {
2619 /*
2620 * XXX Hack: Don't get stuck running service
2621 * procedures. This is needed for sockfs when
2622 * sending the unbind message out of the rput
2623 * procedure - we don't want a put procedure
2624 * to run service procedures.
2625 */
2626 putnext(wqp, mp);
2627 } else {
2628 stream_willservice(stp);
2629 putnext(wqp, mp);
2630 stream_runservice(stp);
2631 }
2632 return (0);
2633 }
2634 /*
2635 * Stream supports rwnext() for the write side.
2636 */
2637 if ((error = strmakedata(iosize, uiop, stp, flags, &mp)) != 0) {
2638 freemsg(mctl);
2639 /*
2640 * map EAGAIN to ENOMEM since EAGAIN means "flow controlled".
2641 */
2642 return (error == EAGAIN ? ENOMEM : error);
2643 }
2644 if (mctl != NULL) {
2645 if (mctl->b_cont == NULL)
2646 mctl->b_cont = mp;
2647 else if (mp != NULL)
2648 linkb(mctl, mp);
2649 mp = mctl;
2650 } else if (mp == NULL) {
2651 return (0);
2652 }
2653
2654 mp->b_flag |= b_flag;
2655 mp->b_band = (uchar_t)pri;
2656
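/*
 * Duplicate the uio so a failed attempt leaves the caller's uio
 * untouched; use the stack iovec array unless the request has
 * more than IOV_MAX_STACK entries.
 */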
2657 if (uiop->uio_iovcnt > IOV_MAX_STACK) {
2658 iovlen = uiop->uio_iovcnt * sizeof (iovec_t);
2659 uiod.d_iov = (struct iovec *)kmem_alloc(iovlen, KM_SLEEP);
2660 } else {
2661 uiod.d_iov = buf;
2662 }
2663
2664 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, uiop->uio_iovcnt);
2665 uiod.d_uio.uio_offset = 0;
2666 uiod.d_mp = mp;
2667 error = rwnext(wqp, &uiod);
2668 if (uiod.d_mp == NULL) {
2669 uioskip(uiop, *iosize);
2670 if (iovlen != 0)
2671 kmem_free(uiod.d_iov, iovlen);
2672 return (error);
2673 }
2674 ASSERT(mp == uiod.d_mp);
2675 if (error == EINVAL) {
2676 /*
2677 * The stream plumbing must have changed while
2678 * we were away, so just turn off rwnext()s.
2679 */
2680 error = 0;
2681 } else if (error == EBUSY || error == EWOULDBLOCK) {
2682 /*
2683 * Couldn't enter a perimeter or took a page fault,
2684 * so fall-back to putnext().
2685 */
2686 error = 0;
2687 } else {
2688 freemsg(mp);
2689 if (iovlen != 0)
2690 kmem_free(uiod.d_iov, iovlen);
2691 return (error);
2692 }
2693 /* Have to check canput before consuming data from the uio */
2694 if (pri == 0) {
2695 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) {
2696 freemsg(mp);
2697 if (iovlen != 0)
2698 kmem_free(uiod.d_iov, iovlen);
2699 return (EWOULDBLOCK);
2700 }
2701 } else {
2702 if (!bcanputnext(wqp, pri) && !(flags & MSG_IGNFLOW)) {
2703 freemsg(mp);
2704 if (iovlen != 0)
2705 kmem_free(uiod.d_iov, iovlen);
2706 return (EWOULDBLOCK);
2707 }
2708 }
2709 ASSERT(mp == uiod.d_mp);
2710 /* Copyin data from the uio */
2711 if ((error = struioget(wqp, mp, &uiod, 0)) != 0) {
2712 freemsg(mp);
2713 if (iovlen != 0)
2714 kmem_free(uiod.d_iov, iovlen);
2715 return (error);
2716 }
2717 uioskip(uiop, *iosize);
2718 if (flags & MSG_IGNFLOW) {
2719 /*
2720 * XXX Hack: Don't get stuck running service procedures.
2721 * This is needed for sockfs when sending the unbind message
2722 * out of the rput procedure - we don't want a put procedure
2723 * to run service procedures.
2724 */
2725 putnext(wqp, mp);
2726 } else {
2727 stream_willservice(stp);
2728 putnext(wqp, mp);
2729 stream_runservice(stp);
2730 }
2731 if (iovlen != 0)
2732 kmem_free(uiod.d_iov, iovlen);
2733 return (0);
2734 }
2735
2736 /*
2737 * Write attempts to break the write request into messages conforming
2738 * with the minimum and maximum packet sizes set downstream.
2739 *
2740 * Write will not block if downstream queue is full and
2741 * O_NDELAY is set, otherwise it will block waiting for the queue to get room.
2742 *
2743 * A write of zero bytes gets packaged into a zero length message and sent
2744 * downstream like any other message.
2745 *
2746 * If buffers of the requested sizes are not available, the write will
2747 * sleep until the buffers become available.
2748 *
2749 * Write will supply a write offset in a message when it makes
2750 * sense. The offset can be specified by downstream modules as part of
2751 * an M_SETOPTS message. Write will not supply the write offset if it
2752 * cannot supply any data in a buffer. In other words, write will never
2753 * send down an empty packet due to a write offset.
2754 */
2755 /* ARGSUSED2 */
2756 int
2757 strwrite(struct vnode *vp, struct uio *uiop, cred_t *crp)
2758 {
2759 return (strwrite_common(vp, uiop, crp, 0));
2760 }
2761
2762 /* ARGSUSED2 */
2763 int
2764 strwrite_common(struct vnode *vp, struct uio *uiop, cred_t *crp, int wflag)
2765 {
2766 struct stdata *stp;
2767 struct queue *wqp;
2768 ssize_t rmin, rmax;
2769 ssize_t iosize;
2770 int waitflag;
2771 int tempmode;
2772 int error = 0;
2773 int b_flag;
2774
2775 ASSERT(vp->v_stream);
2776 stp = vp->v_stream;
2777
2778 mutex_enter(&stp->sd_lock);
2779
2780 if ((error = i_straccess(stp, JCWRITE)) != 0) {
2781 mutex_exit(&stp->sd_lock);
2782 return (error);
2783 }
2784
2785 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
2786 error = strwriteable(stp, B_TRUE, B_TRUE);
2787 if (error != 0) {
2788 mutex_exit(&stp->sd_lock);
2789 return (error);
2790 }
2791 }
2792
2793 mutex_exit(&stp->sd_lock);
2794
2795 wqp = stp->sd_wrq;
2796
2797 /* use the values cached in the stream head */
2798 rmin = stp->sd_qn_minpsz;
2799 rmax = stp->sd_qn_maxpsz;
2800
2801 /*
2802 * Check the min/max packet size constraints. If min packet size
2803 * is non-zero, the write cannot be split into multiple messages
2804 * and still guarantee the size constraints.
2805 */
2806 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_IN, "strwrite in:q %p", wqp);
2807
2808 ASSERT((rmax >= 0) || (rmax == INFPSZ));
2809 if (rmax == 0) {
2810 return (0);
2811 }
2812 if (rmin > 0) {
2813 if (uiop->uio_resid < rmin) {
2814 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2815 "strwrite out:q %p out %d error %d",
2816 wqp, 0, ERANGE);
2817 return (ERANGE);
2818 }
2819 if ((rmax != INFPSZ) && (uiop->uio_resid > rmax)) {
2820 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2821 "strwrite out:q %p out %d error %d",
2822 wqp, 1, ERANGE);
2823 return (ERANGE);
2824 }
2825 }
2826
2827 /*
2828 * Do until count satisfied or error.
2829 */
2830 waitflag = WRITEWAIT | wflag;
2831 if (stp->sd_flag & OLDNDELAY)
2832 tempmode = uiop->uio_fmode & ~FNDELAY;
2833 else
2834 tempmode = uiop->uio_fmode;
2835
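/* With no downstream maximum, send the request as a single message. */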
2836 if (rmax == INFPSZ)
2837 rmax = uiop->uio_resid;
2838
2839 /*
2840 * Note that tempmode does not get used in strput/strmakedata
2841 * but only in strwaitq. The other routines use uio_fmode
2842 * unmodified.
2843 */
2844
2845 /* LINTED: constant in conditional context */
2846 while (1) { /* breaks when uio_resid reaches zero */
2847 /*
2848 * Determine the size of the next message to be
2849 * packaged. May have to break write into several
2850 * messages based on max packet size.
2851 */
2852 iosize = MIN(uiop->uio_resid, rmax);
2853
2854 /*
2855 * Put block downstream when flow control allows it.
2856 */
2857 if ((stp->sd_flag & STRDELIM) && (uiop->uio_resid == iosize))
2858 b_flag = MSGDELIM;
2859 else
2860 b_flag = 0;
2861
2862 for (;;) {
2863 int done = 0;
2864
2865 error = strput(stp, NULL, uiop, &iosize, b_flag, 0, 0);
2866 if (error == 0)
2867 break;
2868 if (error != EWOULDBLOCK)
2869 goto out;
2870
2871 mutex_enter(&stp->sd_lock);
2872 /*
2873 * Check for a missed wakeup.
2874 * Needed since strput did not hold sd_lock across
2875 * the canputnext.
2876 */
2877 if (canputnext(wqp)) {
2878 /* Try again */
2879 mutex_exit(&stp->sd_lock);
2880 continue;
2881 }
2882 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAIT,
2883 "strwrite wait:q %p wait", wqp);
2884 if ((error = strwaitq(stp, waitflag, (ssize_t)0,
2885 tempmode, -1, &done)) != 0 || done) {
2886 mutex_exit(&stp->sd_lock);
2887 if ((vp->v_type == VFIFO) &&
2888 (uiop->uio_fmode & FNDELAY) &&
2889 (error == EAGAIN))
2890 error = 0;
2891 goto out;
2892 }
2893 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAKE,
2894 "strwrite wake:q %p awakes", wqp);
2895 if ((error = i_straccess(stp, JCWRITE)) != 0) {
2896 mutex_exit(&stp->sd_lock);
2897 goto out;
2898 }
2899 mutex_exit(&stp->sd_lock);
2900 }
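/*
 * Once part of the request has been sent downstream, don't let a
 * signal interrupt the remainder of the write.
 */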
2901 waitflag |= NOINTR;
2902 TRACE_2(TR_FAC_STREAMS_FR, TR_STRWRITE_RESID,
2903 "strwrite resid:q %p uiop %p", wqp, uiop);
2904 if (uiop->uio_resid) {
2905 /* Recheck for errors - needed for sockets */
2906 if ((stp->sd_wput_opt & SW_RECHECK_ERR) &&
2907 (stp->sd_flag & (STWRERR|STRHUP|STPLEX))) {
2908 mutex_enter(&stp->sd_lock);
2909 error = strwriteable(stp, B_FALSE, B_TRUE);
2910 mutex_exit(&stp->sd_lock);
2911 if (error != 0)
2912 return (error);
2913 }
2914 continue;
2915 }
2916 break;
2917 }
2918 out:
2919 /*
2920 * For historical reasons, applications expect EAGAIN when a data
2921 * mblk_t cannot be allocated, so change ENOMEM back to EAGAIN.
2922 */
2923 if (error == ENOMEM)
2924 error = EAGAIN;
2925 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2926 "strwrite out:q %p out %d error %d", wqp, 2, error);
2927 return (error);
2928 }
2929
2930 /*
2931 * Stream head write service routine.
2932 * Its job is to wake up any sleeping writers when a queue
2933 * downstream needs data (part of the flow control in putq and getq).
2934 * It also must wake anyone sleeping on a poll().
2935 * For a stream head right below a mux module, it must also invoke the put
2936 * procedure of the next module downstream.
2937 */
2938 int
2939 strwsrv(queue_t *q)
2940 {
2941 struct stdata *stp;
2942 queue_t *tq;
2943 qband_t *qbp;
2944 int i;
2945 qband_t *myqbp;
2946 int isevent;
2947 unsigned char qbf[NBAND]; /* band flushing backenable flags */
2948
2949 TRACE_1(TR_FAC_STREAMS_FR,
2950 TR_STRWSRV, "strwsrv:q %p", q);
2951 stp = (struct stdata *)q->q_ptr;
2952 ASSERT(qclaimed(q));
2953 mutex_enter(&stp->sd_lock);
2954 ASSERT(!(stp->sd_flag & STPLEX));
2955
2956 if (stp->sd_flag & WSLEEP) {
2957 stp->sd_flag &= ~WSLEEP;
2958 cv_broadcast(&q->q_wait);
2959 }
2960 mutex_exit(&stp->sd_lock);
2961
2962 /* The other end of a stream pipe went away. */
2963 if ((tq = q->q_next) == NULL) {
2964 return (0);
2965 }
2966
2967 /* Find the next module forward that has a service procedure */
2968 claimstr(q);
2969 tq = q->q_nfsrv;
2970 ASSERT(tq != NULL);
2971
2972 if ((q->q_flag & QBACK)) {
2973 if ((tq->q_flag & QFULL)) {
2974 mutex_enter(QLOCK(tq));
2975 if (!(tq->q_flag & QFULL)) {
2976 mutex_exit(QLOCK(tq));
2977 goto wakeup;
2978 }
2979 /*
2980 * The queue must have become full again. Set QWANTW
2981 * again so strwsrv will be back enabled when
2982 * the queue becomes non-full next time.
2983 */
2984 tq->q_flag |= QWANTW;
2985 mutex_exit(QLOCK(tq));
2986 } else {
2987 wakeup:
2988 pollwakeup(&stp->sd_pollist, POLLWRNORM);
2989 mutex_enter(&stp->sd_lock);
2990 if (stp->sd_sigflags & S_WRNORM)
2991 strsendsig(stp->sd_siglist, S_WRNORM, 0, 0);
2992 mutex_exit(&stp->sd_lock);
2993 }
2994 }
2995
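/*
 * Scan the bands: for each band that was backenabled (QB_BACK)
 * and is no longer full, note it so the band writers can be
 * woken up (poll and S_WRBAND) below.
 */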
2996 isevent = 0;
2997 i = 1;
2998 bzero((caddr_t)qbf, NBAND);
2999 mutex_enter(QLOCK(tq));
3000 if ((myqbp = q->q_bandp) != NULL)
3001 for (qbp = tq->q_bandp; qbp && myqbp; qbp = qbp->qb_next) {
3002 ASSERT(myqbp);
3003 if ((myqbp->qb_flag & QB_BACK)) {
3004 if (qbp->qb_flag & QB_FULL) {
3005 /*
3006 * The band must have become full again.
3007 * Set QB_WANTW again so strwsrv will
3008 * be back enabled when the band becomes
3009 * non-full next time.
3010 */
3011 qbp->qb_flag |= QB_WANTW;
3012 } else {
3013 isevent = 1;
3014 qbf[i] = 1;
3015 }
3016 }
3017 myqbp = myqbp->qb_next;
3018 i++;
3019 }
3020 mutex_exit(QLOCK(tq));
3021
3022 if (isevent) {
3023 for (i = tq->q_nband; i; i--) {
3024 if (qbf[i]) {
3025 pollwakeup(&stp->sd_pollist, POLLWRBAND);
3026 mutex_enter(&stp->sd_lock);
3027 if (stp->sd_sigflags & S_WRBAND)
3028 strsendsig(stp->sd_siglist, S_WRBAND,
3029 (uchar_t)i, 0);
3030 mutex_exit(&stp->sd_lock);
3031 }
3032 }
3033 }
3034
3035 releasestr(q);
3036 return (0);
3037 }
3038
3039 /*
3040 * Special case of strcopyin/strcopyout for copying
3041 * struct strioctl that can deal with both data
3042 * models.
3043 */
3044
3045 #ifdef _LP64
3046
3047 static int
3048 strcopyin_strioctl(void *from, void *to, int flag, int copyflag)
3049 {
3050 struct strioctl32 strioc32;
3051 struct strioctl *striocp;
3052
3053 if (copyflag & U_TO_K) {
3054 ASSERT((copyflag & K_TO_K) == 0);
3055
3056 if ((flag & FMODELS) == DATAMODEL_ILP32) {
3057 if (copyin(from, &strioc32, sizeof (strioc32)))
3058 return (EFAULT);
3059
3060 striocp = (struct strioctl *)to;
3061 striocp->ic_cmd = strioc32.ic_cmd;
3062 striocp->ic_timout = strioc32.ic_timout;
3063 striocp->ic_len = strioc32.ic_len;
3064 striocp->ic_dp = (char *)(uintptr_t)strioc32.ic_dp;
3065
3066 } else { /* NATIVE data model */
3067 if (copyin(from, to, sizeof (struct strioctl))) {
3068 return (EFAULT);
3069 } else {
3070 return (0);
3071 }
3072 }
3073 } else {
3074 ASSERT(copyflag & K_TO_K);
3075 bcopy(from, to, sizeof (struct strioctl));
3076 }
3077 return (0);
3078 }
3079
3080 static int
3081 strcopyout_strioctl(void *from, void *to, int flag, int copyflag)
3082 {
3083 struct strioctl32 strioc32;
3084 struct strioctl *striocp;
3085
3086 if (copyflag & U_TO_K) {
3087 ASSERT((copyflag & K_TO_K) == 0);
3088
3089 if ((flag & FMODELS) == DATAMODEL_ILP32) {
3090 striocp = (struct strioctl *)from;
3091 strioc32.ic_cmd = striocp->ic_cmd;
3092 strioc32.ic_timout = striocp->ic_timout;
3093 strioc32.ic_len = striocp->ic_len;
3094 strioc32.ic_dp = (caddr32_t)(uintptr_t)striocp->ic_dp;
3095 ASSERT((char *)(uintptr_t)strioc32.ic_dp ==
3096 striocp->ic_dp);
3097
3098 if (copyout(&strioc32, to, sizeof (strioc32)))
3099 return (EFAULT);
3100
3101 } else { /* NATIVE data model */
3102 if (copyout(from, to, sizeof (struct strioctl))) {
3103 return (EFAULT);
3104 } else {
3105 return (0);
3106 }
3107 }
3108 } else {
3109 ASSERT(copyflag & K_TO_K);
3110 bcopy(from, to, sizeof (struct strioctl));
3111 }
3112 return (0);
3113 }
3114
3115 #else /* ! _LP64 */
3116
3117 /* ARGSUSED2 */
3118 static int
3119 strcopyin_strioctl(void *from, void *to, int flag, int copyflag)
3120 {
3121 return (strcopyin(from, to, sizeof (struct strioctl), copyflag));
3122 }
3123
3124 /* ARGSUSED2 */
3125 static int
3126 strcopyout_strioctl(void *from, void *to, int flag, int copyflag)
3127 {
3128 return (strcopyout(from, to, sizeof (struct strioctl), copyflag));
3129 }
3130
3131 #endif /* _LP64 */
3132
3133 /*
3134 * Determine type of job control semantics expected by user. The
3135 * possibilities are:
3136 * JCREAD - Behaves like read() on fd; send SIGTTIN
3137 * JCWRITE - Behaves like write() on fd; send SIGTTOU if TOSTOP set
3138 * JCSETP - Sets a value in the stream; send SIGTTOU, ignore TOSTOP
3139 * JCGETP - Gets a value in the stream; no signals.
3140 * See straccess in strsubr.c for usage of these values.
3141 *
3142 * This routine also returns -1 for I_STR as a special case; the
3143 * caller must call again with the real ioctl number for
3144 * classification.
3145 */
3146 static int
3147 job_control_type(int cmd)
3148 {
3149 switch (cmd) {
3150 case I_STR:
3151 return (-1);
3152
3153 case I_RECVFD:
3154 case I_E_RECVFD:
3155 return (JCREAD);
3156
3157 case I_FDINSERT:
3158 case I_SENDFD:
3159 return (JCWRITE);
3160
3161 case TCSETA:
3162 case TCSETAW:
3163 case TCSETAF:
3164 case TCSBRK:
3165 case TCXONC:
3166 case TCFLSH:
3167 case TCDSET: /* Obsolete */
3168 case TIOCSWINSZ:
3169 case TCSETS:
3170 case TCSETSW:
3171 case TCSETSF:
3172 case TIOCSETD:
3173 case TIOCHPCL:
3174 case TIOCSETP:
3175 case TIOCSETN:
3176 case TIOCEXCL:
3177 case TIOCNXCL:
3178 case TIOCFLUSH:
3179 case TIOCSETC:
3180 case TIOCLBIS:
3181 case TIOCLBIC:
3182 case TIOCLSET:
3183 case TIOCSBRK:
3184 case TIOCCBRK:
3185 case TIOCSDTR:
3186 case TIOCCDTR:
3187 case TIOCSLTC:
3188 case TIOCSTOP:
3189 case TIOCSTART:
3190 case TIOCSTI:
3191 case TIOCSPGRP:
3192 case TIOCMSET:
3193 case TIOCMBIS:
3194 case TIOCMBIC:
3195 case TIOCREMOTE:
3196 case TIOCSIGNAL:
3197 case LDSETT:
3198 case LDSMAP: /* Obsolete */
3199 case DIOCSETP:
3200 case I_FLUSH:
3201 case I_SRDOPT:
3202 case I_SETSIG:
3203 case I_SWROPT:
3204 case I_FLUSHBAND:
3205 case I_SETCLTIME:
3206 case I_SERROPT:
3207 case I_ESETSIG:
3208 case FIONBIO:
3209 case FIOASYNC:
3210 case FIOSETOWN:
3211 case JBOOT: /* Obsolete */
3212 case JTERM: /* Obsolete */
3213 case JTIMOM: /* Obsolete */
3214 case JZOMBOOT: /* Obsolete */
3215 case JAGENT: /* Obsolete */
3216 case JTRUN: /* Obsolete */
3217 case JXTPROTO: /* Obsolete */
3218 case TIOCSETLD:
3219 return (JCSETP);
3220 }
3221
3222 return (JCGETP);
3223 }
3224
3225 /*
3226 * ioctl for streams
3227 */
3228 int
3229 strioctl(struct vnode *vp, int cmd, intptr_t arg, int flag, int copyflag,
3230 cred_t *crp, int *rvalp)
3231 {
3232 struct stdata *stp;
3233 struct strcmd *scp;
3234 struct strioctl strioc;
3235 struct uio uio;
3236 struct iovec iov;
3237 int access;
3238 mblk_t *mp;
3239 int error = 0;
3240 int done = 0;
3241 ssize_t rmin, rmax;
3242 queue_t *wrq;
3243 queue_t *rdq;
3244 boolean_t kioctl = B_FALSE;
3245 uint32_t auditing = AU_AUDITING();
3246
3247 if (flag & FKIOCTL) {
3248 copyflag = K_TO_K;
3249 kioctl = B_TRUE;
3250 }
3251 ASSERT(vp->v_stream);
3252 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K);
3253 stp = vp->v_stream;
3254
3255 TRACE_3(TR_FAC_STREAMS_FR, TR_IOCTL_ENTER,
3256 "strioctl:stp %p cmd %X arg %lX", stp, cmd, arg);
3257
3258 /*
3259 * If the copy is kernel to kernel, make sure that the FNATIVE
3260 * flag is set. After this it would be a serious error to have
3261 * no model flag.
3262 */
3263 if (copyflag == K_TO_K)
3264 flag = (flag & ~FMODELS) | FNATIVE;
3265
3266 ASSERT((flag & FMODELS) != 0);
3267
3268 wrq = stp->sd_wrq;
3269 rdq = _RD(wrq);
3270
3271 access = job_control_type(cmd);
3272
3273 /* We should never see these here, should be handled by iwscn */
3274 if (cmd == SRIOCSREDIR || cmd == SRIOCISREDIR)
3275 return (EINVAL);
3276
3277 mutex_enter(&stp->sd_lock);
3278 if ((access != -1) && ((error = i_straccess(stp, access)) != 0)) {
3279 mutex_exit(&stp->sd_lock);
3280 return (error);
3281 }
3282 mutex_exit(&stp->sd_lock);
3283
3284 /*
3285 * Check for sgttyb-related ioctls first, and complain as
3286 * necessary.
3287 */
3288 switch (cmd) {
3289 case TIOCGETP:
3290 case TIOCSETP:
3291 case TIOCSETN:
3292 if (sgttyb_handling >= 2 && !sgttyb_complaint) {
3293 sgttyb_complaint = B_TRUE;
3294 cmn_err(CE_NOTE,
3295 "application used obsolete TIOC[GS]ET");
3296 }
3297 if (sgttyb_handling >= 3) {
3298 tsignal(curthread, SIGSYS);
3299 return (EIO);
3300 }
3301 break;
3302 }
3303
3304 mutex_enter(&stp->sd_lock);
3305
3306 switch (cmd) {
3307 case I_RECVFD:
3308 case I_E_RECVFD:
3309 case I_PEEK:
3310 case I_NREAD:
3311 case FIONREAD:
3312 case FIORDCHK:
3313 case I_ATMARK:
3314 case FIONBIO:
3315 case FIOASYNC:
3316 if (stp->sd_flag & (STRDERR|STPLEX)) {
3317 error = strgeterr(stp, STRDERR|STPLEX, 0);
3318 if (error != 0) {
3319 mutex_exit(&stp->sd_lock);
3320 return (error);
3321 }
3322 }
3323 break;
3324
3325 default:
3326 if (stp->sd_flag & (STRDERR|STWRERR|STPLEX)) {
3327 error = strgeterr(stp, STRDERR|STWRERR|STPLEX, 0);
3328 if (error != 0) {
3329 mutex_exit(&stp->sd_lock);
3330 return (error);
3331 }
3332 }
3333 }
3334
3335 mutex_exit(&stp->sd_lock);
3336
3337 switch (cmd) {
3338 default:
3339 /*
3340 * The stream head has hardcoded knowledge of a
3341 * miscellaneous collection of terminal-, keyboard- and
3342 * mouse-related ioctls, enumerated below. This hardcoded
3343 * knowledge allows the stream head to automatically
3344 * convert transparent ioctl requests made by userland
3345 * programs into I_STR ioctls which many old STREAMS
3346 * modules and drivers require.
3347 *
3348 * No new ioctls should ever be added to this list.
3349 * Instead, the STREAMS module or driver should be written
3350 * to either handle transparent ioctls or require any
3351 * userland programs to use I_STR ioctls (by returning
3352 * EINVAL to any transparent ioctl requests).
3353 *
3354 * More importantly, removing ioctls from this list should
3355 * be done with the utmost care, since our STREAMS modules
3356 * and drivers *count* on the stream head performing this
3357 * conversion, and thus may panic while processing
3358 * a transparent ioctl request for one of these ioctls (keep
3359 * in mind that third party modules and drivers may have
3360 * similar problems).
3361 */
3362 if (((cmd & IOCTYPE) == LDIOC) ||
3363 ((cmd & IOCTYPE) == tIOC) ||
3364 ((cmd & IOCTYPE) == TIOC) ||
3365 ((cmd & IOCTYPE) == KIOC) ||
3366 ((cmd & IOCTYPE) == MSIOC) ||
3367 ((cmd & IOCTYPE) == VUIOC)) {
3368 /*
3369 * The ioctl is a tty ioctl - set up strioc buffer
3370 * and call strdoioctl() to do the work.
3371 */
3372 if (stp->sd_flag & STRHUP)
3373 return (ENXIO);
3374 strioc.ic_cmd = cmd;
3375 strioc.ic_timout = INFTIM;
3376
3377 switch (cmd) {
3378
3379 case TCXONC:
3380 case TCSBRK:
3381 case TCFLSH:
3382 case TCDSET:
3383 {
3384 int native_arg = (int)arg;
3385 strioc.ic_len = sizeof (int);
3386 strioc.ic_dp = (char *)&native_arg;
3387 return (strdoioctl(stp, &strioc, flag,
3388 K_TO_K, crp, rvalp));
3389 }
3390
3391 case TCSETA:
3392 case TCSETAW:
3393 case TCSETAF:
3394 strioc.ic_len = sizeof (struct termio);
3395 strioc.ic_dp = (char *)arg;
3396 return (strdoioctl(stp, &strioc, flag,
3397 copyflag, crp, rvalp));
3398
3399 case TCSETS:
3400 case TCSETSW:
3401 case TCSETSF:
3402 strioc.ic_len = sizeof (struct termios);
3403 strioc.ic_dp = (char *)arg;
3404 return (strdoioctl(stp, &strioc, flag,
3405 copyflag, crp, rvalp));
3406
3407 case LDSETT:
3408 strioc.ic_len = sizeof (struct termcb);
3409 strioc.ic_dp = (char *)arg;
3410 return (strdoioctl(stp, &strioc, flag,
3411 copyflag, crp, rvalp));
3412
3413 case TIOCSETP:
3414 strioc.ic_len = sizeof (struct sgttyb);
3415 strioc.ic_dp = (char *)arg;
3416 return (strdoioctl(stp, &strioc, flag,
3417 copyflag, crp, rvalp));
3418
3419 case TIOCSTI:
3420 if ((flag & FREAD) == 0 &&
3421 secpolicy_sti(crp) != 0) {
3422 return (EPERM);
3423 }
3424 mutex_enter(&stp->sd_lock);
3425 mutex_enter(&curproc->p_splock);
3426 if (stp->sd_sidp != curproc->p_sessp->s_sidp &&
3427 secpolicy_sti(crp) != 0) {
3428 mutex_exit(&curproc->p_splock);
3429 mutex_exit(&stp->sd_lock);
3430 return (EACCES);
3431 }
3432 mutex_exit(&curproc->p_splock);
3433 mutex_exit(&stp->sd_lock);
3434
3435 strioc.ic_len = sizeof (char);
3436 strioc.ic_dp = (char *)arg;
3437 return (strdoioctl(stp, &strioc, flag,
3438 copyflag, crp, rvalp));
3439
3440 case TIOCSWINSZ:
3441 strioc.ic_len = sizeof (struct winsize);
3442 strioc.ic_dp = (char *)arg;
3443 return (strdoioctl(stp, &strioc, flag,
3444 copyflag, crp, rvalp));
3445
3446 case TIOCSSIZE:
3447 strioc.ic_len = sizeof (struct ttysize);
3448 strioc.ic_dp = (char *)arg;
3449 return (strdoioctl(stp, &strioc, flag,
3450 copyflag, crp, rvalp));
3451
3452 case TIOCSSOFTCAR:
3453 case KIOCTRANS:
3454 case KIOCTRANSABLE:
3455 case KIOCCMD:
3456 case KIOCSDIRECT:
3457 case KIOCSCOMPAT:
3458 case KIOCSKABORTEN:
3459 case KIOCSRPTDELAY:
3460 case KIOCSRPTRATE:
3461 case VUIDSFORMAT:
3462 case TIOCSPPS:
3463 strioc.ic_len = sizeof (int);
3464 strioc.ic_dp = (char *)arg;
3465 return (strdoioctl(stp, &strioc, flag,
3466 copyflag, crp, rvalp));
3467
3468 case KIOCSETKEY:
3469 case KIOCGETKEY:
3470 strioc.ic_len = sizeof (struct kiockey);
3471 strioc.ic_dp = (char *)arg;
3472 return (strdoioctl(stp, &strioc, flag,
3473 copyflag, crp, rvalp));
3474
3475 case KIOCSKEY:
3476 case KIOCGKEY:
3477 strioc.ic_len = sizeof (struct kiockeymap);
3478 strioc.ic_dp = (char *)arg;
3479 return (strdoioctl(stp, &strioc, flag,
3480 copyflag, crp, rvalp));
3481
3482 case KIOCSLED:
3483 /* arg is a pointer to char */
3484 strioc.ic_len = sizeof (char);
3485 strioc.ic_dp = (char *)arg;
3486 return (strdoioctl(stp, &strioc, flag,
3487 copyflag, crp, rvalp));
3488
3489 case MSIOSETPARMS:
3490 strioc.ic_len = sizeof (Ms_parms);
3491 strioc.ic_dp = (char *)arg;
3492 return (strdoioctl(stp, &strioc, flag,
3493 copyflag, crp, rvalp));
3494
3495 case VUIDSADDR:
3496 case VUIDGADDR:
3497 strioc.ic_len = sizeof (struct vuid_addr_probe);
3498 strioc.ic_dp = (char *)arg;
3499 return (strdoioctl(stp, &strioc, flag,
3500 copyflag, crp, rvalp));
3501
3502 /*
3503 * These M_IOCTL's don't require any data to be sent
3504 * downstream, and the driver will allocate and link
3505 * on its own mblk_t upon M_IOCACK -- thus we set
3506 * ic_len to zero and set ic_dp to arg so we know
3507 * where to copyout to later.
3508 */
3509 case TIOCGSOFTCAR:
3510 case TIOCGWINSZ:
3511 case TIOCGSIZE:
3512 case KIOCGTRANS:
3513 case KIOCGTRANSABLE:
3514 case KIOCTYPE:
3515 case KIOCGDIRECT:
3516 case KIOCGCOMPAT:
3517 case KIOCLAYOUT:
3518 case KIOCGLED:
3519 case MSIOGETPARMS:
3520 case MSIOBUTTONS:
3521 case VUIDGFORMAT:
3522 case TIOCGPPS:
3523 case TIOCGPPSEV:
3524 case TCGETA:
3525 case TCGETS:
3526 case LDGETT:
3527 case TIOCGETP:
3528 case KIOCGRPTDELAY:
3529 case KIOCGRPTRATE:
3530 strioc.ic_len = 0;
3531 strioc.ic_dp = (char *)arg;
3532 return (strdoioctl(stp, &strioc, flag,
3533 copyflag, crp, rvalp));
3534 }
3535 }
3536
3537 /*
3538 * Unknown cmd - send it down as a transparent ioctl.
3539 */
3540 strioc.ic_cmd = cmd;
3541 strioc.ic_timout = INFTIM;
3542 strioc.ic_len = TRANSPARENT;
3543 strioc.ic_dp = (char *)&arg;
3544
3545 return (strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp));
3546
3547 case I_STR:
3548 /*
3549 * Stream ioctl. Read in an strioctl buffer from the user
3550 * along with any data specified and send it downstream.
3551 * strdoioctl() will allow only one ioctl message at
3552 * a time, and waits for the acknowledgement.
3553 */
3554
3555 if (stp->sd_flag & STRHUP)
3556 return (ENXIO);
3557
3558 error = strcopyin_strioctl((void *)arg, &strioc, flag,
3559 copyflag);
3560 if (error != 0)
3561 return (error);
3562
3563 if ((strioc.ic_len < 0) || (strioc.ic_timout < -1))
3564 return (EINVAL);
3565
3566 access = job_control_type(strioc.ic_cmd);
3567 mutex_enter(&stp->sd_lock);
3568 if ((access != -1) &&
3569 ((error = i_straccess(stp, access)) != 0)) {
3570 mutex_exit(&stp->sd_lock);
3571 return (error);
3572 }
3573 mutex_exit(&stp->sd_lock);
3574
3575 /*
3576 * The I_STR facility provides a trap door for malicious
3577 * code to send down bogus streamio(7I) ioctl commands to
3578 * unsuspecting STREAMS modules and drivers which expect to
3579 * only get these messages from the stream head.
3580 * Explicitly prohibit any streamio ioctls which can be
3581 * passed downstream by the stream head. Note that we do
3582 * not block all streamio ioctls because the ioctl
3583 * numberspace is not well managed and thus it's possible
3584 * that a module or driver's ioctl numbers may accidentally
3585 * collide with them.
3586 */
3587 switch (strioc.ic_cmd) {
3588 case I_LINK:
3589 case I_PLINK:
3590 case I_UNLINK:
3591 case I_PUNLINK:
3592 case _I_GETPEERCRED:
3593 case _I_PLINK_LH:
3594 return (EINVAL);
3595 }
3596
3597 error = strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp);
3598 if (error == 0) {
3599 error = strcopyout_strioctl(&strioc, (void *)arg,
3600 flag, copyflag);
3601 }
3602 return (error);
3603
3604 case _I_CMD:
3605 /*
3606 * Like I_STR, but without using M_IOC* messages and without
3607 * copyins/copyouts beyond the passed-in argument.
3608 */
3609 if (stp->sd_flag & STRHUP)
3610 return (ENXIO);
3611
3612 if ((scp = kmem_alloc(sizeof (strcmd_t), KM_NOSLEEP)) == NULL)
3613 return (ENOMEM);
3614
3615 if (copyin((void *)arg, scp, sizeof (strcmd_t))) {
3616 kmem_free(scp, sizeof (strcmd_t));
3617 return (EFAULT);
3618 }
3619
3620 access = job_control_type(scp->sc_cmd);
3621 mutex_enter(&stp->sd_lock);
3622 if (access != -1 && (error = i_straccess(stp, access)) != 0) {
3623 mutex_exit(&stp->sd_lock);
3624 kmem_free(scp, sizeof (strcmd_t));
3625 return (error);
3626 }
3627 mutex_exit(&stp->sd_lock);
3628
3629 *rvalp = 0;
3630 if ((error = strdocmd(stp, scp, crp)) == 0) {
3631 if (copyout(scp, (void *)arg, sizeof (strcmd_t)))
3632 error = EFAULT;
3633 }
3634 kmem_free(scp, sizeof (strcmd_t));
3635 return (error);
3636
3637 case I_NREAD:
3638 /*
3639 * Return the number of bytes of data in the first message
3640 * on the queue in "arg" and return the number of messages
3641 * on the queue as the return value.
3642 */
3643 {
3644 size_t size;
3645 int retval;
3646 int count = 0;
3647
3648 mutex_enter(QLOCK(rdq));
3649
3650 size = msgdsize(rdq->q_first);
3651 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3652 count++;
3653
3654 mutex_exit(QLOCK(rdq));
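/*
 * A synchronous stream may hold data below the stream head; ask
 * the module for its message count (and the size of the first
 * message if our queue is empty).
 */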
3655 if (stp->sd_struiordq) {
3656 infod_t infod;
3657
3658 infod.d_cmd = INFOD_COUNT;
3659 infod.d_count = 0;
3660 if (count == 0) {
3661 infod.d_cmd |= INFOD_FIRSTBYTES;
3662 infod.d_bytes = 0;
3663 }
3664 infod.d_res = 0;
3665 (void) infonext(rdq, &infod);
3666 count += infod.d_count;
3667 if (infod.d_res & INFOD_FIRSTBYTES)
3668 size = infod.d_bytes;
3669 }
3670
3671 /*
3672 * Drop down from size_t to the "int" required by the
3673 * interface. Cap at INT_MAX.
3674 */
3675 retval = MIN(size, INT_MAX);
3676 error = strcopyout(&retval, (void *)arg, sizeof (retval),
3677 copyflag);
3678 if (!error)
3679 *rvalp = count;
3680 return (error);
3681 }
3682
3683 case FIONREAD:
3684 /*
3685 * Return the number of bytes of data in all data messages
3686 * on the queue in "arg".
3687 */
3688 {
3689 size_t size = 0;
3690 int retval;
3691
3692 mutex_enter(QLOCK(rdq));
3693 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3694 size += msgdsize(mp);
3695 mutex_exit(QLOCK(rdq));
3696
3697 if (stp->sd_struiordq) {
3698 infod_t infod;
3699
3700 infod.d_cmd = INFOD_BYTES;
3701 infod.d_res = 0;
3702 infod.d_bytes = 0;
3703 (void) infonext(rdq, &infod);
3704 size += infod.d_bytes;
3705 }
3706
3707 /*
3708 * Drop down from size_t to the "int" required by the
3709 * interface. Cap at INT_MAX.
3710 */
3711 retval = MIN(size, INT_MAX);
3712 error = strcopyout(&retval, (void *)arg, sizeof (retval),
3713 copyflag);
3714
3715 *rvalp = 0;
3716 return (error);
3717 }
3718 case FIORDCHK:
3719 /*
3720 * Unlike FIONREAD, FIORDCHK does not use the arg value;
3721 * instead the count is returned. As with I_NREAD, the value
3722 * may not be accurate, but it is safe. The right thing to do
3723 * would be to add the msgdsizes of all data messages up to
3724 * the first non-data message.
3725 */
3726 {
3727 size_t size = 0;
3728
3729 mutex_enter(QLOCK(rdq));
3730 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3731 size += msgdsize(mp);
3732 mutex_exit(QLOCK(rdq));
3733
3734 if (stp->sd_struiordq) {
3735 infod_t infod;
3736
3737 infod.d_cmd = INFOD_BYTES;
3738 infod.d_res = 0;
3739 infod.d_bytes = 0;
3740 (void) infonext(rdq, &infod);
3741 size += infod.d_bytes;
3742 }
3743
3744 /*
3745 * Since ioctl returns an int, and memory sizes under
3746 * LP64 may not fit, we return INT_MAX if the count was
3747 * actually greater.
3748 */
3749 *rvalp = MIN(size, INT_MAX);
3750 return (0);
3751 }
3752
3753 case I_FIND:
3754 /*
3755 * Get module name.
3756 */
3757 {
3758 char mname[FMNAMESZ + 1];
3759 queue_t *q;
3760
3761 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg,
3762 mname, FMNAMESZ + 1, NULL);
3763 if (error)
3764 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
3765
3766 /*
3767 * Return EINVAL if we're handed a bogus module name.
3768 */
3769 if (fmodsw_find(mname, FMODSW_LOAD) == NULL) {
3770 TRACE_0(TR_FAC_STREAMS_FR,
3771 TR_I_CANT_FIND, "couldn't I_FIND");
3772 return (EINVAL);
3773 }
3774
3775 *rvalp = 0;
3776
3777 /* Look downstream to see if module is there. */
3778 claimstr(stp->sd_wrq);
3779 for (q = stp->sd_wrq->q_next; q; q = q->q_next) {
3780 if (q->q_flag & QREADR) {
3781 q = NULL;
3782 break;
3783 }
3784 if (strcmp(mname, Q2NAME(q)) == 0)
3785 break;
3786 }
3787 releasestr(stp->sd_wrq);
3788
3789 *rvalp = (q ? 1 : 0);
3790 return (error);
3791 }
3792
3793 case I_PUSH:
3794 case __I_PUSH_NOCTTY:
3795 /*
3796 * Push a module.
3797 * For the __I_PUSH_NOCTTY case, push a module but
3798 * do not allocate a controlling tty. See bugid 4025044.
3799 */
3800
3801 {
3802 char mname[FMNAMESZ + 1];
3803 fmodsw_impl_t *fp;
3804 dev_t dummydev;
3805
3806 if (stp->sd_flag & STRHUP)
3807 return (ENXIO);
3808
3809 /*
3810 * Get module name and look up in fmodsw.
3811 */
3812 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg,
3813 mname, FMNAMESZ + 1, NULL);
3814 if (error)
3815 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
3816
3817 if ((fp = fmodsw_find(mname, FMODSW_HOLD | FMODSW_LOAD)) ==
3818 NULL)
3819 return (EINVAL);
3820
3821 TRACE_2(TR_FAC_STREAMS_FR, TR_I_PUSH,
3822 "I_PUSH:fp %p stp %p", fp, stp);
3823
3824 if (error = strstartplumb(stp, flag, cmd)) {
3825 fmodsw_rele(fp);
3826 return (error);
3827 }
3828
3829 /*
3830 * See if any more modules can be pushed on this stream.
3831 * Note that this check must be done after strstartplumb()
3832 * since otherwise multiple threads issuing I_PUSHes on
3833 * the same stream will be able to exceed nstrpush.
3834 */
3835 mutex_enter(&stp->sd_lock);
3836 if (stp->sd_pushcnt >= nstrpush) {
3837 fmodsw_rele(fp);
3838 strendplumb(stp);
3839 mutex_exit(&stp->sd_lock);
3840 return (EINVAL);
3841 }
3842 mutex_exit(&stp->sd_lock);
3843
3844 /*
3845 * Push new module and call its open routine
3846 * via qattach(). Modules don't change device
3847 * numbers, so just ignore dummydev here.
3848 */
3849 dummydev = vp->v_rdev;
3850 if ((error = qattach(rdq, &dummydev, 0, crp, fp,
3851 B_FALSE)) == 0) {
3852 if (vp->v_type == VCHR && /* sorry, no pipes allowed */
3853 (cmd == I_PUSH) && (stp->sd_flag & STRISTTY)) {
3854 /*
3855 * try to allocate it as a controlling terminal
3856 */
3857 (void) strctty(stp);
3858 }
3859 }
3860
3861 mutex_enter(&stp->sd_lock);
3862
3863 /*
3864 * As a performance concern we are caching the values of
3865 * q_minpsz and q_maxpsz of the module below the stream
3866 * head in the stream head.
3867 */
3868 mutex_enter(QLOCK(stp->sd_wrq->q_next));
3869 rmin = stp->sd_wrq->q_next->q_minpsz;
3870 rmax = stp->sd_wrq->q_next->q_maxpsz;
3871 mutex_exit(QLOCK(stp->sd_wrq->q_next));
3872
3873 /* Do this processing here as a performance concern */
3874 if (strmsgsz != 0) {
3875 if (rmax == INFPSZ)
3876 rmax = strmsgsz;
3877 else {
3878 if (vp->v_type == VFIFO)
3879 rmax = MIN(PIPE_BUF, rmax);
3880 else rmax = MIN(strmsgsz, rmax);
3881 }
3882 }
3883
3884 mutex_enter(QLOCK(wrq));
3885 stp->sd_qn_minpsz = rmin;
3886 stp->sd_qn_maxpsz = rmax;
3887 mutex_exit(QLOCK(wrq));
3888
3889 strendplumb(stp);
3890 mutex_exit(&stp->sd_lock);
3891 return (error);
3892 }
3893
3894 case I_POP:
3895 {
3896 queue_t *q;
3897
3898 if (stp->sd_flag & STRHUP)
3899 return (ENXIO);
3900 if (!wrq->q_next) /* for broken pipes */
3901 return (EINVAL);
3902
3903 if (error = strstartplumb(stp, flag, cmd))
3904 return (error);
3905
3906 /*
3907 * If there is an anchor on this stream and popping
3908 * the current module would attempt to pop through the
3909 * anchor, then disallow the pop unless we have sufficient
3910 * privileges; take the cheapest (non-locking) check
3911 * first.
3912 */
3913 if (secpolicy_ip_config(crp, B_TRUE) != 0 ||
3914 (stp->sd_anchorzone != crgetzoneid(crp))) {
3915 mutex_enter(&stp->sd_lock);
3916 /*
3917 * Anchors only apply if there's at least one
3918 * module on the stream (sd_pushcnt > 0).
3919 */
3920 if (stp->sd_pushcnt > 0 &&
3921 stp->sd_pushcnt == stp->sd_anchor &&
3922 stp->sd_vnode->v_type != VFIFO) {
3923 strendplumb(stp);
3924 mutex_exit(&stp->sd_lock);
3925 if (stp->sd_anchorzone != crgetzoneid(crp))
3926 return (EINVAL);
3927 /* Audit and report error */
3928 return (secpolicy_ip_config(crp, B_FALSE));
3929 }
3930 mutex_exit(&stp->sd_lock);
3931 }
3932
3933 q = wrq->q_next;
3934 TRACE_2(TR_FAC_STREAMS_FR, TR_I_POP,
3935 "I_POP:%p from %p", q, stp);
3936 if (q->q_next == NULL || (q->q_flag & (QREADR|QISDRV))) {
3937 error = EINVAL;
3938 } else {
3939 qdetach(_RD(q), 1, flag, crp, B_FALSE);
3940 error = 0;
3941 }
3942 mutex_enter(&stp->sd_lock);
3943
3944 /*
3945 * As a performance concern we are caching the values of
3946 * q_minpsz and q_maxpsz of the module below the stream
3947 * head in the stream head.
3948 */
3949 mutex_enter(QLOCK(wrq->q_next));
3950 rmin = wrq->q_next->q_minpsz;
3951 rmax = wrq->q_next->q_maxpsz;
3952 mutex_exit(QLOCK(wrq->q_next));
3953
3954 /* Do this processing here as a performance concern */
3955 if (strmsgsz != 0) {
3956 if (rmax == INFPSZ)
3957 rmax = strmsgsz;
3958 else {
3959 if (vp->v_type == VFIFO)
3960 rmax = MIN(PIPE_BUF, rmax);
3961 else rmax = MIN(strmsgsz, rmax);
3962 }
3963 }
3964
3965 mutex_enter(QLOCK(wrq));
3966 stp->sd_qn_minpsz = rmin;
3967 stp->sd_qn_maxpsz = rmax;
3968 mutex_exit(QLOCK(wrq));
3969
3970 /* If we popped through the anchor, then reset the anchor. */
3971 if (stp->sd_pushcnt < stp->sd_anchor) {
3972 stp->sd_anchor = 0;
3973 stp->sd_anchorzone = 0;
3974 }
3975 strendplumb(stp);
3976 mutex_exit(&stp->sd_lock);
3977 return (error);
3978 }
3979
3980 case _I_MUXID2FD:
3981 {
3982 /*
3983 * Create a fd for an I_PLINK'ed lower stream with a given
3984 * muxid. With the fd, application can send down ioctls,
3985 * like I_LIST, to the previously I_PLINK'ed stream. Note
3986 * that after getting the fd, the application has to do an
3987 * I_PUNLINK on the muxid before it can do any operation
3988 * on the lower stream. This is required by spec1170.
3989 *
3990 * The fd used to do this ioctl should point to the same
3991 * controlling device used to do the I_PLINK. If it uses
3992 * a different stream or an invalid muxid, I_MUXID2FD will
3993 * fail. The error code is set to EINVAL.
3994 *
3995 * The intended use of this interface is the following:
3996 * an application I_PLINK'ed a stream and then exited, so the fd
3997 * to the lower stream is gone. Another application that
3998 * wants to get a fd to the lower stream uses I_MUXID2FD.
3999 */
4000 int muxid = (int)arg;
4001 int fd;
4002 linkinfo_t *linkp;
4003 struct file *fp;
4004 netstack_t *ns;
4005 str_stack_t *ss;
4006
4007 /*
4008 * Do not allow the wildcard muxid. This ioctl is not
4009 * intended to find an arbitrary link.
4010 */
4011 if (muxid == 0) {
4012 return (EINVAL);
4013 }
4014
4015 ns = netstack_find_by_cred(crp);
4016 ASSERT(ns != NULL);
4017 ss = ns->netstack_str;
4018 ASSERT(ss != NULL);
4019
4020 mutex_enter(&muxifier);
4021 linkp = findlinks(vp->v_stream, muxid, LINKPERSIST, ss);
4022 if (linkp == NULL) {
4023 mutex_exit(&muxifier);
4024 netstack_rele(ss->ss_netstack);
4025 return (EINVAL);
4026 }
4027
4028 if ((fd = ufalloc(0)) == -1) {
4029 mutex_exit(&muxifier);
4030 netstack_rele(ss->ss_netstack);
4031 return (EMFILE);
4032 }
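/*
 * Take an extra hold on the lower stream's file before exposing
 * it through the new file descriptor.
 */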
4033 fp = linkp->li_fpdown;
4034 mutex_enter(&fp->f_tlock);
4035 fp->f_count++;
4036 mutex_exit(&fp->f_tlock);
4037 mutex_exit(&muxifier);
4038 setf(fd, fp);
4039 *rvalp = fd;
4040 netstack_rele(ss->ss_netstack);
4041 return (0);
4042 }
4043
4044 case _I_INSERT:
4045 {
4046 /*
4047 * Insert a module at a given position in a stream.
4048 * In the first release, only allow a privileged user
4049 * to use this ioctl. Furthermore, the insert is only allowed
4050 * below an anchor if the zoneid is the same as the zoneid
4051 * which created the anchor.
4052 *
4053 * Note that we do not plan to support this ioctl
4054 * on pipes in the first release. We want to learn more
4055 * about the implications of these ioctls before extending
4056 * their support. And we do not think these features are
4057 * valuable for pipes.
4058 */
4059 STRUCT_DECL(strmodconf, strmodinsert);
4060 char mod_name[FMNAMESZ + 1];
4061 fmodsw_impl_t *fp;
4062 dev_t dummydev;
4063 queue_t *tmp_wrq;
4064 int pos;
4065 boolean_t is_insert;
4066
4067 STRUCT_INIT(strmodinsert, flag);
4068 if (stp->sd_flag & STRHUP)
4069 return (ENXIO);
4070 if (STRMATED(stp))
4071 return (EINVAL);
4072 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0)
4073 return (error);
4074 if (stp->sd_anchor != 0 &&
4075 stp->sd_anchorzone != crgetzoneid(crp))
4076 return (EINVAL);
4077
4078 error = strcopyin((void *)arg, STRUCT_BUF(strmodinsert),
4079 STRUCT_SIZE(strmodinsert), copyflag);
4080 if (error)
4081 return (error);
4082
4083 /*
4084 * Get module name and look up in fmodsw.
4085 */
4086 error = (copyflag & U_TO_K ? copyinstr :
4087 copystr)(STRUCT_FGETP(strmodinsert, mod_name),
4088 mod_name, FMNAMESZ + 1, NULL);
4089 if (error)
4090 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
4091
4092 if ((fp = fmodsw_find(mod_name, FMODSW_HOLD | FMODSW_LOAD)) ==
4093 NULL)
4094 return (EINVAL);
4095
4096 if ((error = strstartplumb(stp, flag, cmd)) != 0) {
4097 fmodsw_rele(fp);
4098 return (error);
4099 }
4100
4101 /*
4102 * Is this _I_INSERT just like an I_PUSH? We need to know
4103 * this because we do some optimizations if this is a
4104 * module being pushed.
4105 */
4106 pos = STRUCT_FGET(strmodinsert, pos);
4107 is_insert = (pos != 0);
4108
4109 /*
4110 * Make sure pos is valid. Even though it is not an I_PUSH,
4111 * we impose the same limit on the number of modules in a
4112 * stream.
4113 */
4114 mutex_enter(&stp->sd_lock);
4115 if (stp->sd_pushcnt >= nstrpush || pos < 0 ||
4116 pos > stp->sd_pushcnt) {
4117 fmodsw_rele(fp);
4118 strendplumb(stp);
4119 mutex_exit(&stp->sd_lock);
4120 return (EINVAL);
4121 }
4122 if (stp->sd_anchor != 0) {
4123 /*
4124 * Is this insert below the anchor?
4125 * Pushcnt hasn't been increased yet, hence
4126 * we test for greater than here, and greater or
4127 * equal after qattach.
4128 */
4129 if (pos > (stp->sd_pushcnt - stp->sd_anchor) &&
4130 stp->sd_anchorzone != crgetzoneid(crp)) {
4131 fmodsw_rele(fp);
4132 strendplumb(stp);
4133 mutex_exit(&stp->sd_lock);
4134 return (EPERM);
4135 }
4136 }
4137
4138 mutex_exit(&stp->sd_lock);
4139
4140 /*
4141 * First find the correct position at which this module
4142 * is to be inserted. We don't need to call claimstr()
4143 * as the stream should not be changing at this point.
4144 *
4145 * Insert new module and call its open routine
4146 * via qattach(). Modules don't change device
4147 * numbers, so just ignore dummydev here.
4148 */
4149 for (tmp_wrq = stp->sd_wrq; pos > 0;
4150 tmp_wrq = tmp_wrq->q_next, pos--) {
4151 ASSERT(SAMESTR(tmp_wrq));
4152 }
4153 dummydev = vp->v_rdev;
4154 if ((error = qattach(_RD(tmp_wrq), &dummydev, 0, crp,
4155 fp, is_insert)) != 0) {
4156 mutex_enter(&stp->sd_lock);
4157 strendplumb(stp);
4158 mutex_exit(&stp->sd_lock);
4159 return (error);
4160 }
4161
4162 mutex_enter(&stp->sd_lock);
4163
4164 /*
4165 * As a performance concern we are caching the values of
4166 * q_minpsz and q_maxpsz of the module below the stream
4167 * head in the stream head.
4168 */
4169 if (!is_insert) {
4170 mutex_enter(QLOCK(stp->sd_wrq->q_next));
4171 rmin = stp->sd_wrq->q_next->q_minpsz;
4172 rmax = stp->sd_wrq->q_next->q_maxpsz;
4173 mutex_exit(QLOCK(stp->sd_wrq->q_next));
4174
4175 /* Do this processing here as a performance concern */
4176 if (strmsgsz != 0) {
4177 if (rmax == INFPSZ) {
4178 rmax = strmsgsz;
4179 } else {
4180 rmax = MIN(strmsgsz, rmax);
4181 }
4182 }
4183
4184 mutex_enter(QLOCK(wrq));
4185 stp->sd_qn_minpsz = rmin;
4186 stp->sd_qn_maxpsz = rmax;
4187 mutex_exit(QLOCK(wrq));
4188 }
4189
4190 /*
4191 * Need to update the anchor value if this module is
4192 * inserted below the anchor point.
4193 */
4194 if (stp->sd_anchor != 0) {
4195 pos = STRUCT_FGET(strmodinsert, pos);
4196 if (pos >= (stp->sd_pushcnt - stp->sd_anchor))
4197 stp->sd_anchor++;
4198 }
4199
4200 strendplumb(stp);
4201 mutex_exit(&stp->sd_lock);
4202 return (0);
4203 }
4204
4205 case _I_REMOVE:
4206 {
4207 /*
4208 * Remove a module with a given name from a stream. The
4209 * caller of this ioctl needs to provide both the name and
4210 * the position of the module to be removed. This eliminates
4211 * the ambiguity of removal if a module is inserted/pushed
4212 * multiple times in a stream. In the first release, only
4213 * privileged users are allowed to use this ioctl.
4214 * Furthermore, the remove is only allowed
4215 * below an anchor if the zoneid is the same as the zoneid
4216 * which created the anchor.
4217 *
4218 * Note that we do not plan to support this ioctl
4219 * on pipes in the first release. We want to learn more
4220 * about the implications of these ioctls before extending
4221 * their support. And we do not think these features are
4222 * valuable for pipes.
4223 *
4224 * Also note that _I_REMOVE cannot be used to remove a
4225 * driver or the stream head.
4226 */
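/*
 * A minimal userland sketch (illustrative only; _I_REMOVE is a private
 * ioctl, and the hypothetical name below must match the module that is
 * actually at the given position):
 *
 *     struct strmodconf mc;
 *
 *     mc.pos = 1;
 *     mc.mod_name = "bufmod";
 *     if (ioctl(fd, _I_REMOVE, &mc) < 0)
 *             ... error handling ...
 */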
4227 STRUCT_DECL(strmodconf, strmodremove);
4228 queue_t *q;
4229 int pos;
4230 char mod_name[FMNAMESZ + 1];
4231 boolean_t is_remove;
4232
4233 STRUCT_INIT(strmodremove, flag);
4234 if (stp->sd_flag & STRHUP)
4235 return (ENXIO);
4236 if (STRMATED(stp))
4237 return (EINVAL);
4238 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0)
4239 return (error);
4240 if (stp->sd_anchor != 0 &&
4241 stp->sd_anchorzone != crgetzoneid(crp))
4242 return (EINVAL);
4243
4244 error = strcopyin((void *)arg, STRUCT_BUF(strmodremove),
4245 STRUCT_SIZE(strmodremove), copyflag);
4246 if (error)
4247 return (error);
4248
4249 error = (copyflag & U_TO_K ? copyinstr :
4250 copystr)(STRUCT_FGETP(strmodremove, mod_name),
4251 mod_name, FMNAMESZ + 1, NULL);
4252 if (error)
4253 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
4254
4255 if ((error = strstartplumb(stp, flag, cmd)) != 0)
4256 return (error);
4257
4258 /*
4259 * Match the name of given module to the name of module at
4260 * the given position.
4261 */
4262 pos = STRUCT_FGET(strmodremove, pos);
4263
4264 is_remove = (pos != 0);
4265 for (q = stp->sd_wrq->q_next; SAMESTR(q) && pos > 0;
4266 q = q->q_next, pos--)
4267 ;
4268 if (pos > 0 || !SAMESTR(q) ||
4269 strcmp(Q2NAME(q), mod_name) != 0) {
4270 mutex_enter(&stp->sd_lock);
4271 strendplumb(stp);
4272 mutex_exit(&stp->sd_lock);
4273 return (EINVAL);
4274 }
4275
4276 /*
4277 * If the position is at or below an anchor, then the zoneid
4278 * must match the zoneid that created the anchor.
4279 */
4280 if (stp->sd_anchor != 0) {
4281 pos = STRUCT_FGET(strmodremove, pos);
4282 if (pos >= (stp->sd_pushcnt - stp->sd_anchor) &&
4283 stp->sd_anchorzone != crgetzoneid(crp)) {
4284 mutex_enter(&stp->sd_lock);
4285 strendplumb(stp);
4286 mutex_exit(&stp->sd_lock);
4287 return (EPERM);
4288 }
4289 }
4290
4291
4292 ASSERT(!(q->q_flag & QREADR));
4293 qdetach(_RD(q), 1, flag, crp, is_remove);
4294
4295 mutex_enter(&stp->sd_lock);
4296
4297 /*
4298 * As a performance concern we are caching the values of
4299 * q_minpsz and q_maxpsz of the module below the stream
4300 * head in the stream head.
4301 */
4302 if (!is_remove) {
4303 mutex_enter(QLOCK(wrq->q_next));
4304 rmin = wrq->q_next->q_minpsz;
4305 rmax = wrq->q_next->q_maxpsz;
4306 mutex_exit(QLOCK(wrq->q_next));
4307
4308 /* Do this processing here as a performance concern */
4309 if (strmsgsz != 0) {
4310 if (rmax == INFPSZ)
4311 rmax = strmsgsz;
4312 else {
4313 if (vp->v_type == VFIFO)
4314 rmax = MIN(PIPE_BUF, rmax);
4315 else rmax = MIN(strmsgsz, rmax);
4316 }
4317 }
4318
4319 mutex_enter(QLOCK(wrq));
4320 stp->sd_qn_minpsz = rmin;
4321 stp->sd_qn_maxpsz = rmax;
4322 mutex_exit(QLOCK(wrq));
4323 }
4324
4325 /*
4326 * Need to update the anchor value if this module is removed
4327 * at or below the anchor point. If the removed module is at
4328 * the anchor point, remove the anchor for this stream if
4329 * there is no module above the anchor point. Otherwise, if
4330 * the removed module is below the anchor point, decrement the
4331 * anchor point by 1.
4332 */
4333 if (stp->sd_anchor != 0) {
4334 pos = STRUCT_FGET(strmodremove, pos);
4335 if (pos == stp->sd_pushcnt - stp->sd_anchor + 1)
4336 stp->sd_anchor = 0;
4337 else if (pos > (stp->sd_pushcnt - stp->sd_anchor + 1))
4338 stp->sd_anchor--;
4339 }
4340
4341 strendplumb(stp);
4342 mutex_exit(&stp->sd_lock);
4343 return (0);
4344 }
4345
4346 case I_ANCHOR:
4347 /*
4348 * Set the anchor position on the stream to reside at
4349 * the top module (in other words, the top module
4350 * cannot be popped). Anchors with a FIFO make no
4351 * obvious sense, so they're not allowed.
4352 */
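/*
 * A minimal userland sketch (illustrative only): anchor the modules
 * currently on the stream so that subsequent I_POPs cannot pop below
 * this point.
 *
 *     if (ioctl(fd, I_ANCHOR, 0) < 0)
 *             ... error handling ...
 */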
4353 mutex_enter(&stp->sd_lock);
4354
4355 if (stp->sd_vnode->v_type == VFIFO) {
4356 mutex_exit(&stp->sd_lock);
4357 return (EINVAL);
4358 }
4359 /* Only allow the same zoneid to update the anchor */
4360 if (stp->sd_anchor != 0 &&
4361 stp->sd_anchorzone != crgetzoneid(crp)) {
4362 mutex_exit(&stp->sd_lock);
4363 return (EINVAL);
4364 }
4365 stp->sd_anchor = stp->sd_pushcnt;
4366 stp->sd_anchorzone = crgetzoneid(crp);
4367 mutex_exit(&stp->sd_lock);
4368 return (0);
4369
4370 case I_LOOK:
4371 /*
4372 * Get name of first module downstream.
4373 * If no module, return an error.
4374 */
4375 claimstr(wrq);
4376 if (_SAMESTR(wrq) && wrq->q_next->q_next != NULL) {
4377 char *name = Q2NAME(wrq->q_next);
4378
4379 error = strcopyout(name, (void *)arg, strlen(name) + 1,
4380 copyflag);
4381 releasestr(wrq);
4382 return (error);
4383 }
4384 releasestr(wrq);
4385 return (EINVAL);
4386
4387 case I_LINK:
4388 case I_PLINK:
4389 /*
4390 * Link a multiplexor.
4391 */
4392 return (mlink(vp, cmd, (int)arg, crp, rvalp, 0));
4393
4394 case _I_PLINK_LH:
4395 /*
4396 * Link a multiplexor: Call must originate from kernel.
4397 */
4398 if (kioctl)
4399 return (ldi_mlink_lh(vp, cmd, arg, crp, rvalp));
4400
4401 return (EINVAL);
4402 case I_UNLINK:
4403 case I_PUNLINK:
4404 /*
4405 * Unlink a multiplexor.
4406 * If arg is -1, unlink all links for which this is the
4407 * controlling stream. Otherwise, arg is an index number
4408 * for a link to be removed.
4409 */
4410 {
4411 struct linkinfo *linkp;
4412 int native_arg = (int)arg;
4413 int type;
4414 netstack_t *ns;
4415 str_stack_t *ss;
4416
4417 TRACE_1(TR_FAC_STREAMS_FR,
4418 TR_I_UNLINK, "I_UNLINK/I_PUNLINK:%p", stp);
4419 if (vp->v_type == VFIFO) {
4420 return (EINVAL);
4421 }
4422 if (cmd == I_UNLINK)
4423 type = LINKNORMAL;
4424 else /* I_PUNLINK */
4425 type = LINKPERSIST;
4426 if (native_arg == 0) {
4427 return (EINVAL);
4428 }
4429 ns = netstack_find_by_cred(crp);
4430 ASSERT(ns != NULL);
4431 ss = ns->netstack_str;
4432 ASSERT(ss != NULL);
4433
4434 if (native_arg == MUXID_ALL)
4435 error = munlinkall(stp, type, crp, rvalp, ss);
4436 else {
4437 mutex_enter(&muxifier);
4438 if (!(linkp = findlinks(stp, (int)arg, type, ss))) {
4439 /* invalid user supplied index number */
4440 mutex_exit(&muxifier);
4441 netstack_rele(ss->ss_netstack);
4442 return (EINVAL);
4443 }
4444 /* munlink drops the muxifier lock */
4445 error = munlink(stp, linkp, type, crp, rvalp, ss);
4446 }
4447 netstack_rele(ss->ss_netstack);
4448 return (error);
4449 }
4450
4451 case I_FLUSH:
4452 /*
4453 * Send a flush message downstream. The flush
4454 * message can indicate:
4455 * FLUSHR - flush read queue
4456 * FLUSHW - flush write queue
4457 * FLUSHRW - flush read/write queue
4458 */
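/*
 * A minimal userland sketch (illustrative only): flush both the read
 * and the write side of the stream.
 *
 *     if (ioctl(fd, I_FLUSH, FLUSHRW) < 0)
 *             ... error handling ...
 */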
4459 if (stp->sd_flag & STRHUP)
4460 return (ENXIO);
4461 if (arg & ~FLUSHRW)
4462 return (EINVAL);
4463
4464 for (;;) {
4465 if (putnextctl1(stp->sd_wrq, M_FLUSH, (int)arg)) {
4466 break;
4467 }
4468 if ((error = strwaitbuf(1, BPRI_HI)) != 0) {
4469 return (error);
4470 }
4471 }
4472
4473 /*
4474 * Send down an unsupported ioctl and wait for the nack
4475 * in order to allow the M_FLUSH to propagate back
4476 * up to the stream head.
4477 * Replaces if (qready()) runqueues();
4478 */
4479 strioc.ic_cmd = -1; /* The unsupported ioctl */
4480 strioc.ic_timout = 0;
4481 strioc.ic_len = 0;
4482 strioc.ic_dp = NULL;
4483 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
4484 *rvalp = 0;
4485 return (0);
4486
4487 case I_FLUSHBAND:
4488 {
4489 struct bandinfo binfo;
4490
4491 error = strcopyin((void *)arg, &binfo, sizeof (binfo),
4492 copyflag);
4493 if (error)
4494 return (error);
4495 if (stp->sd_flag & STRHUP)
4496 return (ENXIO);
4497 if (binfo.bi_flag & ~FLUSHRW)
4498 return (EINVAL);
4499 while (!(mp = allocb(2, BPRI_HI))) {
4500 if ((error = strwaitbuf(2, BPRI_HI)) != 0)
4501 return (error);
4502 }
4503 mp->b_datap->db_type = M_FLUSH;
4504 *mp->b_wptr++ = binfo.bi_flag | FLUSHBAND;
4505 *mp->b_wptr++ = binfo.bi_pri;
4506 putnext(stp->sd_wrq, mp);
4507 /*
4508 * Send down an unsupported ioctl and wait for the nack
4509 * in order to allow the M_FLUSH to propagate back
4510 * up to the stream head.
4511 * Replaces if (qready()) runqueues();
4512 */
4513 strioc.ic_cmd = -1; /* The unsupported ioctl */
4514 strioc.ic_timout = 0;
4515 strioc.ic_len = 0;
4516 strioc.ic_dp = NULL;
4517 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
4518 *rvalp = 0;
4519 return (0);
4520 }
4521
4522 case I_SRDOPT:
4523 /*
4524 * Set read options
4525 *
4526 * RNORM - default stream mode
4527 * RMSGN - message no discard
4528 * RMSGD - message discard
4529 * RPROTNORM - fail read with EBADMSG for M_[PC]PROTOs
4530 * RPROTDAT - convert M_[PC]PROTOs to M_DATAs
4531 * RPROTDIS - discard M_[PC]PROTOs and retain M_DATAs
4532 */
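/*
 * A minimal userland sketch (illustrative only): select message
 * non-discard read mode and have protocol parts delivered as data.
 *
 *     if (ioctl(fd, I_SRDOPT, RMSGN | RPROTDAT) < 0)
 *             ... error handling ...
 */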
4533 if (arg & ~(RMODEMASK | RPROTMASK))
4534 return (EINVAL);
4535
4536 if ((arg & (RMSGD|RMSGN)) == (RMSGD|RMSGN))
4537 return (EINVAL);
4538
4539 mutex_enter(&stp->sd_lock);
4540 switch (arg & RMODEMASK) {
4541 case RNORM:
4542 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS);
4543 break;
4544 case RMSGD:
4545 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGNODIS) |
4546 RD_MSGDIS;
4547 break;
4548 case RMSGN:
4549 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGDIS) |
4550 RD_MSGNODIS;
4551 break;
4552 }
4553
4554 switch (arg & RPROTMASK) {
4555 case RPROTNORM:
4556 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS);
4557 break;
4558
4559 case RPROTDAT:
4560 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDIS) |
4561 RD_PROTDAT);
4562 break;
4563
4564 case RPROTDIS:
4565 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDAT) |
4566 RD_PROTDIS);
4567 break;
4568 }
4569 mutex_exit(&stp->sd_lock);
4570 return (0);
4571
4572 case I_GRDOPT:
4573 /*
4574 * Get the read options and return the value
4575 * in the spot pointed to by arg.
4576 */
4577 {
4578 int rdopt;
4579
4580 rdopt = ((stp->sd_read_opt & RD_MSGDIS) ? RMSGD :
4581 ((stp->sd_read_opt & RD_MSGNODIS) ? RMSGN : RNORM));
4582 rdopt |= ((stp->sd_read_opt & RD_PROTDAT) ? RPROTDAT :
4583 ((stp->sd_read_opt & RD_PROTDIS) ? RPROTDIS : RPROTNORM));
4584
4585 return (strcopyout(&rdopt, (void *)arg, sizeof (int),
4586 copyflag));
4587 }
4588
4589 case I_SERROPT:
4590 /*
4591 * Set error options
4592 *
4593 * RERRNORM - persistent read errors
4594 * RERRNONPERSIST - non-persistent read errors
4595 * WERRNORM - persistent write errors
4596 * WERRNONPERSIST - non-persistent write errors
4597 */
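/*
 * A minimal userland sketch (illustrative only): make read errors
 * non-persistent while keeping write errors persistent.
 *
 *     if (ioctl(fd, I_SERROPT, RERRNONPERSIST | WERRNORM) < 0)
 *             ... error handling ...
 */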
4598 if (arg & ~(RERRMASK | WERRMASK))
4599 return (EINVAL);
4600
4601 mutex_enter(&stp->sd_lock);
4602 switch (arg & RERRMASK) {
4603 case RERRNORM:
4604 stp->sd_flag &= ~STRDERRNONPERSIST;
4605 break;
4606 case RERRNONPERSIST:
4607 stp->sd_flag |= STRDERRNONPERSIST;
4608 break;
4609 }
4610 switch (arg & WERRMASK) {
4611 case WERRNORM:
4612 stp->sd_flag &= ~STWRERRNONPERSIST;
4613 break;
4614 case WERRNONPERSIST:
4615 stp->sd_flag |= STWRERRNONPERSIST;
4616 break;
4617 }
4618 mutex_exit(&stp->sd_lock);
4619 return (0);
4620
4621 case I_GERROPT:
4622 /*
4623 * Get the error options and return the value
4624 * in the spot pointed to by arg.
4625 */
4626 {
4627 int erropt = 0;
4628
4629 erropt |= (stp->sd_flag & STRDERRNONPERSIST) ? RERRNONPERSIST :
4630 RERRNORM;
4631 erropt |= (stp->sd_flag & STWRERRNONPERSIST) ? WERRNONPERSIST :
4632 WERRNORM;
4633 return (strcopyout(&erropt, (void *)arg, sizeof (int),
4634 copyflag));
4635 }
4636
4637 case I_SETSIG:
4638 /*
4639 * Register the calling proc to receive the SIGPOLL
4640 * signal based on the events given in arg. If
4641 * arg is zero, remove the proc from the register list.
4642 */
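/*
 * A minimal userland sketch (illustrative only): request SIGPOLL when
 * normal or priority-band data arrives, then deregister.
 *
 *     if (ioctl(fd, I_SETSIG, S_RDNORM | S_RDBAND) < 0)
 *             ... error handling ...
 *     ...
 *     (void) ioctl(fd, I_SETSIG, 0);  ... remove the registration ...
 */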
4643 {
4644 strsig_t *ssp, *pssp;
4645 struct pid *pidp;
4646
4647 pssp = NULL;
4648 pidp = curproc->p_pidp;
4649 /*
4650 * Hold sd_lock to prevent traversal of sd_siglist while
4651 * it is modified.
4652 */
4653 mutex_enter(&stp->sd_lock);
4654 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pidp != pidp);
4655 pssp = ssp, ssp = ssp->ss_next)
4656 ;
4657
4658 if (arg) {
4659 if (arg & ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR|
4660 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) {
4661 mutex_exit(&stp->sd_lock);
4662 return (EINVAL);
4663 }
4664 if ((arg & S_BANDURG) && !(arg & S_RDBAND)) {
4665 mutex_exit(&stp->sd_lock);
4666 return (EINVAL);
4667 }
4668
4669 /*
4670 * If proc not already registered, add it
4671 * to list.
4672 */
4673 if (!ssp) {
4674 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP);
4675 ssp->ss_pidp = pidp;
4676 ssp->ss_pid = pidp->pid_id;
4677 ssp->ss_next = NULL;
4678 if (pssp)
4679 pssp->ss_next = ssp;
4680 else
4681 stp->sd_siglist = ssp;
4682 mutex_enter(&pidlock);
4683 PID_HOLD(pidp);
4684 mutex_exit(&pidlock);
4685 }
4686
4687 /*
4688 * Set events.
4689 */
4690 ssp->ss_events = (int)arg;
4691 } else {
4692 /*
4693 * Remove proc from register list.
4694 */
4695 if (ssp) {
4696 mutex_enter(&pidlock);
4697 PID_RELE(pidp);
4698 mutex_exit(&pidlock);
4699 if (pssp)
4700 pssp->ss_next = ssp->ss_next;
4701 else
4702 stp->sd_siglist = ssp->ss_next;
4703 kmem_free(ssp, sizeof (strsig_t));
4704 } else {
4705 mutex_exit(&stp->sd_lock);
4706 return (EINVAL);
4707 }
4708 }
4709
4710 /*
4711 * Recalculate OR of sig events.
4712 */
4713 stp->sd_sigflags = 0;
4714 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4715 stp->sd_sigflags |= ssp->ss_events;
4716 mutex_exit(&stp->sd_lock);
4717 return (0);
4718 }
4719
4720 case I_GETSIG:
4721 /*
4722 * Return (in arg) the current registration of events
4723 * for which the calling proc is to be signaled.
4724 */
4725 {
4726 struct strsig *ssp;
4727 struct pid *pidp;
4728
4729 pidp = curproc->p_pidp;
4730 mutex_enter(&stp->sd_lock);
4731 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4732 if (ssp->ss_pidp == pidp) {
4733 error = strcopyout(&ssp->ss_events, (void *)arg,
4734 sizeof (int), copyflag);
4735 mutex_exit(&stp->sd_lock);
4736 return (error);
4737 }
4738 mutex_exit(&stp->sd_lock);
4739 return (EINVAL);
4740 }
4741
4742 case I_ESETSIG:
4743 /*
4744 * Register the ss_pid to receive the SIGPOLL
4745 * signal based on the events in the ss_events arg. If
4746 * ss_events is zero, remove the proc from the register list.
4747 */
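/*
 * A minimal userland sketch (illustrative only; "pid" is a hypothetical
 * target process that the caller has permission to signal):
 *
 *     struct strsigset ss;
 *
 *     ss.ss_pid = pid;
 *     ss.ss_events = S_RDNORM | S_HANGUP;
 *     if (ioctl(fd, I_ESETSIG, &ss) < 0)
 *             ... error handling ...
 */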
4748 {
4749 struct strsig *ssp, *pssp;
4750 struct proc *proc;
4751 struct pid *pidp;
4752 pid_t pid;
4753 struct strsigset ss;
4754
4755 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag);
4756 if (error)
4757 return (error);
4758
4759 pid = ss.ss_pid;
4760
4761 if (ss.ss_events != 0) {
4762 /*
4763 * Permissions check by sending signal 0.
4764 * Note that when kill fails it does a set_errno
4765 * causing the system call to fail.
4766 */
4767 error = kill(pid, 0);
4768 if (error) {
4769 return (error);
4770 }
4771 }
4772 mutex_enter(&pidlock);
4773 if (pid == 0)
4774 proc = curproc;
4775 else if (pid < 0)
4776 proc = pgfind(-pid);
4777 else
4778 proc = prfind(pid);
4779 if (proc == NULL) {
4780 mutex_exit(&pidlock);
4781 return (ESRCH);
4782 }
4783 if (pid < 0)
4784 pidp = proc->p_pgidp;
4785 else
4786 pidp = proc->p_pidp;
4787 ASSERT(pidp);
4788 /*
4789 * Get a hold on the pid structure while referencing it.
4790 * There is a separate PID_HOLD should it be inserted
4791 * in the list below.
4792 */
4793 PID_HOLD(pidp);
4794 mutex_exit(&pidlock);
4795
4796 pssp = NULL;
4797 /*
4798 * Hold sd_lock to prevent traversal of sd_siglist while
4799 * it is modified.
4800 */
4801 mutex_enter(&stp->sd_lock);
4802 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pid != pid);
4803 pssp = ssp, ssp = ssp->ss_next)
4804 ;
4805
4806 if (ss.ss_events) {
4807 if (ss.ss_events &
4808 ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR|
4809 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) {
4810 mutex_exit(&stp->sd_lock);
4811 mutex_enter(&pidlock);
4812 PID_RELE(pidp);
4813 mutex_exit(&pidlock);
4814 return (EINVAL);
4815 }
4816 if ((ss.ss_events & S_BANDURG) &&
4817 !(ss.ss_events & S_RDBAND)) {
4818 mutex_exit(&stp->sd_lock);
4819 mutex_enter(&pidlock);
4820 PID_RELE(pidp);
4821 mutex_exit(&pidlock);
4822 return (EINVAL);
4823 }
4824
4825 /*
4826 * If proc not already registered, add it
4827 * to list.
4828 */
4829 if (!ssp) {
4830 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP);
4831 ssp->ss_pidp = pidp;
4832 ssp->ss_pid = pid;
4833 ssp->ss_next = NULL;
4834 if (pssp)
4835 pssp->ss_next = ssp;
4836 else
4837 stp->sd_siglist = ssp;
4838 mutex_enter(&pidlock);
4839 PID_HOLD(pidp);
4840 mutex_exit(&pidlock);
4841 }
4842
4843 /*
4844 * Set events.
4845 */
4846 ssp->ss_events = ss.ss_events;
4847 } else {
4848 /*
4849 * Remove proc from register list.
4850 */
4851 if (ssp) {
4852 mutex_enter(&pidlock);
4853 PID_RELE(pidp);
4854 mutex_exit(&pidlock);
4855 if (pssp)
4856 pssp->ss_next = ssp->ss_next;
4857 else
4858 stp->sd_siglist = ssp->ss_next;
4859 kmem_free(ssp, sizeof (strsig_t));
4860 } else {
4861 mutex_exit(&stp->sd_lock);
4862 mutex_enter(&pidlock);
4863 PID_RELE(pidp);
4864 mutex_exit(&pidlock);
4865 return (EINVAL);
4866 }
4867 }
4868
4869 /*
4870 * Recalculate OR of sig events.
4871 */
4872 stp->sd_sigflags = 0;
4873 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4874 stp->sd_sigflags |= ssp->ss_events;
4875 mutex_exit(&stp->sd_lock);
4876 mutex_enter(&pidlock);
4877 PID_RELE(pidp);
4878 mutex_exit(&pidlock);
4879 return (0);
4880 }
4881
4882 case I_EGETSIG:
4883 /*
4884 * Return (in arg) the current registration of events
4885 * for which the calling proc is to be signaled.
4886 */
4887 {
4888 struct strsig *ssp;
4889 struct proc *proc;
4890 pid_t pid;
4891 struct pid *pidp;
4892 struct strsigset ss;
4893
4894 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag);
4895 if (error)
4896 return (error);
4897
4898 pid = ss.ss_pid;
4899 mutex_enter(&pidlock);
4900 if (pid == 0)
4901 proc = curproc;
4902 else if (pid < 0)
4903 proc = pgfind(-pid);
4904 else
4905 proc = prfind(pid);
4906 if (proc == NULL) {
4907 mutex_exit(&pidlock);
4908 return (ESRCH);
4909 }
4910 if (pid < 0)
4911 pidp = proc->p_pgidp;
4912 else
4913 pidp = proc->p_pidp;
4914
4915 /* Prevent the pidp from being reassigned */
4916 PID_HOLD(pidp);
4917 mutex_exit(&pidlock);
4918
4919 mutex_enter(&stp->sd_lock);
4920 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4921 if (ssp->ss_pid == pid) {
4922 ss.ss_pid = ssp->ss_pid;
4923 ss.ss_events = ssp->ss_events;
4924 error = strcopyout(&ss, (void *)arg,
4925 sizeof (struct strsigset), copyflag);
4926 mutex_exit(&stp->sd_lock);
4927 mutex_enter(&pidlock);
4928 PID_RELE(pidp);
4929 mutex_exit(&pidlock);
4930 return (error);
4931 }
4932 mutex_exit(&stp->sd_lock);
4933 mutex_enter(&pidlock);
4934 PID_RELE(pidp);
4935 mutex_exit(&pidlock);
4936 return (EINVAL);
4937 }
4938
4939 case I_PEEK:
4940 {
4941 STRUCT_DECL(strpeek, strpeek);
4942 size_t n;
4943 mblk_t *fmp, *tmp_mp = NULL;
4944
4945 STRUCT_INIT(strpeek, flag);
4946
4947 error = strcopyin((void *)arg, STRUCT_BUF(strpeek),
4948 STRUCT_SIZE(strpeek), copyflag);
4949 if (error)
4950 return (error);
4951
4952 mutex_enter(QLOCK(rdq));
4953 /*
4954 * Skip the invalid messages
4955 */
4956 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
4957 if (mp->b_datap->db_type != M_SIG)
4958 break;
4959
4960 /*
4961 * If the user has requested to peek at a high priority
4962 * message and the first message is not one, return 0.
4963 */
4964 if (mp != NULL) {
4965 if ((STRUCT_FGET(strpeek, flags) & RS_HIPRI) &&
4966 queclass(mp) == QNORM) {
4967 *rvalp = 0;
4968 mutex_exit(QLOCK(rdq));
4969 return (0);
4970 }
4971 } else if (stp->sd_struiordq == NULL ||
4972 (STRUCT_FGET(strpeek, flags) & RS_HIPRI)) {
4973 /*
4974 * No mblks to look at at the stream head and either
4975 * 1) this isn't a synch stream, or
4976 * 2) this is a synch stream but the caller wants high
4977 * priority messages, which are not supported by
4978 * the synch stream (it only supports QNORM).
4979 */
4980 *rvalp = 0;
4981 mutex_exit(QLOCK(rdq));
4982 return (0);
4983 }
4984
4985 fmp = mp;
4986
4987 if (mp && mp->b_datap->db_type == M_PASSFP) {
4988 mutex_exit(QLOCK(rdq));
4989 return (EBADMSG);
4990 }
4991
4992 ASSERT(mp == NULL || mp->b_datap->db_type == M_PCPROTO ||
4993 mp->b_datap->db_type == M_PROTO ||
4994 mp->b_datap->db_type == M_DATA);
4995
4996 if (mp && mp->b_datap->db_type == M_PCPROTO) {
4997 STRUCT_FSET(strpeek, flags, RS_HIPRI);
4998 } else {
4999 STRUCT_FSET(strpeek, flags, 0);
5000 }
5001
5002
5003 if (mp && ((tmp_mp = dupmsg(mp)) == NULL)) {
5004 mutex_exit(QLOCK(rdq));
5005 return (ENOSR);
5006 }
5007 mutex_exit(QLOCK(rdq));
5008
5009 /*
5010 * set mp = tmp_mp, so that I_PEEK processing can continue.
5011 * tmp_mp is used to free the dup'd message.
5012 */
5013 mp = tmp_mp;
5014
5015 uio.uio_fmode = 0;
5016 uio.uio_extflg = UIO_COPY_CACHED;
5017 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE :
5018 UIO_SYSSPACE;
5019 uio.uio_limit = 0;
5020 /*
5021 * First process PROTO blocks, if any.
5022 * If the user doesn't want ctl info (maxlen <= 0),
5023 * then set len to -1/0 and skip the control-block part.
5024 */
5025 if (STRUCT_FGET(strpeek, ctlbuf.maxlen) < 0)
5026 STRUCT_FSET(strpeek, ctlbuf.len, -1);
5027 else if (STRUCT_FGET(strpeek, ctlbuf.maxlen) == 0)
5028 STRUCT_FSET(strpeek, ctlbuf.len, 0);
5029 else {
5030 int ctl_part = 0;
5031
5032 iov.iov_base = STRUCT_FGETP(strpeek, ctlbuf.buf);
5033 iov.iov_len = STRUCT_FGET(strpeek, ctlbuf.maxlen);
5034 uio.uio_iov = &iov;
5035 uio.uio_resid = iov.iov_len;
5036 uio.uio_loffset = 0;
5037 uio.uio_iovcnt = 1;
5038 while (mp && mp->b_datap->db_type != M_DATA &&
5039 uio.uio_resid >= 0) {
5040 ASSERT(STRUCT_FGET(strpeek, flags) == 0 ?
5041 mp->b_datap->db_type == M_PROTO :
5042 mp->b_datap->db_type == M_PCPROTO);
5043
5044 if ((n = MIN(uio.uio_resid,
5045 mp->b_wptr - mp->b_rptr)) != 0 &&
5046 (error = uiomove((char *)mp->b_rptr, n,
5047 UIO_READ, &uio)) != 0) {
5048 freemsg(tmp_mp);
5049 return (error);
5050 }
5051 ctl_part = 1;
5052 mp = mp->b_cont;
5053 }
5054 /* No ctl message */
5055 if (ctl_part == 0)
5056 STRUCT_FSET(strpeek, ctlbuf.len, -1);
5057 else
5058 STRUCT_FSET(strpeek, ctlbuf.len,
5059 STRUCT_FGET(strpeek, ctlbuf.maxlen) -
5060 uio.uio_resid);
5061 }
5062
5063 /*
5064 * Now process DATA blocks, if any.
5065 * If the user doesn't want data (maxlen <= 0),
5066 * then set len to -1/0 and skip the data-block part.
5067 */
5068 if (STRUCT_FGET(strpeek, databuf.maxlen) < 0)
5069 STRUCT_FSET(strpeek, databuf.len, -1);
5070 else if (STRUCT_FGET(strpeek, databuf.maxlen) == 0)
5071 STRUCT_FSET(strpeek, databuf.len, 0);
5072 else {
5073 int data_part = 0;
5074
5075 iov.iov_base = STRUCT_FGETP(strpeek, databuf.buf);
5076 iov.iov_len = STRUCT_FGET(strpeek, databuf.maxlen);
5077 uio.uio_iov = &iov;
5078 uio.uio_resid = iov.iov_len;
5079 uio.uio_loffset = 0;
5080 uio.uio_iovcnt = 1;
5081 while (mp && uio.uio_resid) {
5082 if (mp->b_datap->db_type == M_DATA) {
5083 if ((n = MIN(uio.uio_resid,
5084 mp->b_wptr - mp->b_rptr)) != 0 &&
5085 (error = uiomove((char *)mp->b_rptr,
5086 n, UIO_READ, &uio)) != 0) {
5087 freemsg(tmp_mp);
5088 return (error);
5089 }
5090 data_part = 1;
5091 }
5092 ASSERT(data_part == 0 ||
5093 mp->b_datap->db_type == M_DATA);
5094 mp = mp->b_cont;
5095 }
5096 /* No data message */
5097 if (data_part == 0)
5098 STRUCT_FSET(strpeek, databuf.len, -1);
5099 else
5100 STRUCT_FSET(strpeek, databuf.len,
5101 STRUCT_FGET(strpeek, databuf.maxlen) -
5102 uio.uio_resid);
5103 }
5104 freemsg(tmp_mp);
5105
5106 /*
5107 * It is a synch stream and the user wants to get
5108 * data (maxlen > 0).
5109 * The uio setup is done by the code that processes DATA
5110 * blocks above.
5111 */
5112 if ((fmp == NULL) && STRUCT_FGET(strpeek, databuf.maxlen) > 0) {
5113 infod_t infod;
5114
5115 infod.d_cmd = INFOD_COPYOUT;
5116 infod.d_res = 0;
5117 infod.d_uiop = &uio;
5118 error = infonext(rdq, &infod);
5119 if (error == EINVAL || error == EBUSY)
5120 error = 0;
5121 if (error)
5122 return (error);
5123 STRUCT_FSET(strpeek, databuf.len, STRUCT_FGET(strpeek,
5124 databuf.maxlen) - uio.uio_resid);
5125 if (STRUCT_FGET(strpeek, databuf.len) == 0) {
5126 /*
5127 * No data found by the infonext().
5128 */
5129 STRUCT_FSET(strpeek, databuf.len, -1);
5130 }
5131 }
5132 error = strcopyout(STRUCT_BUF(strpeek), (void *)arg,
5133 STRUCT_SIZE(strpeek), copyflag);
5134 if (error) {
5135 return (error);
5136 }
5137 /*
5138 * If no message was retrieved, set the return code to 0;
5139 * otherwise, set it to 1.
5140 */
5141 if (STRUCT_FGET(strpeek, ctlbuf.len) == -1 &&
5142 STRUCT_FGET(strpeek, databuf.len) == -1)
5143 *rvalp = 0;
5144 else
5145 *rvalp = 1;
5146 return (0);
5147 }
5148
5149 case I_FDINSERT:
5150 {
5151 STRUCT_DECL(strfdinsert, strfdinsert);
5152 struct file *resftp;
5153 struct stdata *resstp;
5154 t_uscalar_t ival;
5155 ssize_t msgsize;
5156 struct strbuf mctl;
5157
5158 STRUCT_INIT(strfdinsert, flag);
5159 if (stp->sd_flag & STRHUP)
5160 return (ENXIO);
5161 /*
5162 * STRDERR, STWRERR and STPLEX tested above.
5163 */
5164 error = strcopyin((void *)arg, STRUCT_BUF(strfdinsert),
5165 STRUCT_SIZE(strfdinsert), copyflag);
5166 if (error)
5167 return (error);
5168
5169 if (STRUCT_FGET(strfdinsert, offset) < 0 ||
5170 (STRUCT_FGET(strfdinsert, offset) %
5171 sizeof (t_uscalar_t)) != 0)
5172 return (EINVAL);
5173 if ((resftp = getf(STRUCT_FGET(strfdinsert, fildes))) != NULL) {
5174 if ((resstp = resftp->f_vnode->v_stream) == NULL) {
5175 releasef(STRUCT_FGET(strfdinsert, fildes));
5176 return (EINVAL);
5177 }
5178 } else
5179 return (EINVAL);
5180
5181 mutex_enter(&resstp->sd_lock);
5182 if (resstp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
5183 error = strgeterr(resstp,
5184 STRDERR|STWRERR|STRHUP|STPLEX, 0);
5185 if (error != 0) {
5186 mutex_exit(&resstp->sd_lock);
5187 releasef(STRUCT_FGET(strfdinsert, fildes));
5188 return (error);
5189 }
5190 }
5191 mutex_exit(&resstp->sd_lock);
5192
5193 #ifdef _ILP32
5194 {
5195 queue_t *q;
5196 queue_t *mate = NULL;
5197
5198 /* get read queue of stream terminus */
5199 claimstr(resstp->sd_wrq);
5200 for (q = resstp->sd_wrq->q_next; q->q_next != NULL;
5201 q = q->q_next)
5202 if (!STRMATED(resstp) && STREAM(q) != resstp &&
5203 mate == NULL) {
5204 ASSERT(q->q_qinfo->qi_srvp);
5205 ASSERT(_OTHERQ(q)->q_qinfo->qi_srvp);
5206 claimstr(q);
5207 mate = q;
5208 }
5209 q = _RD(q);
5210 if (mate)
5211 releasestr(mate);
5212 releasestr(resstp->sd_wrq);
5213 ival = (t_uscalar_t)q;
5214 }
5215 #else
5216 ival = (t_uscalar_t)getminor(resftp->f_vnode->v_rdev);
5217 #endif /* _ILP32 */
5218
5219 if (STRUCT_FGET(strfdinsert, ctlbuf.len) <
5220 STRUCT_FGET(strfdinsert, offset) + sizeof (t_uscalar_t)) {
5221 releasef(STRUCT_FGET(strfdinsert, fildes));
5222 return (EINVAL);
5223 }
5224
5225 /*
5226 * Check for legal flag value.
5227 */
5228 if (STRUCT_FGET(strfdinsert, flags) & ~RS_HIPRI) {
5229 releasef(STRUCT_FGET(strfdinsert, fildes));
5230 return (EINVAL);
5231 }
5232
5233 /* get these values from those cached in the stream head */
5234 mutex_enter(QLOCK(stp->sd_wrq));
5235 rmin = stp->sd_qn_minpsz;
5236 rmax = stp->sd_qn_maxpsz;
5237 mutex_exit(QLOCK(stp->sd_wrq));
5238
5239 /*
5240 * Make sure ctl and data sizes together fall within
5241 * the limits of the max and min receive packet sizes
5242 * and do not exceed system limit. A negative data
5243 * length means that no data part is to be sent.
5244 */
5245 ASSERT((rmax >= 0) || (rmax == INFPSZ));
5246 if (rmax == 0) {
5247 releasef(STRUCT_FGET(strfdinsert, fildes));
5248 return (ERANGE);
5249 }
5250 if ((msgsize = STRUCT_FGET(strfdinsert, databuf.len)) < 0)
5251 msgsize = 0;
5252 if ((msgsize < rmin) ||
5253 ((msgsize > rmax) && (rmax != INFPSZ)) ||
5254 (STRUCT_FGET(strfdinsert, ctlbuf.len) > strctlsz)) {
5255 releasef(STRUCT_FGET(strfdinsert, fildes));
5256 return (ERANGE);
5257 }
5258
5259 mutex_enter(&stp->sd_lock);
5260 while (!(STRUCT_FGET(strfdinsert, flags) & RS_HIPRI) &&
5261 !canputnext(stp->sd_wrq)) {
5262 if ((error = strwaitq(stp, WRITEWAIT, (ssize_t)0,
5263 flag, -1, &done)) != 0 || done) {
5264 mutex_exit(&stp->sd_lock);
5265 releasef(STRUCT_FGET(strfdinsert, fildes));
5266 return (error);
5267 }
5268 if ((error = i_straccess(stp, access)) != 0) {
5269 mutex_exit(&stp->sd_lock);
5270 releasef(
5271 STRUCT_FGET(strfdinsert, fildes));
5272 return (error);
5273 }
5274 }
5275 mutex_exit(&stp->sd_lock);
5276
5277 /*
5278 * Copy strfdinsert.ctlbuf into native form of
5279 * ctlbuf to pass down into strmakemsg().
5280 */
5281 mctl.maxlen = STRUCT_FGET(strfdinsert, ctlbuf.maxlen);
5282 mctl.len = STRUCT_FGET(strfdinsert, ctlbuf.len);
5283 mctl.buf = STRUCT_FGETP(strfdinsert, ctlbuf.buf);
5284
5285 iov.iov_base = STRUCT_FGETP(strfdinsert, databuf.buf);
5286 iov.iov_len = STRUCT_FGET(strfdinsert, databuf.len);
5287 uio.uio_iov = &iov;
5288 uio.uio_iovcnt = 1;
5289 uio.uio_loffset = 0;
5290 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE :
5291 UIO_SYSSPACE;
5292 uio.uio_fmode = 0;
5293 uio.uio_extflg = UIO_COPY_CACHED;
5294 uio.uio_resid = iov.iov_len;
5295 if ((error = strmakemsg(&mctl,
5296 &msgsize, &uio, stp,
5297 STRUCT_FGET(strfdinsert, flags), &mp)) != 0 || !mp) {
5298 STRUCT_FSET(strfdinsert, databuf.len, msgsize);
5299 releasef(STRUCT_FGET(strfdinsert, fildes));
5300 return (error);
5301 }
5302
5303 STRUCT_FSET(strfdinsert, databuf.len, msgsize);
5304
5305 /*
5306 * Place the possibly reencoded queue pointer 'offset' bytes
5307 * from the start of the control portion of the message.
5308 */
5309 *((t_uscalar_t *)(mp->b_rptr +
5310 STRUCT_FGET(strfdinsert, offset))) = ival;
5311
5312 /*
5313 * Put message downstream.
5314 */
5315 stream_willservice(stp);
5316 putnext(stp->sd_wrq, mp);
5317 stream_runservice(stp);
5318 releasef(STRUCT_FGET(strfdinsert, fildes));
5319 return (error);
5320 }
5321
5322 case I_SENDFD:
5323 {
5324 struct file *fp;
5325
5326 if ((fp = getf((int)arg)) == NULL)
5327 return (EBADF);
5328 error = do_sendfp(stp, fp, crp);
5329 if (auditing) {
5330 audit_fdsend((int)arg, fp, error);
5331 }
5332 releasef((int)arg);
5333 return (error);
5334 }
5335
5336 case I_RECVFD:
5337 case I_E_RECVFD:
5338 {
5339 struct k_strrecvfd *srf;
5340 int i, fd;
5341
5342 mutex_enter(&stp->sd_lock);
5343 while (!(mp = getq(rdq))) {
5344 if (stp->sd_flag & (STRHUP|STREOF)) {
5345 mutex_exit(&stp->sd_lock);
5346 return (ENXIO);
5347 }
5348 if ((error = strwaitq(stp, GETWAIT, (ssize_t)0,
5349 flag, -1, &done)) != 0 || done) {
5350 mutex_exit(&stp->sd_lock);
5351 return (error);
5352 }
5353 if ((error = i_straccess(stp, access)) != 0) {
5354 mutex_exit(&stp->sd_lock);
5355 return (error);
5356 }
5357 }
5358 if (mp->b_datap->db_type != M_PASSFP) {
5359 putback(stp, rdq, mp, mp->b_band);
5360 mutex_exit(&stp->sd_lock);
5361 return (EBADMSG);
5362 }
5363 mutex_exit(&stp->sd_lock);
5364
5365 srf = (struct k_strrecvfd *)mp->b_rptr;
5366 if ((fd = ufalloc(0)) == -1) {
5367 mutex_enter(&stp->sd_lock);
5368 putback(stp, rdq, mp, mp->b_band);
5369 mutex_exit(&stp->sd_lock);
5370 return (EMFILE);
5371 }
5372 if (cmd == I_RECVFD) {
5373 struct o_strrecvfd ostrfd;
5374
5375 /* check to see if uid/gid values are too large. */
5376
5377 if (srf->uid > (o_uid_t)USHRT_MAX ||
5378 srf->gid > (o_gid_t)USHRT_MAX) {
5379 mutex_enter(&stp->sd_lock);
5380 putback(stp, rdq, mp, mp->b_band);
5381 mutex_exit(&stp->sd_lock);
5382 setf(fd, NULL); /* release fd entry */
5383 return (EOVERFLOW);
5384 }
5385
5386 ostrfd.fd = fd;
5387 ostrfd.uid = (o_uid_t)srf->uid;
5388 ostrfd.gid = (o_gid_t)srf->gid;
5389
5390 /* Null the filler bits */
5391 for (i = 0; i < 8; i++)
5392 ostrfd.fill[i] = 0;
5393
5394 error = strcopyout(&ostrfd, (void *)arg,
5395 sizeof (struct o_strrecvfd), copyflag);
5396 } else { /* I_E_RECVFD */
5397 struct strrecvfd strfd;
5398
5399 strfd.fd = fd;
5400 strfd.uid = srf->uid;
5401 strfd.gid = srf->gid;
5402
5403 /* null the filler bits */
5404 for (i = 0; i < 8; i++)
5405 strfd.fill[i] = 0;
5406
5407 error = strcopyout(&strfd, (void *)arg,
5408 sizeof (struct strrecvfd), copyflag);
5409 }
5410
5411 if (error) {
5412 setf(fd, NULL); /* release fd entry */
5413 mutex_enter(&stp->sd_lock);
5414 putback(stp, rdq, mp, mp->b_band);
5415 mutex_exit(&stp->sd_lock);
5416 return (error);
5417 }
5418 if (auditing) {
5419 audit_fdrecv(fd, srf->fp);
5420 }
5421
5422 /*
5423 * Always increment f_count since the freemsg() below will
5424 * always call free_passfp() which performs a closef().
5425 */
5426 mutex_enter(&srf->fp->f_tlock);
5427 srf->fp->f_count++;
5428 mutex_exit(&srf->fp->f_tlock);
5429 setf(fd, srf->fp);
5430 freemsg(mp);
5431 return (0);
5432 }
5433
5434 case I_SWROPT:
5435 /*
5436 * Set/clear the write options. arg is a bit
5437 * mask with any of the following bits set...
5438 * SNDZERO - send zero length message
5439 * SNDPIPE - send SIGPIPE to the process if
5440 * sd_werror is set and the process is
5441 * doing a write or putmsg.
5442 * The new stream head write options should reflect
5443 * what is in arg.
5444 */
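/*
 * A minimal userland sketch (illustrative only): allow zero-length
 * messages to be sent and ask for SIGPIPE on write errors.
 *
 *     if (ioctl(fd, I_SWROPT, SNDZERO | SNDPIPE) < 0)
 *             ... error handling ...
 */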
5445 if (arg & ~(SNDZERO|SNDPIPE))
5446 return (EINVAL);
5447
5448 mutex_enter(&stp->sd_lock);
5449 stp->sd_wput_opt &= ~(SW_SIGPIPE|SW_SNDZERO);
5450 if (arg & SNDZERO)
5451 stp->sd_wput_opt |= SW_SNDZERO;
5452 if (arg & SNDPIPE)
5453 stp->sd_wput_opt |= SW_SIGPIPE;
5454 mutex_exit(&stp->sd_lock);
5455 return (0);
5456
5457 case I_GWROPT:
5458 {
5459 int wropt = 0;
5460
5461 if (stp->sd_wput_opt & SW_SNDZERO)
5462 wropt |= SNDZERO;
5463 if (stp->sd_wput_opt & SW_SIGPIPE)
5464 wropt |= SNDPIPE;
5465 return (strcopyout(&wropt, (void *)arg, sizeof (wropt),
5466 copyflag));
5467 }
5468
5469 case I_LIST:
5470 /*
5471 * Return all the modules found on this stream,
5472 * up to the driver. If the argument is NULL, return the
5473 * number of modules (including the driver). If the argument
5474 * is not NULL, copy the names into the structure
5475 * provided.
5476 */
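/*
 * A minimal userland sketch (illustrative only; the array size of 8 is
 * an arbitrary choice):
 *
 *     struct str_mlist mods[8];
 *     struct str_list list;
 *     int nmods;
 *
 *     nmods = ioctl(fd, I_LIST, NULL);        ... count incl. driver ...
 *     list.sl_nmods = 8;
 *     list.sl_modlist = mods;
 *     if (nmods < 0 || ioctl(fd, I_LIST, &list) < 0)
 *             ... error handling ...
 */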
5477
5478 {
5479 queue_t *q;
5480 char *qname;
5481 int i, nmods;
5482 struct str_mlist *mlist;
5483 STRUCT_DECL(str_list, strlist);
5484
5485 if (arg == NULL) { /* Return number of modules plus driver */
5486 if (stp->sd_vnode->v_type == VFIFO)
5487 *rvalp = stp->sd_pushcnt;
5488 else
5489 *rvalp = stp->sd_pushcnt + 1;
5490 return (0);
5491 }
5492
5493 STRUCT_INIT(strlist, flag);
5494
5495 error = strcopyin((void *)arg, STRUCT_BUF(strlist),
5496 STRUCT_SIZE(strlist), copyflag);
5497 if (error != 0)
5498 return (error);
5499
5500 mlist = STRUCT_FGETP(strlist, sl_modlist);
5501 nmods = STRUCT_FGET(strlist, sl_nmods);
5502 if (nmods <= 0)
5503 return (EINVAL);
5504
5505 claimstr(stp->sd_wrq);
5506 q = stp->sd_wrq;
5507 for (i = 0; i < nmods && _SAMESTR(q); i++, q = q->q_next) {
5508 qname = Q2NAME(q->q_next);
5509 error = strcopyout(qname, &mlist[i], strlen(qname) + 1,
5510 copyflag);
5511 if (error != 0) {
5512 releasestr(stp->sd_wrq);
5513 return (error);
5514 }
5515 }
5516 releasestr(stp->sd_wrq);
5517 return (strcopyout(&i, (void *)arg, sizeof (int), copyflag));
5518 }
5519
5520 case I_CKBAND:
5521 {
5522 queue_t *q;
5523 qband_t *qbp;
5524
5525 if ((arg < 0) || (arg >= NBAND))
5526 return (EINVAL);
5527 q = _RD(stp->sd_wrq);
5528 mutex_enter(QLOCK(q));
5529 if (arg > (int)q->q_nband) {
5530 *rvalp = 0;
5531 } else {
5532 if (arg == 0) {
5533 if (q->q_first)
5534 *rvalp = 1;
5535 else
5536 *rvalp = 0;
5537 } else {
5538 qbp = q->q_bandp;
5539 while (--arg > 0)
5540 qbp = qbp->qb_next;
5541 if (qbp->qb_first)
5542 *rvalp = 1;
5543 else
5544 *rvalp = 0;
5545 }
5546 }
5547 mutex_exit(QLOCK(q));
5548 return (0);
5549 }
5550
5551 case I_GETBAND:
5552 {
5553 int intpri;
5554 queue_t *q;
5555
5556 q = _RD(stp->sd_wrq);
5557 mutex_enter(QLOCK(q));
5558 mp = q->q_first;
5559 if (!mp) {
5560 mutex_exit(QLOCK(q));
5561 return (ENODATA);
5562 }
5563 intpri = (int)mp->b_band;
5564 error = strcopyout(&intpri, (void *)arg, sizeof (int),
5565 copyflag);
5566 mutex_exit(QLOCK(q));
5567 return (error);
5568 }
5569
5570 case I_ATMARK:
5571 {
5572 queue_t *q;
5573
5574 if (arg & ~(ANYMARK|LASTMARK))
5575 return (EINVAL);
5576 q = _RD(stp->sd_wrq);
5577 mutex_enter(&stp->sd_lock);
5578 if ((stp->sd_flag & STRATMARK) && (arg == ANYMARK)) {
5579 *rvalp = 1;
5580 } else {
5581 mutex_enter(QLOCK(q));
5582 mp = q->q_first;
5583
5584 if (mp == NULL)
5585 *rvalp = 0;
5586 else if ((arg == ANYMARK) && (mp->b_flag & MSGMARK))
5587 *rvalp = 1;
5588 else if ((arg == LASTMARK) && (mp == stp->sd_mark))
5589 *rvalp = 1;
5590 else
5591 *rvalp = 0;
5592 mutex_exit(QLOCK(q));
5593 }
5594 mutex_exit(&stp->sd_lock);
5595 return (0);
5596 }
5597
5598 case I_CANPUT:
5599 {
5600 char band;
5601
5602 if ((arg < 0) || (arg >= NBAND))
5603 return (EINVAL);
5604 band = (char)arg;
5605 *rvalp = bcanputnext(stp->sd_wrq, band);
5606 return (0);
5607 }
5608
5609 case I_SETCLTIME:
5610 {
5611 int closetime;
5612
5613 error = strcopyin((void *)arg, &closetime, sizeof (int),
5614 copyflag);
5615 if (error)
5616 return (error);
5617 if (closetime < 0)
5618 return (EINVAL);
5619
5620 stp->sd_closetime = closetime;
5621 return (0);
5622 }
5623
5624 case I_GETCLTIME:
5625 {
5626 int closetime;
5627
5628 closetime = stp->sd_closetime;
5629 return (strcopyout(&closetime, (void *)arg, sizeof (int),
5630 copyflag));
5631 }
5632
5633 case TIOCGSID:
5634 {
5635 pid_t sid;
5636
5637 mutex_enter(&stp->sd_lock);
5638 if (stp->sd_sidp == NULL) {
5639 mutex_exit(&stp->sd_lock);
5640 return (ENOTTY);
5641 }
5642 sid = stp->sd_sidp->pid_id;
5643 mutex_exit(&stp->sd_lock);
5644 return (strcopyout(&sid, (void *)arg, sizeof (pid_t),
5645 copyflag));
5646 }
5647
5648 case TIOCSPGRP:
5649 {
5650 pid_t pgrp;
5651 proc_t *q;
5652 pid_t sid, fg_pgid, bg_pgid;
5653
5654 if ((error = strcopyin((void *)arg, &pgrp, sizeof (pid_t),
5655 copyflag)) != 0)
5656 return (error);
5657 mutex_enter(&stp->sd_lock);
5658 mutex_enter(&pidlock);
5659 if (stp->sd_sidp != ttoproc(curthread)->p_sessp->s_sidp) {
5660 mutex_exit(&pidlock);
5661 mutex_exit(&stp->sd_lock);
5662 return (ENOTTY);
5663 }
5664 if (pgrp == stp->sd_pgidp->pid_id) {
5665 mutex_exit(&pidlock);
5666 mutex_exit(&stp->sd_lock);
5667 return (0);
5668 }
5669 if (pgrp <= 0 || pgrp >= maxpid) {
5670 mutex_exit(&pidlock);
5671 mutex_exit(&stp->sd_lock);
5672 return (EINVAL);
5673 }
5674 if ((q = pgfind(pgrp)) == NULL ||
5675 q->p_sessp != ttoproc(curthread)->p_sessp) {
5676 mutex_exit(&pidlock);
5677 mutex_exit(&stp->sd_lock);
5678 return (EPERM);
5679 }
5680 sid = stp->sd_sidp->pid_id;
5681 fg_pgid = q->p_pgrp;
5682 bg_pgid = stp->sd_pgidp->pid_id;
5683 CL_SET_PROCESS_GROUP(curthread, sid, bg_pgid, fg_pgid);
5684 PID_RELE(stp->sd_pgidp);
5685 ctty_clear_sighuped();
5686 stp->sd_pgidp = q->p_pgidp;
5687 PID_HOLD(stp->sd_pgidp);
5688 mutex_exit(&pidlock);
5689 mutex_exit(&stp->sd_lock);
5690 return (0);
5691 }
5692
5693 case TIOCGPGRP:
5694 {
5695 pid_t pgrp;
5696
5697 mutex_enter(&stp->sd_lock);
5698 if (stp->sd_sidp == NULL) {
5699 mutex_exit(&stp->sd_lock);
5700 return (ENOTTY);
5701 }
5702 pgrp = stp->sd_pgidp->pid_id;
5703 mutex_exit(&stp->sd_lock);
5704 return (strcopyout(&pgrp, (void *)arg, sizeof (pid_t),
5705 copyflag));
5706 }
5707
5708 case TIOCSCTTY:
5709 {
5710 return (strctty(stp));
5711 }
5712
5713 case TIOCNOTTY:
5714 {
5715 /* freectty() always assumes curproc. */
5716 if (freectty(B_FALSE) != 0)
5717 return (0);
5718 return (ENOTTY);
5719 }
5720
5721 case FIONBIO:
5722 case FIOASYNC:
5723 return (0); /* handled by the upper layer */
5724 }
5725 }
5726
5727 /*
5728 * Custom free routine used for M_PASSFP messages.
5729 */
5730 static void
5731 free_passfp(struct k_strrecvfd *srf)
5732 {
5733 (void) closef(srf->fp);
5734 kmem_free(srf, sizeof (struct k_strrecvfd) + sizeof (frtn_t));
5735 }
5736
5737 /* ARGSUSED */
5738 int
5739 do_sendfp(struct stdata *stp, struct file *fp, struct cred *cr)
5740 {
5741 queue_t *qp, *nextqp;
5742 struct k_strrecvfd *srf;
5743 mblk_t *mp;
5744 frtn_t *frtnp;
5745 size_t bufsize;
5746 queue_t *mate = NULL;
5747 syncq_t *sq = NULL;
5748 int retval = 0;
5749
5750 if (stp->sd_flag & STRHUP)
5751 return (ENXIO);
5752
5753 claimstr(stp->sd_wrq);
5754
5755 /* Fastpath, we have a pipe, and we are already mated, use it. */
5756 if (STRMATED(stp)) {
5757 qp = _RD(stp->sd_mate->sd_wrq);
5758 claimstr(qp);
5759 mate = qp;
5760 } else { /* Not already mated. */
5761
5762 /*
5763 * Walk the stream to the end of this one.
5764 * This assumes that the claimstr() will prevent
5765 * the plumbing between the stream head and the
5766 * driver from changing.
5767 */
5768 qp = stp->sd_wrq;
5769
5770 /*
5771 * Loop until we reach the end of this stream.
5772 * On completion, qp points to the write queue
5773 * at the end of the stream, or the read queue
5774 * at the stream head if this is a fifo.
5775 */
5776 while (((qp = qp->q_next) != NULL) && _SAMESTR(qp))
5777 ;
5778
5779 /*
5780 * Just in case we get a q_next which is NULL, but
5781 * not at the end of the stream. This is actually
5782 * broken, so we set an assert to catch it in
5783 * debug, and set an error and return if not debug.
5784 */
5785 ASSERT(qp);
5786 if (qp == NULL) {
5787 releasestr(stp->sd_wrq);
5788 return (EINVAL);
5789 }
5790
5791 /*
5792 * Enter the syncq for the driver, so (hopefully)
5793 * the queue values will not change on us.
5794 * XXXX - This will only prevent the race IFF only
5795 * the write side modifies the q_next member, and
5796 * the put procedure is protected by at least
5797 * MT_PERQ.
5798 */
5799 if ((sq = qp->q_syncq) != NULL)
5800 entersq(sq, SQ_PUT);
5801
5802 /* Now get the q_next value from this qp. */
5803 nextqp = qp->q_next;
5804
5805 /*
5806 * If nextqp exists and the other stream is different
5807 * from this one, claim the stream, set the mate, and
5808 * get the read queue at the stream head of the other
5809 * stream. Assumes that nextqp was at least valid when
5810 * we got it. Hopefully the entersq of the driver
5811 * will prevent it from changing on us.
5812 */
5813 if ((nextqp != NULL) && (STREAM(nextqp) != stp)) {
5814 ASSERT(qp->q_qinfo->qi_srvp);
5815 ASSERT(_OTHERQ(qp)->q_qinfo->qi_srvp);
5816 ASSERT(_OTHERQ(qp->q_next)->q_qinfo->qi_srvp);
5817 claimstr(nextqp);
5818
5819 /* Make sure we still have a q_next */
5820 if (nextqp != qp->q_next) {
5821 releasestr(stp->sd_wrq);
5822 releasestr(nextqp);
5823 return (EINVAL);
5824 }
5825
5826 qp = _RD(STREAM(nextqp)->sd_wrq);
5827 mate = qp;
5828 }
5829 /* If we entered the syncq above, leave it. */
5830 if (sq != NULL)
5831 leavesq(sq, SQ_PUT);
5832 } /* STRMATED(STP) */
5833
5834 /* XXX prevents substitution of the ops vector */
5835 if (qp->q_qinfo != &strdata && qp->q_qinfo != &fifo_strdata) {
5836 retval = EINVAL;
5837 goto out;
5838 }
5839
5840 if (qp->q_flag & QFULL) {
5841 retval = EAGAIN;
5842 goto out;
5843 }
5844
5845 /*
5846 * Since M_PASSFP messages include a file descriptor, we use
5847 * esballoc() and specify a custom free routine (free_passfp()) that
5848 * will close the descriptor as part of freeing the message. For
5849 * convenience, we stash the frtn_t right after the data block.
5850 */
5851 bufsize = sizeof (struct k_strrecvfd) + sizeof (frtn_t);
5852 srf = kmem_alloc(bufsize, KM_NOSLEEP);
5853 if (srf == NULL) {
5854 retval = EAGAIN;
5855 goto out;
5856 }
5857
5858 frtnp = (frtn_t *)(srf + 1);
5859 frtnp->free_arg = (caddr_t)srf;
5860 frtnp->free_func = free_passfp;
5861
5862 mp = esballoc((uchar_t *)srf, bufsize, BPRI_MED, frtnp);
5863 if (mp == NULL) {
5864 kmem_free(srf, bufsize);
5865 retval = EAGAIN;
5866 goto out;
5867 }
5868 mp->b_wptr += sizeof (struct k_strrecvfd);
5869 mp->b_datap->db_type = M_PASSFP;
5870
5871 srf->fp = fp;
5872 srf->uid = crgetuid(curthread->t_cred);
5873 srf->gid = crgetgid(curthread->t_cred);
5874 mutex_enter(&fp->f_tlock);
5875 fp->f_count++;
5876 mutex_exit(&fp->f_tlock);
5877
5878 put(qp, mp);
5879 out:
5880 releasestr(stp->sd_wrq);
5881 if (mate)
5882 releasestr(mate);
5883 return (retval);
5884 }
5885
5886 /*
5887 * Send an ioctl message downstream and wait for acknowledgement.
5888 * flags may be set to either U_TO_K or K_TO_K and a combination
5889 * of STR_NOERROR or STR_NOSIG
5890 * STR_NOSIG: Signals are essentially ignored or held and have
5891 * no effect for the duration of the call.
5892 * STR_NOERROR: Ignores stream head read, write and hup errors.
5893 * Additionally, if an existing ioctl times out, it is assumed
5894 * lost and this ioctl will continue as if the previous ioctl had
5895 * finished. ETIME may be returned if this ioctl times out (i.e.
5896 * ic_timout is not INFTIM). Non-stream head errors may be returned if
5897 * the ioc_error indicates that the driver/module had problems,
5898 * an EFAULT was found when accessing user data, a lack of
5899 * resources, etc.
5900 */
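/*
 * For reference, a minimal userland sketch of the I_STR path that is
 * serviced by this routine (illustrative only; MYDRV_GETCFG and cfg
 * are hypothetical driver-specific names):
 *
 *     struct strioctl sc;
 *
 *     sc.ic_cmd = MYDRV_GETCFG;
 *     sc.ic_timout = 0;               ... 0 selects default STRTIMOUT ...
 *     sc.ic_len = sizeof (cfg);
 *     sc.ic_dp = (char *)&cfg;
 *     if (ioctl(fd, I_STR, &sc) < 0)
 *             ... error handling ...
 */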
5901 int
5902 strdoioctl(
5903 struct stdata *stp,
5904 struct strioctl *strioc,
5905 int fflags, /* file flags with model info */
5906 int flag,
5907 cred_t *crp,
5908 int *rvalp)
5909 {
5910 mblk_t *bp;
5911 struct iocblk *iocbp;
5912 struct copyreq *reqp;
5913 struct copyresp *resp;
5914 int id;
5915 int transparent = 0;
5916 int error = 0;
5917 int len = 0;
5918 caddr_t taddr;
5919 int copyflag = (flag & (U_TO_K | K_TO_K));
5920 int sigflag = (flag & STR_NOSIG);
5921 int errs;
5922 uint_t waitflags;
5923 boolean_t set_iocwaitne = B_FALSE;
5924
5925 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K);
5926 ASSERT((fflags & FMODELS) != 0);
5927
5928 TRACE_2(TR_FAC_STREAMS_FR,
5929 TR_STRDOIOCTL,
5930 "strdoioctl:stp %p strioc %p", stp, strioc);
5931 if (strioc->ic_len == TRANSPARENT) { /* send arg in M_DATA block */
5932 transparent = 1;
5933 strioc->ic_len = sizeof (intptr_t);
5934 }
5935
5936 if (strioc->ic_len < 0 || (strmsgsz > 0 && strioc->ic_len > strmsgsz))
5937 return (EINVAL);
5938
5939 if ((bp = allocb_cred_wait(sizeof (union ioctypes), sigflag, &error,
5940 crp, curproc->p_pid)) == NULL)
5941 return (error);
5942
5943 bzero(bp->b_wptr, sizeof (union ioctypes));
5944
5945 iocbp = (struct iocblk *)bp->b_wptr;
5946 iocbp->ioc_count = strioc->ic_len;
5947 iocbp->ioc_cmd = strioc->ic_cmd;
5948 iocbp->ioc_flag = (fflags & FMODELS);
5949
5950 crhold(crp);
5951 iocbp->ioc_cr = crp;
5952 DB_TYPE(bp) = M_IOCTL;
5953 bp->b_wptr += sizeof (struct iocblk);
5954
5955 if (flag & STR_NOERROR)
5956 errs = STPLEX;
5957 else
5958 errs = STRHUP|STRDERR|STWRERR|STPLEX;
5959
5960 /*
5961 * If there is data to copy into ioctl block, do so.
5962 */
5963 if (iocbp->ioc_count > 0) {
5964 if (transparent)
5965 /*
5966 * Note: STR_NOERROR does not have an effect
5967 * in putiocd()
5968 */
5969 id = K_TO_K | sigflag;
5970 else
5971 id = flag;
5972 if ((error = putiocd(bp, strioc->ic_dp, id, crp)) != 0) {
5973 freemsg(bp);
5974 crfree(crp);
5975 return (error);
5976 }
5977
5978 /*
5979 * We could have slept copying in user pages.
5980 * Recheck the stream head state (the other end
5981 * of a pipe could have gone away).
5982 */
5983 if (stp->sd_flag & errs) {
5984 mutex_enter(&stp->sd_lock);
5985 error = strgeterr(stp, errs, 0);
5986 mutex_exit(&stp->sd_lock);
5987 if (error != 0) {
5988 freemsg(bp);
5989 crfree(crp);
5990 return (error);
5991 }
5992 }
5993 }
5994 if (transparent)
5995 iocbp->ioc_count = TRANSPARENT;
5996
5997 /*
5998 * Block for up to STRTIMOUT milliseconds if there is an outstanding
5999 * ioctl for this stream already running. All processes
6000 * sleeping here will be awakened as a result of an ACK
6001 * or NAK being received for the outstanding ioctl, or
6002 * as a result of the timer expiring on the outstanding
6003 * ioctl (a failure), or as a result of any waiting
6004 * process's timer expiring (also a failure).
6005 */
6006
6007 error = 0;
6008 mutex_enter(&stp->sd_lock);
6009 while ((stp->sd_flag & IOCWAIT) ||
6010 (!set_iocwaitne && (stp->sd_flag & IOCWAITNE))) {
6011 clock_t cv_rval;
6012
6013 TRACE_0(TR_FAC_STREAMS_FR,
6014 TR_STRDOIOCTL_WAIT,
6015 "strdoioctl sleeps - IOCWAIT");
6016 cv_rval = str_cv_wait(&stp->sd_iocmonitor, &stp->sd_lock,
6017 STRTIMOUT, sigflag);
6018 if (cv_rval <= 0) {
6019 if (cv_rval == 0) {
6020 error = EINTR;
6021 } else {
6022 if (flag & STR_NOERROR) {
6023 /*
6024 * Terminating current ioctl in
6025 * progress -- assume it got lost and
6026 * wake up the other thread so that the
6027 * operation completes.
6028 */
6029 if (!(stp->sd_flag & IOCWAITNE)) {
6030 set_iocwaitne = B_TRUE;
6031 stp->sd_flag |= IOCWAITNE;
6032 cv_broadcast(&stp->sd_monitor);
6033 }
6034 /*
6035 * Otherwise, there's a running
6036 * STR_NOERROR -- we have no choice
6037 * here but to wait forever (or until
6038 * interrupted).
6039 */
6040 } else {
6041 /*
6042 * pending ioctl has caused
6043 * us to time out
6044 */
6045 error = ETIME;
6046 }
6047 }
6048 } else if ((stp->sd_flag & errs)) {
6049 error = strgeterr(stp, errs, 0);
6050 }
6051 if (error) {
6052 mutex_exit(&stp->sd_lock);
6053 freemsg(bp);
6054 crfree(crp);
6055 return (error);
6056 }
6057 }
6058
6059 /*
6060 * Have control of ioctl mechanism.
6061 * Send down ioctl packet and wait for response.
6062 */
6063 if (stp->sd_iocblk != (mblk_t *)-1) {
6064 freemsg(stp->sd_iocblk);
6065 }
6066 stp->sd_iocblk = NULL;
6067
6068 /*
6069 * If this is marked with 'noerror' (internal; mostly
6070 * I_{P,}{UN,}LINK), then make sure nobody else is able to get
6071 * in here by setting IOCWAITNE.
6072 */
6073 waitflags = IOCWAIT;
6074 if (flag & STR_NOERROR)
6075 waitflags |= IOCWAITNE;
6076
6077 stp->sd_flag |= waitflags;
6078
6079 /*
6080 * Assign sequence number.
6081 */
6082 iocbp->ioc_id = stp->sd_iocid = getiocseqno();
6083
6084 mutex_exit(&stp->sd_lock);
6085
6086 TRACE_1(TR_FAC_STREAMS_FR,
6087 TR_STRDOIOCTL_PUT, "strdoioctl put: stp %p", stp);
6088 stream_willservice(stp);
6089 putnext(stp->sd_wrq, bp);
6090 stream_runservice(stp);
6091
6092 /*
6093 * Timed wait for acknowledgment. The wait time is limited by the
6094 * timeout value, which must be a positive integer (number of
6095 * milliseconds) to wait, or 0 (use default value of STRTIMOUT
6096 * milliseconds), or -1 (wait forever). This will be awakened
6097 * either by an ACK/NAK message arriving, the timer expiring, or
6098 * the timer expiring on another ioctl waiting for control of the
6099 * mechanism.
6100 */
6101 waitioc:
6102 mutex_enter(&stp->sd_lock);
6103
6104
6105 /*
6106 * If the reply has already arrived, don't sleep. If awakened from
6107 * the sleep, fail only if the reply has not arrived by then.
6108 * Otherwise, process the reply.
6109 */
6110 while (!stp->sd_iocblk) {
6111 clock_t cv_rval;
6112
6113 if (stp->sd_flag & errs) {
6114 error = strgeterr(stp, errs, 0);
6115 if (error != 0) {
6116 stp->sd_flag &= ~waitflags;
6117 cv_broadcast(&stp->sd_iocmonitor);
6118 mutex_exit(&stp->sd_lock);
6119 crfree(crp);
6120 return (error);
6121 }
6122 }
6123
6124 TRACE_0(TR_FAC_STREAMS_FR,
6125 TR_STRDOIOCTL_WAIT2,
6126 "strdoioctl sleeps awaiting reply");
6127 ASSERT(error == 0);
6128
6129 cv_rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock,
6130 (strioc->ic_timout ?
6131 strioc->ic_timout * 1000 : STRTIMOUT), sigflag);
6132
6133 /*
6134 * There are four possible cases here: interrupt, timeout,
6135 * wakeup by IOCWAITNE (above), or wakeup by strrput_nondata (a
6136 * valid M_IOCTL reply).
6137 *
6138 * If we've been awakened by a STR_NOERROR ioctl on some other
6139 * thread, then sd_iocblk will still be NULL, and IOCWAITNE
6140 * will be set. Pretend as if we just timed out. Note that
6141 * this other thread waited at least STRTIMOUT before trying to
6142 * awaken our thread, so this is indistinguishable (even for
6143 * INFTIM) from the case where we failed with ETIME waiting on
6144 * IOCWAIT in the prior loop.
6145 */
6146 if (cv_rval > 0 && !(flag & STR_NOERROR) &&
6147 stp->sd_iocblk == NULL && (stp->sd_flag & IOCWAITNE)) {
6148 cv_rval = -1;
6149 }
6150
6151 /*
6152 * Note: STR_NOERROR does not protect
6153 * us here; use ic_timout < 0.
6154 */
6155 if (cv_rval <= 0) {
6156 if (cv_rval == 0) {
6157 error = EINTR;
6158 } else {
6159 error = ETIME;
6160 }
6161 /*
6162 * A message could have come in after we were scheduled
6163 * but before we were actually run.
6164 */
6165 bp = stp->sd_iocblk;
6166 stp->sd_iocblk = NULL;
6167 if (bp != NULL) {
6168 if ((bp->b_datap->db_type == M_COPYIN) ||
6169 (bp->b_datap->db_type == M_COPYOUT)) {
6170 mutex_exit(&stp->sd_lock);
6171 if (bp->b_cont) {
6172 freemsg(bp->b_cont);
6173 bp->b_cont = NULL;
6174 }
6175 bp->b_datap->db_type = M_IOCDATA;
6176 bp->b_wptr = bp->b_rptr +
6177 sizeof (struct copyresp);
6178 resp = (struct copyresp *)bp->b_rptr;
6179 resp->cp_rval =
6180 (caddr_t)1; /* failure */
6181 stream_willservice(stp);
6182 putnext(stp->sd_wrq, bp);
6183 stream_runservice(stp);
6184 mutex_enter(&stp->sd_lock);
6185 } else {
6186 freemsg(bp);
6187 }
6188 }
6189 stp->sd_flag &= ~waitflags;
6190 cv_broadcast(&stp->sd_iocmonitor);
6191 mutex_exit(&stp->sd_lock);
6192 crfree(crp);
6193 return (error);
6194 }
6195 }
6196 bp = stp->sd_iocblk;
6197 /*
6198 * Note: it is strictly impossible to get here with sd_iocblk set to
6199 * -1. This is because the initial loop above doesn't allow any new
6200 * ioctls into the fray until all others have passed this point.
6201 */
6202 ASSERT(bp != NULL && bp != (mblk_t *)-1);
6203 TRACE_1(TR_FAC_STREAMS_FR,
6204 TR_STRDOIOCTL_ACK, "strdoioctl got reply: bp %p", bp);
6205 if ((bp->b_datap->db_type == M_IOCACK) ||
6206 (bp->b_datap->db_type == M_IOCNAK)) {
6207 /* for detection of duplicate ioctl replies */
6208 stp->sd_iocblk = (mblk_t *)-1;
6209 stp->sd_flag &= ~waitflags;
6210 cv_broadcast(&stp->sd_iocmonitor);
6211 mutex_exit(&stp->sd_lock);
6212 } else {
6213 /*
6214 * flags not cleared here because we're still doing
6215 * copy in/out for ioctl.
6216 */
6217 stp->sd_iocblk = NULL;
6218 mutex_exit(&stp->sd_lock);
6219 }
6220
6221
6222 /*
6223 * Have received acknowledgment.
6224 */
6225
6226 switch (bp->b_datap->db_type) {
6227 case M_IOCACK:
6228 /*
6229 * Positive ack.
6230 */
6231 iocbp = (struct iocblk *)bp->b_rptr;
6232
6233 /*
6234 * Set error if indicated.
6235 */
6236 if (iocbp->ioc_error) {
6237 error = iocbp->ioc_error;
6238 break;
6239 }
6240
6241 /*
6242 * Set return value.
6243 */
6244 *rvalp = iocbp->ioc_rval;
6245
6246 /*
6247 * Data may have been returned in ACK message (ioc_count > 0).
6248 * If so, copy it out to the user's buffer.
6249 */
6250 if (iocbp->ioc_count && !transparent) {
6251 if (error = getiocd(bp, strioc->ic_dp, copyflag))
6252 break;
6253 }
6254 if (!transparent) {
6255 if (len) /* an M_COPYOUT was used with I_STR */
6256 strioc->ic_len = len;
6257 else
6258 strioc->ic_len = (int)iocbp->ioc_count;
6259 }
6260 break;
6261
6262 case M_IOCNAK:
6263 /*
6264 * Negative ack.
6265 *
6266 * The only thing to do is set error as specified
6267 * in neg ack packet.
6268 */
6269 iocbp = (struct iocblk *)bp->b_rptr;
6270
6271 error = (iocbp->ioc_error ? iocbp->ioc_error : EINVAL);
6272 break;
6273
6274 case M_COPYIN:
6275 /*
6276 * Driver or module has requested user ioctl data.
6277 */
6278 reqp = (struct copyreq *)bp->b_rptr;
6279
6280 /*
6281 * M_COPYIN should *never* have a message attached, though
6282 * it's harmless if it does -- thus, panic on a DEBUG
6283 * kernel and just free it on a non-DEBUG build.
6284 */
6285 ASSERT(bp->b_cont == NULL);
6286 if (bp->b_cont != NULL) {
6287 freemsg(bp->b_cont);
6288 bp->b_cont = NULL;
6289 }
6290
6291 error = putiocd(bp, reqp->cq_addr, flag, crp);
6292 if (error && bp->b_cont) {
6293 freemsg(bp->b_cont);
6294 bp->b_cont = NULL;
6295 }
6296
6297 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
6298 bp->b_datap->db_type = M_IOCDATA;
6299
6300 mblk_setcred(bp, crp, curproc->p_pid);
6301 resp = (struct copyresp *)bp->b_rptr;
6302 resp->cp_rval = (caddr_t)(uintptr_t)error;
6303 resp->cp_flag = (fflags & FMODELS);
6304
6305 stream_willservice(stp);
6306 putnext(stp->sd_wrq, bp);
6307 stream_runservice(stp);
6308
6309 if (error) {
6310 mutex_enter(&stp->sd_lock);
6311 stp->sd_flag &= ~waitflags;
6312 cv_broadcast(&stp->sd_iocmonitor);
6313 mutex_exit(&stp->sd_lock);
6314 crfree(crp);
6315 return (error);
6316 }
6317
6318 goto waitioc;
6319
6320 case M_COPYOUT:
6321 /*
6322 * Driver or module has ioctl data for a user.
6323 */
6324 reqp = (struct copyreq *)bp->b_rptr;
6325 ASSERT(bp->b_cont != NULL);
6326
6327 /*
6328 * Always (transparent or non-transparent) use the
6329 * address specified in the request.
6330 */
6331 taddr = reqp->cq_addr;
6332 if (!transparent)
6333 len = (int)reqp->cq_size;
6334
6335 /* copyout data to the provided address */
6336 error = getiocd(bp, taddr, copyflag);
6337
6338 freemsg(bp->b_cont);
6339 bp->b_cont = NULL;
6340
6341 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
6342 bp->b_datap->db_type = M_IOCDATA;
6343
6344 mblk_setcred(bp, crp, curproc->p_pid);
6345 resp = (struct copyresp *)bp->b_rptr;
6346 resp->cp_rval = (caddr_t)(uintptr_t)error;
6347 resp->cp_flag = (fflags & FMODELS);
6348
6349 stream_willservice(stp);
6350 putnext(stp->sd_wrq, bp);
6351 stream_runservice(stp);
6352
6353 if (error) {
6354 mutex_enter(&stp->sd_lock);
6355 stp->sd_flag &= ~waitflags;
6356 cv_broadcast(&stp->sd_iocmonitor);
6357 mutex_exit(&stp->sd_lock);
6358 crfree(crp);
6359 return (error);
6360 }
6361 goto waitioc;
6362
6363 default:
6364 ASSERT(0);
6365 mutex_enter(&stp->sd_lock);
6366 stp->sd_flag &= ~waitflags;
6367 cv_broadcast(&stp->sd_iocmonitor);
6368 mutex_exit(&stp->sd_lock);
6369 break;
6370 }
6371
6372 freemsg(bp);
6373 crfree(crp);
6374 return (error);
6375 }
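
/*
 * Illustration only (not compiled): a sketch of the driver side of the
 * transparent-ioctl exchange that strdoioctl() drives above, for a purely
 * hypothetical driver "xx".  The driver turns the M_IOCTL into an M_COPYIN
 * request with mcopyin(); strdoioctl() performs the copyin on its behalf
 * and replies with an M_IOCDATA carrying the result, which the driver
 * then acks or naks:
 *
 *	static void
 *	xx_wput_ioctl(queue_t *q, mblk_t *mp)
 *	{
 *		struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
 *		struct copyresp *csp;
 *
 *		switch (DB_TYPE(mp)) {
 *		case M_IOCTL:
 *			if (iocp->ioc_count != TRANSPARENT) {
 *				miocnak(q, mp, 0, EINVAL);
 *				break;
 *			}
 *			mcopyin(mp, NULL, sizeof (int), NULL);
 *			qreply(q, mp);
 *			break;
 *		case M_IOCDATA:
 *			csp = (struct copyresp *)mp->b_rptr;
 *			if (csp->cp_rval != NULL) {
 *				freemsg(mp);
 *				break;
 *			}
 *			miocack(q, mp, 0, 0);
 *			break;
 *		}
 *	}
 */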
6376
6377 /*
6378 * Send an M_CMD message downstream and wait for a reply. This is a ptools
6379 * special used to retrieve information from modules/drivers on a stream
6380 * without being subjected to flow control or interfering with pending
6381 * messages on the stream (e.g. an ioctl in flight).
6382 */
6383 int
6384 strdocmd(struct stdata *stp, struct strcmd *scp, cred_t *crp)
6385 {
6386 mblk_t *mp;
6387 struct cmdblk *cmdp;
6388 int error = 0;
6389 int errs = STRHUP|STRDERR|STWRERR|STPLEX;
6390 clock_t rval, timeout = STRTIMOUT;
6391
6392 if (scp->sc_len < 0 || scp->sc_len > sizeof (scp->sc_buf) ||
6393 scp->sc_timeout < -1)
6394 return (EINVAL);
6395
6396 if (scp->sc_timeout > 0)
6397 timeout = scp->sc_timeout * MILLISEC;
6398
6399 if ((mp = allocb_cred(sizeof (struct cmdblk), crp,
6400 curproc->p_pid)) == NULL)
6401 return (ENOMEM);
6402
6403 crhold(crp);
6404
6405 cmdp = (struct cmdblk *)mp->b_wptr;
6406 cmdp->cb_cr = crp;
6407 cmdp->cb_cmd = scp->sc_cmd;
6408 cmdp->cb_len = scp->sc_len;
6409 cmdp->cb_error = 0;
6410 mp->b_wptr += sizeof (struct cmdblk);
6411
6412 DB_TYPE(mp) = M_CMD;
6413 DB_CPID(mp) = curproc->p_pid;
6414
6415 /*
6416 * Copy in the payload.
6417 */
6418 if (cmdp->cb_len > 0) {
6419 mp->b_cont = allocb_cred(sizeof (scp->sc_buf), crp,
6420 curproc->p_pid);
6421 if (mp->b_cont == NULL) {
6422 error = ENOMEM;
6423 goto out;
6424 }
6425
6426 /* cb_len comes from sc_len, which has already been checked */
6427 ASSERT(cmdp->cb_len <= sizeof (scp->sc_buf));
6428 (void) bcopy(scp->sc_buf, mp->b_cont->b_wptr, cmdp->cb_len);
6429 mp->b_cont->b_wptr += cmdp->cb_len;
6430 DB_CPID(mp->b_cont) = curproc->p_pid;
6431 }
6432
6433 /*
6434 * Since this mechanism is strictly for ptools, and since only one
6435 * process can be grabbed at a time, we simply fail if there's
6436 * currently an operation pending.
6437 */
6438 mutex_enter(&stp->sd_lock);
6439 if (stp->sd_flag & STRCMDWAIT) {
6440 mutex_exit(&stp->sd_lock);
6441 error = EBUSY;
6442 goto out;
6443 }
6444 stp->sd_flag |= STRCMDWAIT;
6445 ASSERT(stp->sd_cmdblk == NULL);
6446 mutex_exit(&stp->sd_lock);
6447
6448 putnext(stp->sd_wrq, mp);
6449 mp = NULL;
6450
6451 /*
6452 * Timed wait for acknowledgment. If the reply has already arrived,
6453 * don't sleep. If awakened from the sleep, fail only if the reply
6454 * has not arrived by then. Otherwise, process the reply.
6455 */
6456 mutex_enter(&stp->sd_lock);
6457 while (stp->sd_cmdblk == NULL) {
6458 if (stp->sd_flag & errs) {
6459 if ((error = strgeterr(stp, errs, 0)) != 0)
6460 goto waitout;
6461 }
6462
6463 rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, timeout, 0);
6464 if (stp->sd_cmdblk != NULL)
6465 break;
6466
6467 if (rval <= 0) {
6468 error = (rval == 0) ? EINTR : ETIME;
6469 goto waitout;
6470 }
6471 }
6472
6473 /*
6474 * We received a reply.
6475 */
6476 mp = stp->sd_cmdblk;
6477 stp->sd_cmdblk = NULL;
6478 ASSERT(mp != NULL && DB_TYPE(mp) == M_CMD);
6479 ASSERT(stp->sd_flag & STRCMDWAIT);
6480 stp->sd_flag &= ~STRCMDWAIT;
6481 mutex_exit(&stp->sd_lock);
6482
6483 cmdp = (struct cmdblk *)mp->b_rptr;
6484 if ((error = cmdp->cb_error) != 0)
6485 goto out;
6486
6487 /*
6488 * Data may have been returned in the reply (cb_len > 0).
6489 * If so, copy it out to the user's buffer.
6490 */
6491 if (cmdp->cb_len > 0) {
6492 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < cmdp->cb_len) {
6493 error = EPROTO;
6494 goto out;
6495 }
6496
6497 cmdp->cb_len = MIN(cmdp->cb_len, sizeof (scp->sc_buf));
6498 (void) bcopy(mp->b_cont->b_rptr, scp->sc_buf, cmdp->cb_len);
6499 }
6500 scp->sc_len = cmdp->cb_len;
6501 out:
6502 freemsg(mp);
6503 crfree(crp);
6504 return (error);
6505 waitout:
6506 ASSERT(stp->sd_cmdblk == NULL);
6507 stp->sd_flag &= ~STRCMDWAIT;
6508 mutex_exit(&stp->sd_lock);
6509 crfree(crp);
6510 return (error);
6511 }
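
/*
 * Illustration only (not compiled): a sketch of a caller of strdocmd().
 * The command value shown (TI_GETPEERNAME) is just an example; whether a
 * given module or driver answers a particular M_CMD command is up to that
 * module.  xx_consume() is a hypothetical placeholder for whatever the
 * caller does with the reply.  sc_timeout is in seconds; 0 means the
 * default STRTIMOUT.
 *
 *	struct strcmd sc;
 *	int error;
 *
 *	sc.sc_cmd = TI_GETPEERNAME;
 *	sc.sc_len = 0;
 *	sc.sc_timeout = 5;
 *	error = strdocmd(stp, &sc, CRED());
 *	if (error == 0)
 *		xx_consume(sc.sc_buf, sc.sc_len);
 */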
6512
6513 /*
6514 * For the SunOS keyboard driver.
6515 * Return the next available "ioctl" sequence number.
6516 * Exported, so that streams modules can send "ioctl" messages
6517 * downstream from their open routine.
6518 */
6519 int
6520 getiocseqno(void)
6521 {
6522 int i;
6523
6524 mutex_enter(&strresources);
6525 i = ++ioc_id;
6526 mutex_exit(&strresources);
6527 return (i);
6528 }
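
/*
 * Illustration only (not compiled): roughly how a keyboard-style module
 * might use getiocseqno() from its open routine to send an "ioctl"
 * message downstream.  "crp" is assumed to be the credential passed to
 * the open routine, and the module would later match the M_IOCACK or
 * M_IOCNAK that comes back by comparing ioc_id.
 *
 *	mblk_t *mp;
 *	struct iocblk *iocp;
 *
 *	if ((mp = allocb(sizeof (struct iocblk), BPRI_MED)) == NULL)
 *		return (ENOMEM);
 *	mp->b_datap->db_type = M_IOCTL;
 *	iocp = (struct iocblk *)mp->b_rptr;
 *	bzero(iocp, sizeof (*iocp));
 *	iocp->ioc_cmd = KIOCTYPE;
 *	iocp->ioc_id = getiocseqno();
 *	iocp->ioc_cr = crp;
 *	mp->b_wptr += sizeof (struct iocblk);
 *	putnext(WR(q), mp);
 */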
6529
6530 /*
6531 * Get the next message from the read queue. If the message is
6532 * priority, STRPRI will have been set by strrput(). This flag
6533 * should be reset only when the entire message at the front of the
6534 * queue has been consumed.
6535 *
6536 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common.
6537 */
6538 int
6539 strgetmsg(
6540 struct vnode *vp,
6541 struct strbuf *mctl,
6542 struct strbuf *mdata,
6543 unsigned char *prip,
6544 int *flagsp,
6545 int fmode,
6546 rval_t *rvp)
6547 {
6548 struct stdata *stp;
6549 mblk_t *bp, *nbp;
6550 mblk_t *savemp = NULL;
6551 mblk_t *savemptail = NULL;
6552 uint_t old_sd_flag;
6553 int flg;
6554 int more = 0;
6555 int error = 0;
6556 char first = 1;
6557 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
6558 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
6559 unsigned char pri = 0;
6560 queue_t *q;
6561 int pr = 0; /* Partial read successful */
6562 struct uio uios;
6563 struct uio *uiop = &uios;
6564 struct iovec iovs;
6565 unsigned char type;
6566
6567 TRACE_1(TR_FAC_STREAMS_FR, TR_STRGETMSG_ENTER,
6568 "strgetmsg:%p", vp);
6569
6570 ASSERT(vp->v_stream);
6571 stp = vp->v_stream;
6572 rvp->r_val1 = 0;
6573
6574 mutex_enter(&stp->sd_lock);
6575
6576 if ((error = i_straccess(stp, JCREAD)) != 0) {
6577 mutex_exit(&stp->sd_lock);
6578 return (error);
6579 }
6580
6581 if (stp->sd_flag & (STRDERR|STPLEX)) {
6582 error = strgeterr(stp, STRDERR|STPLEX, 0);
6583 if (error != 0) {
6584 mutex_exit(&stp->sd_lock);
6585 return (error);
6586 }
6587 }
6588 mutex_exit(&stp->sd_lock);
6589
6590 switch (*flagsp) {
6591 case MSG_HIPRI:
6592 if (*prip != 0)
6593 return (EINVAL);
6594 break;
6595
6596 case MSG_ANY:
6597 case MSG_BAND:
6598 break;
6599
6600 default:
6601 return (EINVAL);
6602 }
6603 /*
6604 * Setup uio and iov for data part
6605 */
6606 iovs.iov_base = mdata->buf;
6607 iovs.iov_len = mdata->maxlen;
6608 uios.uio_iov = &iovs;
6609 uios.uio_iovcnt = 1;
6610 uios.uio_loffset = 0;
6611 uios.uio_segflg = UIO_USERSPACE;
6612 uios.uio_fmode = 0;
6613 uios.uio_extflg = UIO_COPY_CACHED;
6614 uios.uio_resid = mdata->maxlen;
6615 uios.uio_offset = 0;
6616
6617 q = _RD(stp->sd_wrq);
6618 mutex_enter(&stp->sd_lock);
6619 old_sd_flag = stp->sd_flag;
6620 mark = 0;
6621 for (;;) {
6622 int done = 0;
6623 mblk_t *q_first = q->q_first;
6624
6625 /*
6626 * Get the next message of appropriate priority
6627 * from the stream head. If the caller is interested
6628 * in band or hipri messages, then they should already
6629 * be enqueued at the stream head. On the other hand
6630 * if the caller wants normal (band 0) messages, they
6631 * might be deferred in a synchronous stream and they
6632 * will need to be pulled up.
6633 *
6634 * After we have dequeued a message, we might find that
6635 * it was a deferred M_SIG that was enqueued at the
6636 * stream head. It must now be posted as part of the
6637 * read by calling strsignal_nolock().
6638 *
6639 * Also note that strrput does not enqueue an M_PCSIG,
6640 * and there cannot be more than one hipri message,
6641 * so there was no need to have the M_PCSIG case.
6642 *
6643 * At some time it might be nice to try and wrap the
6644 * functionality of kstrgetmsg() and strgetmsg() into
6645 * a common routine so as to reduce the amount of replicated
6646 * code (since they are extremely similar).
6647 */
6648 if (!(*flagsp & (MSG_HIPRI|MSG_BAND))) {
6649 /* Asking for normal, band0 data */
6650 bp = strget(stp, q, uiop, first, &error);
6651 ASSERT(MUTEX_HELD(&stp->sd_lock));
6652 if (bp != NULL) {
6653 if (DB_TYPE(bp) == M_SIG) {
6654 strsignal_nolock(stp, *bp->b_rptr,
6655 bp->b_band);
6656 freemsg(bp);
6657 continue;
6658 } else {
6659 break;
6660 }
6661 }
6662 if (error != 0)
6663 goto getmout;
6664
6665 /*
6666 * We can't depend on the value of STRPRI here because
6667 * the stream head may be in transit. Therefore, we
6668 * must look at the type of the first message to
6669 * determine if a high priority message is waiting.
6670 */
6671 } else if ((*flagsp & MSG_HIPRI) && q_first != NULL &&
6672 DB_TYPE(q_first) >= QPCTL &&
6673 (bp = getq_noenab(q, 0)) != NULL) {
6674 /* Asked for HIPRI and got one */
6675 ASSERT(DB_TYPE(bp) >= QPCTL);
6676 break;
6677 } else if ((*flagsp & MSG_BAND) && q_first != NULL &&
6678 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) &&
6679 (bp = getq_noenab(q, 0)) != NULL) {
6680 /*
6681 * Asked for at least band "prip" and got either at
6682 * least that band or a hipri message.
6683 */
6684 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL);
6685 if (DB_TYPE(bp) == M_SIG) {
6686 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
6687 freemsg(bp);
6688 continue;
6689 } else {
6690 break;
6691 }
6692 }
6693
6694 /* No data. Time to sleep? */
6695 qbackenable(q, 0);
6696
6697 /*
6698 * If STRHUP or STREOF, return 0 length control and data.
6699 * If both maxlen values are zero (as for a read(fd,buf,0)), do not
6700 * sleep to satisfy this request because by default we have
6701 * zero bytes to return.
6702 */
6703 if ((stp->sd_flag & (STRHUP|STREOF)) || (mctl->maxlen == 0 &&
6704 mdata->maxlen == 0)) {
6705 mctl->len = mdata->len = 0;
6706 *flagsp = 0;
6707 mutex_exit(&stp->sd_lock);
6708 return (0);
6709 }
6710 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_WAIT,
6711 "strgetmsg calls strwaitq:%p, %p",
6712 vp, uiop);
6713 if (((error = strwaitq(stp, GETWAIT, (ssize_t)0, fmode, -1,
6714 &done)) != 0) || done) {
6715 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_DONE,
6716 "strgetmsg error or done:%p, %p",
6717 vp, uiop);
6718 mutex_exit(&stp->sd_lock);
6719 return (error);
6720 }
6721 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_AWAKE,
6722 "strgetmsg awakes:%p, %p", vp, uiop);
6723 if ((error = i_straccess(stp, JCREAD)) != 0) {
6724 mutex_exit(&stp->sd_lock);
6725 return (error);
6726 }
6727 first = 0;
6728 }
6729 ASSERT(bp != NULL);
6730 /*
6731 * Extract any mark information. If the message is not completely
6732 * consumed this information will be put in the mblk
6733 * that is putback.
6734 * If MSGMARKNEXT is set and the message is completely consumed
6735 * the STRATMARK flag will be set below. Likewise, if
6736 * MSGNOTMARKNEXT is set and the message is
6737 * completely consumed STRNOTATMARK will be set.
6738 */
6739 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
6740 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
6741 (MSGMARKNEXT|MSGNOTMARKNEXT));
6742 if (mark != 0 && bp == stp->sd_mark) {
6743 mark |= _LASTMARK;
6744 stp->sd_mark = NULL;
6745 }
6746 /*
6747 * keep track of the original message type and priority
6748 */
6749 pri = bp->b_band;
6750 type = bp->b_datap->db_type;
6751 if (type == M_PASSFP) {
6752 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
6753 stp->sd_mark = bp;
6754 bp->b_flag |= mark & ~_LASTMARK;
6755 putback(stp, q, bp, pri);
6756 qbackenable(q, pri);
6757 mutex_exit(&stp->sd_lock);
6758 return (EBADMSG);
6759 }
6760 ASSERT(type != M_SIG);
6761
6762 /*
6763 * Set this flag so strrput will not generate signals. Need to
6764 * make sure this flag is cleared before leaving this routine,
6765 * else signals will stop being sent.
6766 */
6767 stp->sd_flag |= STRGETINPROG;
6768 mutex_exit(&stp->sd_lock);
6769
6770 if (STREAM_NEEDSERVICE(stp))
6771 stream_runservice(stp);
6772
6773 /*
6774 * Set HIPRI flag if message is priority.
6775 */
6776 if (type >= QPCTL)
6777 flg = MSG_HIPRI;
6778 else
6779 flg = MSG_BAND;
6780
6781 /*
6782 * First process PROTO or PCPROTO blocks, if any.
6783 */
6784 if (mctl->maxlen >= 0 && type != M_DATA) {
6785 size_t n, bcnt;
6786 char *ubuf;
6787
6788 bcnt = mctl->maxlen;
6789 ubuf = mctl->buf;
6790 while (bp != NULL && bp->b_datap->db_type != M_DATA) {
6791 if ((n = MIN(bcnt, bp->b_wptr - bp->b_rptr)) != 0 &&
6792 copyout(bp->b_rptr, ubuf, n)) {
6793 error = EFAULT;
6794 mutex_enter(&stp->sd_lock);
6795 /*
6796 * clear stream head pri flag based on
6797 * first message type
6798 */
6799 if (type >= QPCTL) {
6800 ASSERT(type == M_PCPROTO);
6801 stp->sd_flag &= ~STRPRI;
6802 }
6803 more = 0;
6804 freemsg(bp);
6805 goto getmout;
6806 }
6807 ubuf += n;
6808 bp->b_rptr += n;
6809 if (bp->b_rptr >= bp->b_wptr) {
6810 nbp = bp;
6811 bp = bp->b_cont;
6812 freeb(nbp);
6813 }
6814 ASSERT(n <= bcnt);
6815 bcnt -= n;
6816 if (bcnt == 0)
6817 break;
6818 }
6819 mctl->len = mctl->maxlen - bcnt;
6820 } else
6821 mctl->len = -1;
6822
6823 if (bp && bp->b_datap->db_type != M_DATA) {
6824 /*
6825 * More PROTO blocks in msg.
6826 */
6827 more |= MORECTL;
6828 savemp = bp;
6829 while (bp && bp->b_datap->db_type != M_DATA) {
6830 savemptail = bp;
6831 bp = bp->b_cont;
6832 }
6833 savemptail->b_cont = NULL;
6834 }
6835
6836 /*
6837 * Now process DATA blocks, if any.
6838 */
6839 if (mdata->maxlen >= 0 && bp) {
6840 /*
6841 * struiocopyout will consume a potential zero-length
6842 * M_DATA even if uio_resid is zero.
6843 */
6844 size_t oldresid = uiop->uio_resid;
6845
6846 bp = struiocopyout(bp, uiop, &error);
6847 if (error != 0) {
6848 mutex_enter(&stp->sd_lock);
6849 /*
6850 * clear stream head hi pri flag based on
6851 * first message
6852 */
6853 if (type >= QPCTL) {
6854 ASSERT(type == M_PCPROTO);
6855 stp->sd_flag &= ~STRPRI;
6856 }
6857 more = 0;
6858 freemsg(savemp);
6859 goto getmout;
6860 }
6861 /*
6862 * (pr == 1) indicates a partial read.
6863 */
6864 if (oldresid > uiop->uio_resid)
6865 pr = 1;
6866 mdata->len = mdata->maxlen - uiop->uio_resid;
6867 } else
6868 mdata->len = -1;
6869
6870 if (bp) { /* more data blocks in msg */
6871 more |= MOREDATA;
6872 if (savemp)
6873 savemptail->b_cont = bp;
6874 else
6875 savemp = bp;
6876 }
6877
6878 mutex_enter(&stp->sd_lock);
6879 if (savemp) {
6880 if (pr && (savemp->b_datap->db_type == M_DATA) &&
6881 msgnodata(savemp)) {
6882 /*
6883 * Avoid queuing a zero-length tail part of
6884 * a message. pr=1 indicates that we read some of
6885 * the message.
6886 */
6887 freemsg(savemp);
6888 more &= ~MOREDATA;
6889 /*
6890 * clear stream head hi pri flag based on
6891 * first message
6892 */
6893 if (type >= QPCTL) {
6894 ASSERT(type == M_PCPROTO);
6895 stp->sd_flag &= ~STRPRI;
6896 }
6897 } else {
6898 savemp->b_band = pri;
6899 /*
6900 * If the first message was HIPRI and the one we're
6901 * putting back isn't, then clear STRPRI, otherwise
6902 * set STRPRI again. Note that we must set STRPRI
6903 * again since the flush logic in strrput_nondata()
6904 * may have cleared it while we had sd_lock dropped.
6905 */
6906 if (type >= QPCTL) {
6907 ASSERT(type == M_PCPROTO);
6908 if (queclass(savemp) < QPCTL)
6909 stp->sd_flag &= ~STRPRI;
6910 else
6911 stp->sd_flag |= STRPRI;
6912 } else if (queclass(savemp) >= QPCTL) {
6913 /*
6914 * The first message was not a HIPRI message,
6915 * but the one we are about to putback is.
6916 * For simplicity, we do not allow HIPRI
6917 * messages to be embedded in the message
6918 * body, so just force it to the same type
6919 * as the first message.
6920 */
6921 ASSERT(type == M_DATA || type == M_PROTO);
6922 ASSERT(savemp->b_datap->db_type == M_PCPROTO);
6923 savemp->b_datap->db_type = type;
6924 }
6925 if (mark != 0) {
6926 savemp->b_flag |= mark & ~_LASTMARK;
6927 if ((mark & _LASTMARK) &&
6928 (stp->sd_mark == NULL)) {
6929 /*
6930 * If another marked message arrived
6931 * while sd_lock was not held sd_mark
6932 * would be non-NULL.
6933 */
6934 stp->sd_mark = savemp;
6935 }
6936 }
6937 putback(stp, q, savemp, pri);
6938 }
6939 } else {
6940 /*
6941 * The complete message was consumed.
6942 *
6943 * If another M_PCPROTO arrived while sd_lock was not held
6944 * it would have been discarded since STRPRI was still set.
6945 *
6946 * Move the MSG*MARKNEXT information
6947 * to the stream head just in case
6948 * the read queue becomes empty.
6949 * clear stream head hi pri flag based on
6950 * first message
6951 *
6952 * If the stream head was at the mark
6953 * (STRATMARK) before we dropped sd_lock above
6954 * and some data was consumed then we have
6955 * moved past the mark thus STRATMARK is
6956 * cleared. However, if a message arrived in
6957 * strrput during the copyout above causing
6958 * STRATMARK to be set we can not clear that
6959 * flag.
6960 */
6961 if (type >= QPCTL) {
6962 ASSERT(type == M_PCPROTO);
6963 stp->sd_flag &= ~STRPRI;
6964 }
6965 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
6966 if (mark & MSGMARKNEXT) {
6967 stp->sd_flag &= ~STRNOTATMARK;
6968 stp->sd_flag |= STRATMARK;
6969 } else if (mark & MSGNOTMARKNEXT) {
6970 stp->sd_flag &= ~STRATMARK;
6971 stp->sd_flag |= STRNOTATMARK;
6972 } else {
6973 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK);
6974 }
6975 } else if (pr && (old_sd_flag & STRATMARK)) {
6976 stp->sd_flag &= ~STRATMARK;
6977 }
6978 }
6979
6980 *flagsp = flg;
6981 *prip = pri;
6982
6983 /*
6984 * Getmsg cleanup processing - if the state of the queue has changed
6985 * some signals may need to be sent and/or poll awakened.
6986 */
6987 getmout:
6988 qbackenable(q, pri);
6989
6990 /*
6991 * We dropped the stream head lock above. Send all M_SIG messages
6992 * before processing stream head for SIGPOLL messages.
6993 */
6994 ASSERT(MUTEX_HELD(&stp->sd_lock));
6995 while ((bp = q->q_first) != NULL &&
6996 (bp->b_datap->db_type == M_SIG)) {
6997 /*
6998 * sd_lock is held so the content of the read queue can not
6999 * change.
7000 */
7001 bp = getq(q);
7002 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG);
7003
7004 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
7005 mutex_exit(&stp->sd_lock);
7006 freemsg(bp);
7007 if (STREAM_NEEDSERVICE(stp))
7008 stream_runservice(stp);
7009 mutex_enter(&stp->sd_lock);
7010 }
7011
7012 /*
7013 * stream head cannot change while we make the determination
7014 * whether or not to send a signal. Drop the flag to allow strrput
7015 * to send firstmsgsigs again.
7016 */
7017 stp->sd_flag &= ~STRGETINPROG;
7018
7019 /*
7020 * If the type of message at the front of the queue changed
7021 * due to the receive, the appropriate signals and pollwakeup events
7022 * are generated. The type of changes are:
7023 * Processed a hipri message, q_first is not hipri.
7024 * Processed a band X message, and q_first is band Y.
7025 * The generated signals and pollwakeups are identical to what
7026 * strrput() generates should the message that is now on q_first
7027 * arrive to an empty read queue.
7028 *
7029 * Note: only strrput will send a signal for a hipri message.
7030 */
7031 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) {
7032 strsigset_t signals = 0;
7033 strpollset_t pollwakeups = 0;
7034
7035 if (flg & MSG_HIPRI) {
7036 /*
7037 * Removed a hipri message. Regular data at
7038 * the front of the queue.
7039 */
7040 if (bp->b_band == 0) {
7041 signals = S_INPUT | S_RDNORM;
7042 pollwakeups = POLLIN | POLLRDNORM;
7043 } else {
7044 signals = S_INPUT | S_RDBAND;
7045 pollwakeups = POLLIN | POLLRDBAND;
7046 }
7047 } else if (pri != bp->b_band) {
7048 /*
7049 * The band is different for the new q_first.
7050 */
7051 if (bp->b_band == 0) {
7052 signals = S_RDNORM;
7053 pollwakeups = POLLIN | POLLRDNORM;
7054 } else {
7055 signals = S_RDBAND;
7056 pollwakeups = POLLIN | POLLRDBAND;
7057 }
7058 }
7059
7060 if (pollwakeups != 0) {
7061 if (pollwakeups == (POLLIN | POLLRDNORM)) {
7062 if (!(stp->sd_rput_opt & SR_POLLIN))
7063 goto no_pollwake;
7064 stp->sd_rput_opt &= ~SR_POLLIN;
7065 }
7066 mutex_exit(&stp->sd_lock);
7067 pollwakeup(&stp->sd_pollist, pollwakeups);
7068 mutex_enter(&stp->sd_lock);
7069 }
7070 no_pollwake:
7071
7072 if (stp->sd_sigflags & signals)
7073 strsendsig(stp->sd_siglist, signals, bp->b_band, 0);
7074 }
7075 mutex_exit(&stp->sd_lock);
7076
7077 rvp->r_val1 = more;
7078 return (error);
7079 #undef _LASTMARK
7080 }
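
/*
 * Illustration only: a minimal user-level sketch of the getmsg(2) loop
 * that this routine implements.  MORECTL/MOREDATA in the return value
 * come from rvp->r_val1 above; process() is a hypothetical placeholder.
 *
 *	char cbuf[64], dbuf[1024];
 *	struct strbuf ctl = { sizeof (cbuf), 0, cbuf };
 *	struct strbuf dat = { sizeof (dbuf), 0, dbuf };
 *	int flags, ret;
 *
 *	do {
 *		flags = 0;
 *		if ((ret = getmsg(fd, &ctl, &dat, &flags)) < 0)
 *			break;
 *		process(cbuf, ctl.len, dbuf, dat.len);
 *	} while (ret & (MORECTL | MOREDATA));
 */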
7081
7082 /*
7083 * Get the next message from the read queue. If the message is
7084 * priority, STRPRI will have been set by strrput(). This flag
7085 * should be reset only when the entire message at the front of the
7086 * queue has been consumed.
7087 *
7088 * If uiop is NULL all data is returned in mctlp.
7089 * Note that a NULL uiop implies that FNDELAY and FNONBLOCK are assumed
7090 * not to be in effect.
7091 * The timeout parameter is in milliseconds; -1 for infinity.
7092 * This routine handles the consolidation private flags:
7093 * MSG_IGNERROR Ignore any stream head error except STPLEX.
7094 * MSG_DELAYERROR Defer the error check until the queue is empty.
7095 * MSG_HOLDSIG Hold signals while waiting for data.
7096 * MSG_IPEEK Only peek at messages.
7097 * MSG_DISCARDTAIL Discard the tail M_DATA part of the message
7098 * that doesn't fit.
7099 * MSG_NOMARK If the message is marked leave it on the queue.
7100 *
7101 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common.
7102 */
7103 int
7104 kstrgetmsg(
7105 struct vnode *vp,
7106 mblk_t **mctlp,
7107 struct uio *uiop,
7108 unsigned char *prip,
7109 int *flagsp,
7110 clock_t timout,
7111 rval_t *rvp)
7112 {
7113 struct stdata *stp;
7114 mblk_t *bp, *nbp;
7115 mblk_t *savemp = NULL;
7116 mblk_t *savemptail = NULL;
7117 int flags;
7118 uint_t old_sd_flag;
7119 int flg;
7120 int more = 0;
7121 int error = 0;
7122 char first = 1;
7123 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
7124 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
7125 unsigned char pri = 0;
7126 queue_t *q;
7127 int pr = 0; /* Partial read successful */
7128 unsigned char type;
7129
7130 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_ENTER,
7131 "kstrgetmsg:%p", vp);
7132
7133 ASSERT(vp->v_stream);
7134 stp = vp->v_stream;
7135 rvp->r_val1 = 0;
7136
7137 mutex_enter(&stp->sd_lock);
7138
7139 if ((error = i_straccess(stp, JCREAD)) != 0) {
7140 mutex_exit(&stp->sd_lock);
7141 return (error);
7142 }
7143
7144 flags = *flagsp;
7145 if (stp->sd_flag & (STRDERR|STPLEX)) {
7146 if ((stp->sd_flag & STPLEX) ||
7147 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == 0) {
7148 error = strgeterr(stp, STRDERR|STPLEX,
7149 (flags & MSG_IPEEK));
7150 if (error != 0) {
7151 mutex_exit(&stp->sd_lock);
7152 return (error);
7153 }
7154 }
7155 }
7156 mutex_exit(&stp->sd_lock);
7157
7158 switch (flags & (MSG_HIPRI|MSG_ANY|MSG_BAND)) {
7159 case MSG_HIPRI:
7160 if (*prip != 0)
7161 return (EINVAL);
7162 break;
7163
7164 case MSG_ANY:
7165 case MSG_BAND:
7166 break;
7167
7168 default:
7169 return (EINVAL);
7170 }
7171
7172 retry:
7173 q = _RD(stp->sd_wrq);
7174 mutex_enter(&stp->sd_lock);
7175 old_sd_flag = stp->sd_flag;
7176 mark = 0;
7177 for (;;) {
7178 int done = 0;
7179 int waitflag;
7180 int fmode;
7181 mblk_t *q_first = q->q_first;
7182
7183 /*
7184 * This section of the code operates just like the code
7185 * in strgetmsg(). There is a comment there about what
7186 * is going on here.
7187 */
7188 if (!(flags & (MSG_HIPRI|MSG_BAND))) {
7189 /* Asking for normal, band0 data */
7190 bp = strget(stp, q, uiop, first, &error);
7191 ASSERT(MUTEX_HELD(&stp->sd_lock));
7192 if (bp != NULL) {
7193 if (DB_TYPE(bp) == M_SIG) {
7194 strsignal_nolock(stp, *bp->b_rptr,
7195 bp->b_band);
7196 freemsg(bp);
7197 continue;
7198 } else {
7199 break;
7200 }
7201 }
7202 if (error != 0) {
7203 goto getmout;
7204 }
7205 /*
7206 * We can't depend on the value of STRPRI here because
7207 * the stream head may be in transit. Therefore, we
7208 * must look at the type of the first message to
7209 * determine if a high priority message is waiting.
7210 */
7211 } else if ((flags & MSG_HIPRI) && q_first != NULL &&
7212 DB_TYPE(q_first) >= QPCTL &&
7213 (bp = getq_noenab(q, 0)) != NULL) {
7214 ASSERT(DB_TYPE(bp) >= QPCTL);
7215 break;
7216 } else if ((flags & MSG_BAND) && q_first != NULL &&
7217 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) &&
7218 (bp = getq_noenab(q, 0)) != NULL) {
7219 /*
7220 * Asked for at least band "prip" and got either at
7221 * least that band or a hipri message.
7222 */
7223 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL);
7224 if (DB_TYPE(bp) == M_SIG) {
7225 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
7226 freemsg(bp);
7227 continue;
7228 } else {
7229 break;
7230 }
7231 }
7232
7233 /* No data. Time to sleep? */
7234 qbackenable(q, 0);
7235
7236 /*
7237 * Delayed error notification?
7238 */
7239 if ((stp->sd_flag & (STRDERR|STPLEX)) &&
7240 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == MSG_DELAYERROR) {
7241 error = strgeterr(stp, STRDERR|STPLEX,
7242 (flags & MSG_IPEEK));
7243 if (error != 0) {
7244 mutex_exit(&stp->sd_lock);
7245 return (error);
7246 }
7247 }
7248
7249 /*
7250 * If STRHUP or STREOF, return 0 length control and data.
7251 * If a read(fd,buf,0) has been done, do not sleep, just
7252 * return.
7253 *
7254 * If mctlp == NULL and uiop == NULL, then the code will
7255 * do the strwaitq. This is an understood way of saying
7256 * sleep "polling" until a message is received.
7257 */
7258 if ((stp->sd_flag & (STRHUP|STREOF)) ||
7259 (uiop != NULL && uiop->uio_resid == 0)) {
7260 if (mctlp != NULL)
7261 *mctlp = NULL;
7262 *flagsp = 0;
7263 mutex_exit(&stp->sd_lock);
7264 return (0);
7265 }
7266
7267 waitflag = GETWAIT;
7268 if (flags &
7269 (MSG_HOLDSIG|MSG_IGNERROR|MSG_IPEEK|MSG_DELAYERROR)) {
7270 if (flags & MSG_HOLDSIG)
7271 waitflag |= STR_NOSIG;
7272 if (flags & MSG_IGNERROR)
7273 waitflag |= STR_NOERROR;
7274 if (flags & MSG_IPEEK)
7275 waitflag |= STR_PEEK;
7276 if (flags & MSG_DELAYERROR)
7277 waitflag |= STR_DELAYERR;
7278 }
7279 if (uiop != NULL)
7280 fmode = uiop->uio_fmode;
7281 else
7282 fmode = 0;
7283
7284 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_WAIT,
7285 "kstrgetmsg calls strwaitq:%p, %p",
7286 vp, uiop);
7287 if (((error = strwaitq(stp, waitflag, (ssize_t)0,
7288 fmode, timout, &done))) != 0 || done) {
7289 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_DONE,
7290 "kstrgetmsg error or done:%p, %p",
7291 vp, uiop);
7292 mutex_exit(&stp->sd_lock);
7293 return (error);
7294 }
7295 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_AWAKE,
7296 "kstrgetmsg awakes:%p, %p", vp, uiop);
7297 if ((error = i_straccess(stp, JCREAD)) != 0) {
7298 mutex_exit(&stp->sd_lock);
7299 return (error);
7300 }
7301 first = 0;
7302 }
7303 ASSERT(bp != NULL);
7304 /*
7305 * Extract any mark information. If the message is not completely
7306 * consumed this information will be put in the mblk
7307 * that is putback.
7308 * If MSGMARKNEXT is set and the message is completely consumed
7309 * the STRATMARK flag will be set below. Likewise, if
7310 * MSGNOTMARKNEXT is set and the message is
7311 * completely consumed STRNOTATMARK will be set.
7312 */
7313 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
7314 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
7315 (MSGMARKNEXT|MSGNOTMARKNEXT));
7316 pri = bp->b_band;
7317 if (mark != 0) {
7318 /*
7319 * If the caller doesn't want the mark, return.
7320 * Used to implement MSG_WAITALL in sockets.
7321 */
7322 if (flags & MSG_NOMARK) {
7323 putback(stp, q, bp, pri);
7324 qbackenable(q, pri);
7325 mutex_exit(&stp->sd_lock);
7326 return (EWOULDBLOCK);
7327 }
7328 if (bp == stp->sd_mark) {
7329 mark |= _LASTMARK;
7330 stp->sd_mark = NULL;
7331 }
7332 }
7333
7334 /*
7335 * keep track of the first message type
7336 */
7337 type = bp->b_datap->db_type;
7338
7339 if (bp->b_datap->db_type == M_PASSFP) {
7340 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7341 stp->sd_mark = bp;
7342 bp->b_flag |= mark & ~_LASTMARK;
7343 putback(stp, q, bp, pri);
7344 qbackenable(q, pri);
7345 mutex_exit(&stp->sd_lock);
7346 return (EBADMSG);
7347 }
7348 ASSERT(type != M_SIG);
7349
7350 if (flags & MSG_IPEEK) {
7351 /*
7352 * Clear any struioflag - we do the uiomove over again
7353 * when peeking since it simplifies the code.
7354 *
7355 * Dup the message and put the original back on the queue.
7356 * If dupmsg() fails, try again with copymsg() to see if
7357 * there is indeed a shortage of memory. dupmsg() may fail
7358 * if db_ref in any of the messages reaches its limit.
7359 */
7360
7361 if ((nbp = dupmsg(bp)) == NULL && (nbp = copymsg(bp)) == NULL) {
7362 /*
7363 * Restore the state of the stream head since we
7364 * need to drop sd_lock (strwaitbuf is sleeping).
7365 */
7366 size_t size = msgdsize(bp);
7367
7368 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7369 stp->sd_mark = bp;
7370 bp->b_flag |= mark & ~_LASTMARK;
7371 putback(stp, q, bp, pri);
7372 mutex_exit(&stp->sd_lock);
7373 error = strwaitbuf(size, BPRI_HI);
7374 if (error) {
7375 /*
7376 * There is no net change to the queue thus
7377 * no need to qbackenable.
7378 */
7379 return (error);
7380 }
7381 goto retry;
7382 }
7383
7384 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7385 stp->sd_mark = bp;
7386 bp->b_flag |= mark & ~_LASTMARK;
7387 putback(stp, q, bp, pri);
7388 bp = nbp;
7389 }
7390
7391 /*
7392 * Set this flag so strrput will not generate signals. Need to
7393 * make sure this flag is cleared before leaving this routine,
7394 * else signals will stop being sent.
7395 */
7396 stp->sd_flag |= STRGETINPROG;
7397 mutex_exit(&stp->sd_lock);
7398
7399 if ((stp->sd_rputdatafunc != NULL) && (DB_TYPE(bp) == M_DATA)) {
7400 mblk_t *tmp, *prevmp;
7401
7402 /*
7403 * Put first non-data mblk back to stream head and
7404 * cut the mblk chain so sd_rputdatafunc only sees
7405 * M_DATA mblks. We can skip the first mblk since it
7406 * is M_DATA according to the condition above.
7407 */
7408 for (prevmp = bp, tmp = bp->b_cont; tmp != NULL;
7409 prevmp = tmp, tmp = tmp->b_cont) {
7410 if (DB_TYPE(tmp) != M_DATA) {
7411 prevmp->b_cont = NULL;
7412 mutex_enter(&stp->sd_lock);
7413 putback(stp, q, tmp, tmp->b_band);
7414 mutex_exit(&stp->sd_lock);
7415 break;
7416 }
7417 }
7418
7419 bp = (stp->sd_rputdatafunc)(stp->sd_vnode, bp,
7420 NULL, NULL, NULL, NULL);
7421
7422 if (bp == NULL)
7423 goto retry;
7424 }
7425
7426 if (STREAM_NEEDSERVICE(stp))
7427 stream_runservice(stp);
7428
7429 /*
7430 * Set HIPRI flag if message is priority.
7431 */
7432 if (type >= QPCTL)
7433 flg = MSG_HIPRI;
7434 else
7435 flg = MSG_BAND;
7436
7437 /*
7438 * First process PROTO or PCPROTO blocks, if any.
7439 */
7440 if (mctlp != NULL && type != M_DATA) {
7441 mblk_t *nbp;
7442
7443 *mctlp = bp;
7444 while (bp->b_cont && bp->b_cont->b_datap->db_type != M_DATA)
7445 bp = bp->b_cont;
7446 nbp = bp->b_cont;
7447 bp->b_cont = NULL;
7448 bp = nbp;
7449 }
7450
7451 if (bp && bp->b_datap->db_type != M_DATA) {
7452 /*
7453 * More PROTO blocks in msg. Will only happen if mctlp is NULL.
7454 */
7455 more |= MORECTL;
7456 savemp = bp;
7457 while (bp && bp->b_datap->db_type != M_DATA) {
7458 savemptail = bp;
7459 bp = bp->b_cont;
7460 }
7461 savemptail->b_cont = NULL;
7462 }
7463
7464 /*
7465 * Now process DATA blocks, if any.
7466 */
7467 if (uiop == NULL) {
7468 /* Append data to tail of mctlp */
7469
7470 if (mctlp != NULL) {
7471 mblk_t **mpp = mctlp;
7472
7473 while (*mpp != NULL)
7474 mpp = &((*mpp)->b_cont);
7475 *mpp = bp;
7476 bp = NULL;
7477 }
7478 } else if (uiop->uio_resid >= 0 && bp) {
7479 size_t oldresid = uiop->uio_resid;
7480
7481 /*
7482 * If a streams message is likely to consist
7483 * of many small mblks, it is pulled up into
7484 * one continuous chunk of memory.
7485 * The size of the first mblk may be bogus because
7486 * successive read() calls on the socket reduce
7487 * the size of this mblk until it is exhausted
7488 * and then the code walks on to the next. Thus
7489 * the size of the mblk may not be the original size
7490 * that was passed up, it's simply a remainder
7491 * and hence can be very small without any
7492 * implication that the packet is badly fragmented.
7493 * So the size of the possible second mblk is
7494 * used to spot a badly fragmented packet.
7495 * See the longer comment at the top of this file,
7496 * by the mblk_pull_len declaration.
7497 */
7498
7499 if (bp->b_cont != NULL && MBLKL(bp->b_cont) < mblk_pull_len) {
7500 (void) pullupmsg(bp, -1);
7501 }
7502
7503 bp = struiocopyout(bp, uiop, &error);
7504 if (error != 0) {
7505 if (mctlp != NULL) {
7506 freemsg(*mctlp);
7507 *mctlp = NULL;
7508 } else
7509 freemsg(savemp);
7510 mutex_enter(&stp->sd_lock);
7511 /*
7512 * clear stream head hi pri flag based on
7513 * first message
7514 */
7515 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) {
7516 ASSERT(type == M_PCPROTO);
7517 stp->sd_flag &= ~STRPRI;
7518 }
7519 more = 0;
7520 goto getmout;
7521 }
7522 /*
7523 * (pr == 1) indicates a partial read.
7524 */
7525 if (oldresid > uiop->uio_resid)
7526 pr = 1;
7527 }
7528
7529 if (bp) { /* more data blocks in msg */
7530 more |= MOREDATA;
7531 if (savemp)
7532 savemptail->b_cont = bp;
7533 else
7534 savemp = bp;
7535 }
7536
7537 mutex_enter(&stp->sd_lock);
7538 if (savemp) {
7539 if (flags & (MSG_IPEEK|MSG_DISCARDTAIL)) {
7540 /*
7541 * When MSG_DISCARDTAIL is set or
7542 * when peeking, discard any tail. When peeking, this
7543 * is the tail of the dup that was copied out - the
7544 * message has already been putback on the queue.
7545 * Return MOREDATA to the caller even though the data
7546 * is discarded. This is used by sockets (to
7547 * set MSG_TRUNC).
7548 */
7549 freemsg(savemp);
7550 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) {
7551 ASSERT(type == M_PCPROTO);
7552 stp->sd_flag &= ~STRPRI;
7553 }
7554 } else if (pr && (savemp->b_datap->db_type == M_DATA) &&
7555 msgnodata(savemp)) {
7556 /*
7557 * Avoid queuing a zero-length tail part of
7558 * a message. pr=1 indicates that we read some of
7559 * the message.
7560 */
7561 freemsg(savemp);
7562 more &= ~MOREDATA;
7563 if (type >= QPCTL) {
7564 ASSERT(type == M_PCPROTO);
7565 stp->sd_flag &= ~STRPRI;
7566 }
7567 } else {
7568 savemp->b_band = pri;
7569 /*
7570 * If the first message was HIPRI and the one we're
7571 * putting back isn't, then clear STRPRI, otherwise
7572 * set STRPRI again. Note that we must set STRPRI
7573 * again since the flush logic in strrput_nondata()
7574 * may have cleared it while we had sd_lock dropped.
7575 */
7576
7577 if (type >= QPCTL) {
7578 ASSERT(type == M_PCPROTO);
7579 if (queclass(savemp) < QPCTL)
7580 stp->sd_flag &= ~STRPRI;
7581 else
7582 stp->sd_flag |= STRPRI;
7583 } else if (queclass(savemp) >= QPCTL) {
7584 /*
7585 * The first message was not a HIPRI message,
7586 * but the one we are about to putback is.
7587 * For simplicity, we do not allow HIPRI
7588 * messages to be embedded in the message
7589 * body, so just force it to the same type
7590 * as the first message.
7591 */
7592 ASSERT(type == M_DATA || type == M_PROTO);
7593 ASSERT(savemp->b_datap->db_type == M_PCPROTO);
7594 savemp->b_datap->db_type = type;
7595 }
7596 if (mark != 0) {
7597 if ((mark & _LASTMARK) &&
7598 (stp->sd_mark == NULL)) {
7599 /*
7600 * If another marked message arrived
7601 * while sd_lock was not held sd_mark
7602 * would be non-NULL.
7603 */
7604 stp->sd_mark = savemp;
7605 }
7606 savemp->b_flag |= mark & ~_LASTMARK;
7607 }
7608 putback(stp, q, savemp, pri);
7609 }
7610 } else if (!(flags & MSG_IPEEK)) {
7611 /*
7612 * The complete message was consumed.
7613 *
7614 * If another M_PCPROTO arrived while sd_lock was not held
7615 * it would have been discarded since STRPRI was still set.
7616 *
7617 * Move the MSG*MARKNEXT information
7618 * to the stream head just in case
7619 * the read queue becomes empty.
7620 * clear stream head hi pri flag based on
7621 * first message
7622 *
7623 * If the stream head was at the mark
7624 * (STRATMARK) before we dropped sd_lock above
7625 * and some data was consumed then we have
7626 * moved past the mark thus STRATMARK is
7627 * cleared. However, if a message arrived in
7628 * strrput during the copyout above causing
7629 * STRATMARK to be set we can not clear that
7630 * flag.
7631 * XXX A "perimeter" would help by single-threading strrput,
7632 * strread, strgetmsg and kstrgetmsg.
7633 */
7634 if (type >= QPCTL) {
7635 ASSERT(type == M_PCPROTO);
7636 stp->sd_flag &= ~STRPRI;
7637 }
7638 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
7639 if (mark & MSGMARKNEXT) {
7640 stp->sd_flag &= ~STRNOTATMARK;
7641 stp->sd_flag |= STRATMARK;
7642 } else if (mark & MSGNOTMARKNEXT) {
7643 stp->sd_flag &= ~STRATMARK;
7644 stp->sd_flag |= STRNOTATMARK;
7645 } else {
7646 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK);
7647 }
7648 } else if (pr && (old_sd_flag & STRATMARK)) {
7649 stp->sd_flag &= ~STRATMARK;
7650 }
7651 }
7652
7653 *flagsp = flg;
7654 *prip = pri;
7655
7656 /*
7657 * Getmsg cleanup processing - if the state of the queue has changed
7658 * some signals may need to be sent and/or poll awakened.
7659 */
7660 getmout:
7661 qbackenable(q, pri);
7662
7663 /*
7664 * We dropped the stream head lock above. Send all M_SIG messages
7665 * before processing stream head for SIGPOLL messages.
7666 */
7667 ASSERT(MUTEX_HELD(&stp->sd_lock));
7668 while ((bp = q->q_first) != NULL &&
7669 (bp->b_datap->db_type == M_SIG)) {
7670 /*
7671 * sd_lock is held so the content of the read queue can not
7672 * change.
7673 */
7674 bp = getq(q);
7675 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG);
7676
7677 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
7678 mutex_exit(&stp->sd_lock);
7679 freemsg(bp);
7680 if (STREAM_NEEDSERVICE(stp))
7681 stream_runservice(stp);
7682 mutex_enter(&stp->sd_lock);
7683 }
7684
7685 /*
7686 * stream head cannot change while we make the determination
7687 * whether or not to send a signal. Drop the flag to allow strrput
7688 * to send firstmsgsigs again.
7689 */
7690 stp->sd_flag &= ~STRGETINPROG;
7691
7692 /*
7693 * If the type of message at the front of the queue changed
7694 * due to the receive, the appropriate signals and pollwakeup events
7695 * are generated. The type of changes are:
7696 * Processed a hipri message, q_first is not hipri.
7697 * Processed a band X message, and q_first is band Y.
7698 * The generated signals and pollwakeups are identical to what
7699 * strrput() generates should the message that is now on q_first
7700 * arrive to an empty read queue.
7701 *
7702 * Note: only strrput will send a signal for a hipri message.
7703 */
7704 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) {
7705 strsigset_t signals = 0;
7706 strpollset_t pollwakeups = 0;
7707
7708 if (flg & MSG_HIPRI) {
7709 /*
7710 * Removed a hipri message. Regular data at
7711 * the front of the queue.
7712 */
7713 if (bp->b_band == 0) {
7714 signals = S_INPUT | S_RDNORM;
7715 pollwakeups = POLLIN | POLLRDNORM;
7716 } else {
7717 signals = S_INPUT | S_RDBAND;
7718 pollwakeups = POLLIN | POLLRDBAND;
7719 }
7720 } else if (pri != bp->b_band) {
7721 /*
7722 * The band is different for the new q_first.
7723 */
7724 if (bp->b_band == 0) {
7725 signals = S_RDNORM;
7726 pollwakeups = POLLIN | POLLRDNORM;
7727 } else {
7728 signals = S_RDBAND;
7729 pollwakeups = POLLIN | POLLRDBAND;
7730 }
7731 }
7732
7733 if (pollwakeups != 0) {
7734 if (pollwakeups == (POLLIN | POLLRDNORM)) {
7735 if (!(stp->sd_rput_opt & SR_POLLIN))
7736 goto no_pollwake;
7737 stp->sd_rput_opt &= ~SR_POLLIN;
7738 }
7739 mutex_exit(&stp->sd_lock);
7740 pollwakeup(&stp->sd_pollist, pollwakeups);
7741 mutex_enter(&stp->sd_lock);
7742 }
7743 no_pollwake:
7744
7745 if (stp->sd_sigflags & signals)
7746 strsendsig(stp->sd_siglist, signals, bp->b_band, 0);
7747 }
7748 mutex_exit(&stp->sd_lock);
7749
7750 rvp->r_val1 = more;
7751 return (error);
7752 #undef _LASTMARK
7753 }
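
/*
 * Illustration only (not compiled): roughly how a kernel consumer might
 * retrieve the next message entirely as an mblk chain by passing a NULL
 * uiop, as described above.  xx_process() is a hypothetical placeholder;
 * the timeout is in milliseconds (-1 for no timeout) and the caller is
 * responsible for freeing the returned chain.
 *
 *	mblk_t *mp = NULL;
 *	unsigned char pri = 0;
 *	int flags = MSG_ANY;
 *	rval_t rval;
 *	int error;
 *
 *	error = kstrgetmsg(vp, &mp, NULL, &pri, &flags, 5 * MILLISEC, &rval);
 *	if (error == 0 && mp != NULL) {
 *		xx_process(mp);
 *		freemsg(mp);
 *	}
 */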
7754
7755 /*
7756 * Put a message downstream.
7757 *
7758 * NOTE: strputmsg and kstrputmsg have much of the logic in common.
7759 */
7760 int
7761 strputmsg(
7762 struct vnode *vp,
7763 struct strbuf *mctl,
7764 struct strbuf *mdata,
7765 unsigned char pri,
7766 int flag,
7767 int fmode)
7768 {
7769 struct stdata *stp;
7770 queue_t *wqp;
7771 mblk_t *mp;
7772 ssize_t msgsize;
7773 ssize_t rmin, rmax;
7774 int error;
7775 struct uio uios;
7776 struct uio *uiop = &uios;
7777 struct iovec iovs;
7778 int xpg4 = 0;
7779
7780 ASSERT(vp->v_stream);
7781 stp = vp->v_stream;
7782 wqp = stp->sd_wrq;
7783
7784 /*
7785 * If it is an XPG4 application, we need to send
7786 * SIGPIPE below
7787 */
7788
7789 xpg4 = (flag & MSG_XPG4) ? 1 : 0;
7790 flag &= ~MSG_XPG4;
7791
7792 if (AU_AUDITING())
7793 audit_strputmsg(vp, mctl, mdata, pri, flag, fmode);
7794
7795 mutex_enter(&stp->sd_lock);
7796
7797 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7798 mutex_exit(&stp->sd_lock);
7799 return (error);
7800 }
7801
7802 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
7803 error = strwriteable(stp, B_FALSE, xpg4);
7804 if (error != 0) {
7805 mutex_exit(&stp->sd_lock);
7806 return (error);
7807 }
7808 }
7809
7810 mutex_exit(&stp->sd_lock);
7811
7812 /*
7813 * Check for legal flag value.
7814 */
7815 switch (flag) {
7816 case MSG_HIPRI:
7817 if ((mctl->len < 0) || (pri != 0))
7818 return (EINVAL);
7819 break;
7820 case MSG_BAND:
7821 break;
7822
7823 default:
7824 return (EINVAL);
7825 }
7826
7827 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_IN,
7828 "strputmsg in:stp %p", stp);
7829
7830 /* get these values from those cached in the stream head */
7831 rmin = stp->sd_qn_minpsz;
7832 rmax = stp->sd_qn_maxpsz;
7833
7834 /*
7835 * Make sure ctl and data sizes together fall within the
7836 * limits of the max and min receive packet sizes and do
7837 * not exceed system limit.
7838 */
7839 ASSERT((rmax >= 0) || (rmax == INFPSZ));
7840 if (rmax == 0) {
7841 return (ERANGE);
7842 }
7843 /*
7844 * Use the MAXIMUM of sd_maxblk and q_maxpsz.
7845 * Needed to prevent partial failures in the strmakedata loop.
7846 */
7847 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk)
7848 rmax = stp->sd_maxblk;
7849
7850 if ((msgsize = mdata->len) < 0) {
7851 msgsize = 0;
7852 rmin = 0; /* no range check for NULL data part */
7853 }
7854 if ((msgsize < rmin) ||
7855 ((msgsize > rmax) && (rmax != INFPSZ)) ||
7856 (mctl->len > strctlsz)) {
7857 return (ERANGE);
7858 }
7859
7860 /*
7861 * Setup uio and iov for data part
7862 */
7863 iovs.iov_base = mdata->buf;
7864 iovs.iov_len = msgsize;
7865 uios.uio_iov = &iovs;
7866 uios.uio_iovcnt = 1;
7867 uios.uio_loffset = 0;
7868 uios.uio_segflg = UIO_USERSPACE;
7869 uios.uio_fmode = fmode;
7870 uios.uio_extflg = UIO_COPY_DEFAULT;
7871 uios.uio_resid = msgsize;
7872 uios.uio_offset = 0;
7873
7874 /* Ignore flow control in strput for HIPRI */
7875 if (flag & MSG_HIPRI)
7876 flag |= MSG_IGNFLOW;
7877
7878 for (;;) {
7879 int done = 0;
7880
7881 /*
7882 * strput will always free the ctl mblk - even when strput
7883 * fails.
7884 */
7885 if ((error = strmakectl(mctl, flag, fmode, &mp)) != 0) {
7886 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7887 "strputmsg out:stp %p out %d error %d",
7888 stp, 1, error);
7889 return (error);
7890 }
7891 /*
7892 * Verify that the whole message can be transferred by
7893 * strput.
7894 */
7895 ASSERT(stp->sd_maxblk == INFPSZ ||
7896 stp->sd_maxblk >= mdata->len);
7897
7898 msgsize = mdata->len;
7899 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag);
7900 mdata->len = msgsize;
7901
7902 if (error == 0)
7903 break;
7904
7905 if (error != EWOULDBLOCK)
7906 goto out;
7907
7908 mutex_enter(&stp->sd_lock);
7909 /*
7910 * Check for a missed wakeup.
7911 * Needed since strput did not hold sd_lock across
7912 * the canputnext.
7913 */
7914 if (bcanputnext(wqp, pri)) {
7915 /* Try again */
7916 mutex_exit(&stp->sd_lock);
7917 continue;
7918 }
7919 TRACE_2(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAIT,
7920 "strputmsg wait:stp %p waits pri %d", stp, pri);
7921 if (((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, fmode, -1,
7922 &done)) != 0) || done) {
7923 mutex_exit(&stp->sd_lock);
7924 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7925 "strputmsg out:q %p out %d error %d",
7926 stp, 0, error);
7927 return (error);
7928 }
7929 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAKE,
7930 "strputmsg wake:stp %p wakes", stp);
7931 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7932 mutex_exit(&stp->sd_lock);
7933 return (error);
7934 }
7935 mutex_exit(&stp->sd_lock);
7936 }
7937 out:
7938 /*
7939 * For historical reasons, applications expect EAGAIN
7940 * when a data mblk could not be allocated, so change
7941 * ENOMEM back to EAGAIN.
7942 */
7943 if (error == ENOMEM)
7944 error = EAGAIN;
7945 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7946 "strputmsg out:stp %p out %d error %d", stp, 2, error);
7947 return (error);
7948 }
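
/*
 * Illustration only: a minimal user-level sketch of putmsg(2), which this
 * routine implements.  A TPI T_UNBIND_REQ is sent as the control part with
 * no data part; a putmsg flags value of 0 reaches this routine as MSG_BAND
 * (band 0), while RS_HIPRI arrives as MSG_HIPRI.
 *
 *	struct T_unbind_req req;
 *	struct strbuf ctl;
 *
 *	req.PRIM_type = T_UNBIND_REQ;
 *	ctl.len = sizeof (req);
 *	ctl.buf = (char *)&req;
 *	if (putmsg(fd, &ctl, NULL, 0) < 0)
 *		perror("putmsg");
 */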
7949
7950 /*
7951 * Put a message downstream.
7952 * Can send only an M_PROTO/M_PCPROTO by passing in a NULL uiop.
7953 * The fmode flag (NDELAY, NONBLOCK) is the OR of the flags in the uio
7954 * and the fmode parameter.
7955 *
7956 * This routine handles the consolidation private flags:
7957 * MSG_IGNERROR Ignore any stream head error except STPLEX.
7958 * MSG_HOLDSIG Hold signals while waiting for data.
7959 * MSG_IGNFLOW Don't check streams flow control.
7960 *
7961 * NOTE: strputmsg and kstrputmsg have much of the logic in common.
7962 */
7963 int
7964 kstrputmsg(
7965 struct vnode *vp,
7966 mblk_t *mctl,
7967 struct uio *uiop,
7968 ssize_t msgsize,
7969 unsigned char pri,
7970 int flag,
7971 int fmode)
7972 {
7973 struct stdata *stp;
7974 queue_t *wqp;
7975 ssize_t rmin, rmax;
7976 int error;
7977
7978 ASSERT(vp->v_stream);
7979 stp = vp->v_stream;
7980 wqp = stp->sd_wrq;
7981 if (AU_AUDITING())
7982 audit_strputmsg(vp, NULL, NULL, pri, flag, fmode);
7983 if (mctl == NULL)
7984 return (EINVAL);
7985
7986 mutex_enter(&stp->sd_lock);
7987
7988 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7989 mutex_exit(&stp->sd_lock);
7990 freemsg(mctl);
7991 return (error);
7992 }
7993
7994 if ((stp->sd_flag & STPLEX) || !(flag & MSG_IGNERROR)) {
7995 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
7996 error = strwriteable(stp, B_FALSE, B_TRUE);
7997 if (error != 0) {
7998 mutex_exit(&stp->sd_lock);
7999 freemsg(mctl);
8000 return (error);
8001 }
8002 }
8003 }
8004
8005 mutex_exit(&stp->sd_lock);
8006
8007 /*
8008 * Check for legal flag value.
8009 */
8010 switch (flag & (MSG_HIPRI|MSG_BAND|MSG_ANY)) {
8011 case MSG_HIPRI:
8012 if (pri != 0) {
8013 freemsg(mctl);
8014 return (EINVAL);
8015 }
8016 break;
8017 case MSG_BAND:
8018 break;
8019 default:
8020 freemsg(mctl);
8021 return (EINVAL);
8022 }
8023
8024 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_IN,
8025 "kstrputmsg in:stp %p", stp);
8026
8027 /* get these values from those cached in the stream head */
8028 rmin = stp->sd_qn_minpsz;
8029 rmax = stp->sd_qn_maxpsz;
8030
8031 /*
8032 * Make sure ctl and data sizes together fall within the
8033 * limits of the max and min receive packet sizes and do
8034 * not exceed system limit.
8035 */
8036 ASSERT((rmax >= 0) || (rmax == INFPSZ));
8037 if (rmax == 0) {
8038 freemsg(mctl);
8039 return (ERANGE);
8040 }
8041 /*
8042 * Use the MAXIMUM of sd_maxblk and q_maxpsz.
8043 * Needed to prevent partial failures in the strmakedata loop.
8044 */
8045 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk)
8046 rmax = stp->sd_maxblk;
8047
8048 if (uiop == NULL) {
8049 msgsize = -1;
8050 rmin = -1; /* no range check for NULL data part */
8051 } else {
8052 /* Use uio flags as well as the fmode parameter flags */
8053 fmode |= uiop->uio_fmode;
8054
8055 if ((msgsize < rmin) ||
8056 ((msgsize > rmax) && (rmax != INFPSZ))) {
8057 freemsg(mctl);
8058 return (ERANGE);
8059 }
8060 }
8061
8062 /* Ignore flow control in strput for HIPRI */
8063 if (flag & MSG_HIPRI)
8064 flag |= MSG_IGNFLOW;
8065
8066 for (;;) {
8067 int done = 0;
8068 int waitflag;
8069 mblk_t *mp;
8070
8071 /*
8072 * strput will always free the ctl mblk - even when strput
8073 * fails. If MSG_IGNFLOW is set then any error returned
8074 * will cause us to break the loop, so we don't need a copy
8075 * of the message. If MSG_IGNFLOW is not set, then we can
8076 * get hit by flow control and be forced to try again. In
8077 * this case we need to have a copy of the message. We
8078 * do this using copymsg since the message may get modified
8079 * by something below us.
8080 *
8081 * We've observed that many TPI providers do not check db_ref
8082 * on the control messages but blindly reuse them for the
8083 * T_OK_ACK/T_ERROR_ACK. Thus using copymsg is more
8084 * friendly to such providers than using dupmsg. Also, note
8085 * that sockfs uses MSG_IGNFLOW for all TPI control messages.
8086 * Only data messages are subject to flow control, hence
8087 * subject to this copymsg.
8088 */
8089 if (flag & MSG_IGNFLOW) {
8090 mp = mctl;
8091 mctl = NULL;
8092 } else {
8093 do {
8094 /*
8095 * If a message has a free pointer, the message
8096 * must be dupmsg to maintain this pointer.
8097 * Code using this facility must be sure
8098 * that modules below will not change the
8099 * contents of the dblk without checking db_ref
8100 * first. If db_ref is > 1, then the module
8101 * needs to do a copymsg first. Otherwise,
8102 * the contents of the dblk may become
8103 * inconsistent because the freemsg/freeb below
8104 * may end up calling atomic_add_32_nv.
8105 * The atomic_add_32_nv in freeb (accessing
8106 * all of db_ref, db_type, db_flags, and
8107 * db_struioflag) does not prevent other threads
8108 * from concurrently trying to modify e.g.
8109 * db_type.
8110 */
8111 if (mctl->b_datap->db_frtnp != NULL)
8112 mp = dupmsg(mctl);
8113 else
8114 mp = copymsg(mctl);
8115
8116 if (mp != NULL)
8117 break;
8118
8119 error = strwaitbuf(msgdsize(mctl), BPRI_MED);
8120 if (error) {
8121 freemsg(mctl);
8122 return (error);
8123 }
8124 } while (mp == NULL);
8125 }
8126 /*
8127 * Verify that all of msgsize can be transferred by
8128 * strput.
8129 */
8130 ASSERT(stp->sd_maxblk == INFPSZ || stp->sd_maxblk >= msgsize);
8131 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag);
8132 if (error == 0)
8133 break;
8134
8135 if (error != EWOULDBLOCK)
8136 goto out;
8137
8138 /*
8139 * If MSG_IGNFLOW is set we should have broken out of the
8140 * loop above.
8141 */
8142 ASSERT(!(flag & MSG_IGNFLOW));
8143 mutex_enter(&stp->sd_lock);
8144 /*
8145 * Check for a missed wakeup.
8146 * Needed since strput did not hold sd_lock across
8147 * the canputnext.
8148 */
8149 if (bcanputnext(wqp, pri)) {
8150 /* Try again */
8151 mutex_exit(&stp->sd_lock);
8152 continue;
8153 }
8154 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAIT,
8155 "kstrputmsg wait:stp %p waits pri %d", stp, pri);
8156
8157 waitflag = WRITEWAIT;
8158 if (flag & (MSG_HOLDSIG|MSG_IGNERROR)) {
8159 if (flag & MSG_HOLDSIG)
8160 waitflag |= STR_NOSIG;
8161 if (flag & MSG_IGNERROR)
8162 waitflag |= STR_NOERROR;
8163 }
8164 if (((error = strwaitq(stp, waitflag,
8165 (ssize_t)0, fmode, -1, &done)) != 0) || done) {
8166 mutex_exit(&stp->sd_lock);
8167 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT,
8168 "kstrputmsg out:stp %p out %d error %d",
8169 stp, 0, error);
8170 freemsg(mctl);
8171 return (error);
8172 }
8173 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAKE,
8174 "kstrputmsg wake:stp %p wakes", stp);
8175 if ((error = i_straccess(stp, JCWRITE)) != 0) {
8176 mutex_exit(&stp->sd_lock);
8177 freemsg(mctl);
8178 return (error);
8179 }
8180 mutex_exit(&stp->sd_lock);
8181 }
8182 out:
8183 freemsg(mctl);
8184 /*
8185 * For historical reasons, applications expect EAGAIN
8186 * when a data mblk could not be allocated, so change
8187 * ENOMEM back to EAGAIN.
8188 */
8189 if (error == ENOMEM)
8190 error = EAGAIN;
8191 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT,
8192 "kstrputmsg out:stp %p out %d error %d", stp, 2, error);
8193 return (error);
8194 }
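
/*
 * Illustration only (not compiled): roughly how sockfs-style code might
 * push a TPI control message downstream with kstrputmsg(), using
 * MSG_IGNFLOW as discussed above so that the control message is not
 * subject to flow control.  kstrputmsg() always consumes the mblk,
 * even on failure.
 *
 *	mblk_t *mp;
 *	struct T_ordrel_req *torr;
 *
 *	if ((mp = allocb(sizeof (*torr), BPRI_MED)) == NULL)
 *		return (ENOMEM);
 *	DB_TYPE(mp) = M_PROTO;
 *	torr = (struct T_ordrel_req *)mp->b_wptr;
 *	torr->PRIM_type = T_ORDREL_REQ;
 *	mp->b_wptr += sizeof (*torr);
 *
 *	error = kstrputmsg(vp, mp, NULL, 0, 0, MSG_BAND | MSG_IGNFLOW, 0);
 */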
8195
8196 /*
8197 * Determines whether the necessary conditions are set on a stream
8198 * for it to be readable, writeable, or have exceptions.
8199 *
8200 * strpoll handles the consolidation private events:
8201 * POLLNOERR Do not return POLLERR even if there are stream
8202 * head errors.
8203 * Used by sockfs.
8204 * POLLRDDATA Do not return POLLIN unless at least one message on
8205 * the queue contains one or more M_DATA mblks. Thus
8206 * when this flag is set a queue with only
8207 * M_PROTO/M_PCPROTO mblks does not return POLLIN.
8208 * Used by sockfs to ignore T_EXDATA_IND messages.
8209 *
8210 * Note: POLLRDDATA assumes that synch streams only return messages with
8211 * an M_DATA attached (i.e. not messages consisting of only
8212 * an M_PROTO/M_PCPROTO part).
8213 */
8214 int
8215 strpoll(
8216 struct stdata *stp,
8217 short events_arg,
8218 int anyyet,
8219 short *reventsp,
8220 struct pollhead **phpp)
8221 {
8222 int events = (ushort_t)events_arg;
8223 int retevents = 0;
8224 mblk_t *mp;
8225 qband_t *qbp;
8226 long sd_flags = stp->sd_flag;
8227 int headlocked = 0;
8228
8229 /*
8230 * For performance, a single 'if' tests for most possible edge
8231 * conditions in one shot
8232 */
8233 if (sd_flags & (STPLEX | STRDERR | STWRERR)) {
8234 if (sd_flags & STPLEX) {
8235 *reventsp = POLLNVAL;
8236 return (EINVAL);
8237 }
8238 if (((events & (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)) &&
8239 (sd_flags & STRDERR)) ||
8240 ((events & (POLLOUT | POLLWRNORM | POLLWRBAND)) &&
8241 (sd_flags & STWRERR))) {
8242 if (!(events & POLLNOERR)) {
8243 *reventsp = POLLERR;
8244 return (0);
8245 }
8246 }
8247 }
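/*
 * A hung-up stream reports POLLHUP and is not checked for write
 * events; otherwise examine the write side for the requested
 * output events.
 */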
8248 if (sd_flags & STRHUP) {
8249 retevents |= POLLHUP;
8250 } else if (events & (POLLWRNORM | POLLWRBAND)) {
8251 queue_t *tq;
8252 queue_t *qp = stp->sd_wrq;
8253
8254 claimstr(qp);
8255 /* Find next module forward that has a service procedure */
8256 tq = qp->q_next->q_nfsrv;
8257 ASSERT(tq != NULL);
8258
8259 if (polllock(&stp->sd_pollist, QLOCK(tq)) != 0) {
8260 releasestr(qp);
8261 *reventsp = POLLNVAL;
8262 return (0);
8263 }
8264 if (events & POLLWRNORM) {
8265 queue_t *sqp;
8266
8267 if (tq->q_flag & QFULL)
8268 /* ensure backq svc procedure runs */
8269 tq->q_flag |= QWANTW;
8270 else if ((sqp = stp->sd_struiowrq) != NULL) {
8271 /* Check sync stream barrier write q */
8272 mutex_exit(QLOCK(tq));
8273 if (polllock(&stp->sd_pollist,
8274 QLOCK(sqp)) != 0) {
8275 releasestr(qp);
8276 *reventsp = POLLNVAL;
8277 return (0);
8278 }
8279 if (sqp->q_flag & QFULL)
8280 /* ensure pollwakeup() is done */
8281 sqp->q_flag |= QWANTWSYNC;
8282 else
8283 retevents |= POLLOUT;
8284 /* Were band write events also requested? */
8285 if (! (events & POLLWRBAND)) {
8286 mutex_exit(QLOCK(sqp));
8287 releasestr(qp);
8288 goto chkrd;
8289 }
8290 mutex_exit(QLOCK(sqp));
8291 if (polllock(&stp->sd_pollist,
8292 QLOCK(tq)) != 0) {
8293 releasestr(qp);
8294 *reventsp = POLLNVAL;
8295 return (0);
8296 }
8297 } else
8298 retevents |= POLLOUT;
8299 }
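/*
 * For band data, walk every allocated band: a flow-controlled band
 * (QB_FULL) is marked QB_WANTW so that a back-enable wakeup occurs,
 * otherwise POLLWRBAND is reported.
 */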
8300 if (events & POLLWRBAND) {
8301 qbp = tq->q_bandp;
8302 if (qbp) {
8303 while (qbp) {
8304 if (qbp->qb_flag & QB_FULL)
8305 qbp->qb_flag |= QB_WANTW;
8306 else
8307 retevents |= POLLWRBAND;
8308 qbp = qbp->qb_next;
8309 }
8310 } else {
8311 retevents |= POLLWRBAND;
8312 }
8313 }
8314 mutex_exit(QLOCK(tq));
8315 releasestr(qp);
8316 }
8317 chkrd:
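/*
 * Read side: a pending high-priority message (STRPRI) satisfies only
 * POLLPRI; otherwise inspect the stream head read queue for the
 * requested input events.
 */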
8318 if (sd_flags & STRPRI) {
8319 retevents |= (events & POLLPRI);
8320 } else if (events & (POLLRDNORM | POLLRDBAND | POLLIN)) {
8321 queue_t *qp = _RD(stp->sd_wrq);
8322 int normevents = (events & (POLLIN | POLLRDNORM));
8323
8324 /*
8325 * Note: Need to do polllock() here since ps_lock may be
8326 * held. See bug 4191544.
8327 */
8328 if (polllock(&stp->sd_pollist, &stp->sd_lock) != 0) {
8329 *reventsp = POLLNVAL;
8330 return (0);
8331 }
8332 headlocked = 1;
8333 mp = qp->q_first;
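/*
 * Walk the messages on the stream head read queue; the first message
 * that qualifies determines whether normal or band input events are
 * reported.
 */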
8334 while (mp) {
8335 /*
8336 * For POLLRDDATA we scan b_cont and b_next until we
8337 * find an M_DATA.
8338 */
8339 if ((events & POLLRDDATA) &&
8340 mp->b_datap->db_type != M_DATA) {
8341 mblk_t *nmp = mp->b_cont;
8342
8343 while (nmp != NULL &&
8344 nmp->b_datap->db_type != M_DATA)
8345 nmp = nmp->b_cont;
8346 if (nmp == NULL) {
8347 mp = mp->b_next;
8348 continue;
8349 }
8350 }
8351 if (mp->b_band == 0)
8352 retevents |= normevents;
8353 else
8354 retevents |= (events & (POLLIN | POLLRDBAND));
8355 break;
8356 }
8357 if (! (retevents & normevents) &&
8358 (stp->sd_wakeq & RSLEEP)) {
8359 /*
8360 * Sync stream barrier read queue has data.
8361 */
8362 retevents |= normevents;
8363 }
8364 /* Treat eof as normal data */
8365 if (sd_flags & STREOF)
8366 retevents |= normevents;
8367 }
8368
8369 *reventsp = (short)retevents;
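/*
 * If any events are ready and the caller did not ask for edge-triggered
 * (POLLET) semantics, return them immediately.  POLLET callers fall
 * through so that the pollhead is still registered below.
 */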
8370 if (retevents && !(events & POLLET)) {
8371 if (headlocked)
8372 mutex_exit(&stp->sd_lock);
8373 return (0);
8374 }
8375
8376 /*
8377 * If poll() has not found any events yet, set up event cell
8378 * to wake up the poll if a requested event occurs on this
8379 * stream. Check for collisions with outstanding poll requests.
8380 */
8381 if (!anyyet) {
8382 *phpp = &stp->sd_pollist;
8383 if (headlocked == 0) {
8384 if (polllock(&stp->sd_pollist, &stp->sd_lock) != 0) {
8385 *reventsp = POLLNVAL;
8386 return (0);
8387 }
8388 headlocked = 1;
8389 }
8390 stp->sd_rput_opt |= SR_POLLIN;
8391 }
8392 if (headlocked)
8393 mutex_exit(&stp->sd_lock);
8394 return (0);
8395 }
8396
8397 /*
8398 * The purpose of putback() is to ensure that sleeping polls/reads
8399 * are awakened when no new messages are arriving at the
8400 * stream head and a message is placed back on the read queue.
8401 *
8402 * sd_lock must be held when messages are placed back on the
8403 * stream head. (getq() holds sd_lock when it removes messages
8404 * from the queue.)
8405 */
8406
8407 static void
8408 putback(struct stdata *stp, queue_t *q, mblk_t *bp, int band)
8409 {
8410 mblk_t *qfirst;
8411 ASSERT(MUTEX_HELD(&stp->sd_lock));
8412
8413 /*
8414 * As a result of lock-step ordering around q_lock and sd_lock,
8415 * it's possible for function calls like putnext() and
8416 * canputnext() to get an inaccurate picture of how much
8417 * data is really being processed at the stream head.
8418 * We only consolidate with existing messages on the queue
8419 * if the length of the message we want to put back is smaller
8420 * than the queue hiwater mark.
8421 */
8422 if ((stp->sd_rput_opt & SR_CONSOL_DATA) &&
8423 (DB_TYPE(bp) == M_DATA) && ((qfirst = q->q_first) != NULL) &&
8424 (DB_TYPE(qfirst) == M_DATA) &&
8425 ((qfirst->b_flag & (MSGMARK|MSGDELIM)) == 0) &&
8426 ((bp->b_flag & (MSGMARK|MSGDELIM|MSGMARKNEXT)) == 0) &&
8427 (mp_cont_len(bp, NULL) < q->q_hiwat)) {
8428 /*
8429 * We use the same logic as defined in strrput()
8430 * but in reverse as we are putting back onto the
8431 * queue and want to retain byte ordering.
8432 * Consolidate M_DATA messages with M_DATA ONLY.
8433 * strrput() allows the consolidation of M_DATA onto
8434 * M_PROTO | M_PCPROTO but not the other way round.
8435 *
8436 * The consolidation does not take place if the message
8437 * we are returning to the queue is marked with either
8438 * of the marks or the delim flag or if q_first
8439 * is marked with MSGMARK. The MSGMARK check is needed to
8440 * handle the odd semantics of MSGMARK where essentially
8441 * the whole message is to be treated as marked.
8442 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from q_first
8443 * to the front of the b_cont chain.
8444 */
8445 rmvq_noenab(q, qfirst);
8446
8447 /*
8448 * The first message in the b_cont list
8449 * tracks MSGMARKNEXT and MSGNOTMARKNEXT.
8450 * We need to handle the case where we
8451 * are appending:
8452 *
8453 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
8454 * 2) a MSGMARKNEXT to a plain message.
8455 * 3) a MSGNOTMARKNEXT to a plain message
8456 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
8457 * message.
8458 *
8459 * Thus we never append a MSGMARKNEXT or
8460 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
8461 */
8462 if (qfirst->b_flag & MSGMARKNEXT) {
8463 bp->b_flag |= MSGMARKNEXT;
8464 bp->b_flag &= ~MSGNOTMARKNEXT;
8465 qfirst->b_flag &= ~MSGMARKNEXT;
8466 } else if (qfirst->b_flag & MSGNOTMARKNEXT) {
8467 bp->b_flag |= MSGNOTMARKNEXT;
8468 qfirst->b_flag &= ~MSGNOTMARKNEXT;
8469 }
8470
8471 linkb(bp, qfirst);
8472 }
8473 (void) putbq(q, bp);
8474
8475 /*
8476 * A message may have come in when the sd_lock was dropped in the
8477 * calling routine. If that is the case and STR*ATMARK info was
8478 * received, we need to move it from the stream head to q_last
8479 * so that SIOCATMARK can return the proper value.
8480 */
8481 if (stp->sd_flag & (STRATMARK | STRNOTATMARK)) {
8482 unsigned short *flagp = &q->q_last->b_flag;
8483 uint_t b_flag = (uint_t)*flagp;
8484
8485 if (stp->sd_flag & STRATMARK) {
8486 b_flag &= ~MSGNOTMARKNEXT;
8487 b_flag |= MSGMARKNEXT;
8488 stp->sd_flag &= ~STRATMARK;
8489 } else {
8490 b_flag &= ~MSGMARKNEXT;
8491 b_flag |= MSGNOTMARKNEXT;
8492 stp->sd_flag &= ~STRNOTATMARK;
8493 }
8494 *flagp = (unsigned short) b_flag;
8495 }
8496
8497 #ifdef DEBUG
8498 /*
8499 * Make sure that the flags are not messed up.
8500 */
8501 {
8502 mblk_t *mp;
8503 mp = q->q_last;
8504 while (mp != NULL) {
8505 ASSERT((mp->b_flag & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
8506 (MSGMARKNEXT|MSGNOTMARKNEXT));
8507 mp = mp->b_cont;
8508 }
8509 }
8510 #endif
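/*
 * Only issue read wakeups if the message just put back is now at the
 * front of the queue; otherwise data was already available ahead of it
 * and any necessary wakeups have been done.
 */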
8511 if (q->q_first == bp) {
8512 short pollevents;
8513
8514 if (stp->sd_flag & RSLEEP) {
8515 stp->sd_flag &= ~RSLEEP;
8516 cv_broadcast(&q->q_wait);
8517 }
8518 if (stp->sd_flag & STRPRI) {
8519 pollevents = POLLPRI;
8520 } else {
8521 if (band == 0) {
8522 if (!(stp->sd_rput_opt & SR_POLLIN))
8523 return;
8524 stp->sd_rput_opt &= ~SR_POLLIN;
8525 pollevents = POLLIN | POLLRDNORM;
8526 } else {
8527 pollevents = POLLIN | POLLRDBAND;
8528 }
8529 }
8530 mutex_exit(&stp->sd_lock);
8531 pollwakeup(&stp->sd_pollist, pollevents);
8532 mutex_enter(&stp->sd_lock);
8533 }
8534 }
8535
8536 /*
8537 * Return the held vnode attached to the stream head of a
8538 * given queue.
8539 * It is the responsibility of the calling routine to ensure
8540 * that the queue does not go away (e.g. pop).
8541 */
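/*
 * Illustrative only: since the vnode is returned held, a caller is
 * expected to drop the hold with VN_RELE() once it is finished with it.
 */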
8542 vnode_t *
8543 strq2vp(queue_t *qp)
8544 {
8545 vnode_t *vp;
8546 vp = STREAM(qp)->sd_vnode;
8547 ASSERT(vp != NULL);
8548 VN_HOLD(vp);
8549 return (vp);
8550 }
8551
8552 /*
8553 * Return the stream head write queue for the given vp.
8554 * It is the responsibility of the calling routine to ensure
8555 * that the stream or vnode does not close.
8556 */
8557 queue_t *
8558 strvp2wq(vnode_t *vp)
8559 {
8560 ASSERT(vp->v_stream != NULL);
8561 return (vp->v_stream->sd_wrq);
8562 }
8563
8564 /*
8565 * Wake up any pollers sleeping on the stream head.
8566 * It is the responsibility of the calling routine to ensure
8567 * that the stream or vnode does not close.
8568 */
8569 void
8570 strpollwakeup(vnode_t *vp, short event)
8571 {
8572 ASSERT(vp->v_stream);
8573 pollwakeup(&vp->v_stream->sd_pollist, event);
8574 }
8575
8576 /*
8577 * Mate the stream heads of two vnodes together. If the two vnodes are the
8578 * same, we just make the write-side point at the read-side -- otherwise,
8579 * we do a full mate. Only works on vnodes associated with streams that are
8580 * still being built and thus have only a stream head.
8581 */
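/*
 * Resulting topology (see the code below):
 *
 *	same vnode ("twist"):	wrq->q_next == _RD(wrq)
 *	different vnodes:	wrq1->q_next == _RD(wrq2)
 *				wrq2->q_next == _RD(wrq1)
 *				sd_mate links the two stream heads and
 *				STRMATE is set on both.
 */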
8582 void
8583 strmate(vnode_t *vp1, vnode_t *vp2)
8584 {
8585 queue_t *wrq1 = strvp2wq(vp1);
8586 queue_t *wrq2 = strvp2wq(vp2);
8587
8588 /*
8589 * Verify that there are no modules on the stream yet. We also
8590 * rely on the stream head always having a service procedure to
8591 * avoid tweaking q_nfsrv.
8592 */
8593 ASSERT(wrq1->q_next == NULL && wrq2->q_next == NULL);
8594 ASSERT(wrq1->q_qinfo->qi_srvp != NULL);
8595 ASSERT(wrq2->q_qinfo->qi_srvp != NULL);
8596
8597 /*
8598 * If the queues are the same, just twist; otherwise do a full mate.
8599 */
8600 if (wrq1 == wrq2) {
8601 wrq1->q_next = _RD(wrq1);
8602 } else {
8603 wrq1->q_next = _RD(wrq2);
8604 wrq2->q_next = _RD(wrq1);
8605 STREAM(wrq1)->sd_mate = STREAM(wrq2);
8606 STREAM(wrq1)->sd_flag |= STRMATE;
8607 STREAM(wrq2)->sd_mate = STREAM(wrq1);
8608 STREAM(wrq2)->sd_flag |= STRMATE;
8609 }
8610 }
8611
8612 /*
8613 * XXX will go away when the console is correctly fixed.
8614 * Clean up the console PIDs left over from previous I_SETSIG calls;
8615 * called only from cnopen(), which never calls strclean().
8616 */
8617 void
8618 str_cn_clean(struct vnode *vp)
8619 {
8620 strsig_t *ssp, *pssp, *tssp;
8621 struct stdata *stp;
8622 struct pid *pidp;
8623 int update = 0;
8624
8625 ASSERT(vp->v_stream);
8626 stp = vp->v_stream;
8627 pssp = NULL;
8628 mutex_enter(&stp->sd_lock);
8629 ssp = stp->sd_siglist;
8630 while (ssp) {
8631 mutex_enter(&pidlock);
8632 pidp = ssp->ss_pidp;
8633 /*
8634 * Get rid of PID if the proc is gone.
8635 */
8636 if (pidp->pid_prinactive) {
8637 tssp = ssp->ss_next;
8638 if (pssp)
8639 pssp->ss_next = tssp;
8640 else
8641 stp->sd_siglist = tssp;
8642 ASSERT(pidp->pid_ref <= 1);
8643 PID_RELE(ssp->ss_pidp);
8644 mutex_exit(&pidlock);
8645 kmem_free(ssp, sizeof (strsig_t));
8646 update = 1;
8647 ssp = tssp;
8648 continue;
8649 } else
8650 mutex_exit(&pidlock);
8651 pssp = ssp;
8652 ssp = ssp->ss_next;
8653 }
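/*
 * If any entries were freed, recompute sd_sigflags as the union of the
 * remaining ss_events so that it stays consistent with sd_siglist.
 */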
8654 if (update) {
8655 stp->sd_sigflags = 0;
8656 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
8657 stp->sd_sigflags |= ssp->ss_events;
8658 }
8659 mutex_exit(&stp->sd_lock);
8660 }
8661
8662 /*
8663 * Return B_TRUE if there is data in the message, B_FALSE otherwise.
8664 */
8665 static boolean_t
8666 msghasdata(mblk_t *bp)
8667 {
8668 for (; bp; bp = bp->b_cont)
8669 if (bp->b_datap->db_type == M_DATA) {
8670 ASSERT(bp->b_wptr >= bp->b_rptr);
8671 if (bp->b_wptr > bp->b_rptr)
8672 return (B_TRUE);
8673 }
8674 return (B_FALSE);
8675 }