/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 */

#include <sys/types.h>
#include <sys/devops.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/stat.h>
#include <sys/poll_impl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/devpoll.h>
#include <sys/rctl.h>
#include <sys/resource.h>
#include <sys/schedctl.h>
#include <sys/epoll.h>

#define	RESERVED	1

/* local data struct */
static dp_entry_t	**devpolltbl;	/* dev poll entries */
static size_t		dptblsize;

static kmutex_t		devpoll_lock;	/* lock protecting dev tbl */
int			devpoll_init;	/* is /dev/poll initialized already */

/* device local functions */

static int dpopen(dev_t *devp, int flag, int otyp, cred_t *credp);
static int dpwrite(dev_t dev, struct uio *uiop, cred_t *credp);
static int dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp);
static int dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp);
static int dpclose(dev_t dev, int flag, int otyp, cred_t *credp);
static dev_info_t *dpdevi;


static struct cb_ops dp_cb_ops = {
	dpopen,			/* open */
	dpclose,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	dpwrite,		/* write */
	dpioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	dppoll,			/* poll */
	ddi_prop_op,		/* prop_op */
	(struct streamtab *)0,	/* streamtab */
	D_MP,			/* flags */
	CB_REV,			/* cb_ops revision */
	nodev,			/* aread */
	nodev			/* awrite */
};

static int dpattach(dev_info_t *, ddi_attach_cmd_t);
static int dpdetach(dev_info_t *, ddi_detach_cmd_t);
static int dpinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

static struct dev_ops dp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dpinfo,			/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dpattach,		/* attach */
	dpdetach,		/* detach */
	nodev,			/* reset */
	&dp_cb_ops,		/* driver operations */
	(struct bus_ops *)NULL,	/* bus operations */
	nulldev,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};


static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - a driver */
	"/dev/poll driver",
	&dp_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

static void pcachelink_assoc(pollcache_t *, pollcache_t *);
static void pcachelink_mark_stale(pollcache_t *);
static void pcachelink_purge_stale(pollcache_t *);
static void pcachelink_purge_all(pollcache_t *);

/*
 * Locking Design
 *
 * The /dev/poll driver shares most of its code with the poll system call,
 * whose code lives in common/syscall/poll.c. In the poll(2) design, the
 * pollcache structure is per lwp. An implicit assumption there is that
 * some portion of the pollcache will never be touched by other lwps; e.g.,
 * in the poll(2) design, no lwp ever needs to grow the bitmap of another
 * lwp. That assumption does not hold for /dev/poll; hence the need for
 * extra locking.
 *
 * To allow more parallelism, each /dev/poll file descriptor (indexed by
 * minor number) has its own lock. Since read (dpioctl) is a much more
 * frequent operation than write, we want to allow multiple reads on the
 * same /dev/poll fd. However, we prevent writes from being starved by
 * giving priority to write operations. Theoretically writes can starve
 * reads as well, but in practice this is unimportant because (1) writes
 * happen less often than reads, and (2) the write operation defines the
 * contents of the cached poll fd set. If writes happen so often that they
 * can starve reads, the cached set is very unstable, and it may not make
 * sense to read such an unstable cache set anyway. Therefore, the
 * writers-starving-readers case is not handled in this design.
 */
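
/*
 * For orientation, a minimal userland sketch of the write/ioctl protocol
 * this driver implements (illustrative only; error handling is omitted and
 * `sock' is a hypothetical descriptor being watched):
 *
 *	int dpfd = open("/dev/poll", O_RDWR);
 *	pollfd_t pfd;
 *	pfd.fd = sock;
 *	pfd.events = POLLIN;
 *	(void) write(dpfd, &pfd, sizeof (pfd));	-- cache the fd (dpwrite)
 *
 *	struct dvpoll dvp;
 *	dvp.dp_fds = &pfd;
 *	dvp.dp_nfds = 1;
 *	dvp.dp_timeout = -1;			-- block indefinitely
 *	int n = ioctl(dpfd, DP_POLL, &dvp);	-- harvest events (dpioctl)
 */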

int
_init()
{
	int error;

	dptblsize = DEVPOLLSIZE;
	devpolltbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
	mutex_init(&devpoll_lock, NULL, MUTEX_DEFAULT, NULL);
	devpoll_init = 1;
	if ((error = mod_install(&modlinkage)) != 0) {
		kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
		devpoll_init = 0;
	}
	return (error);
}

int
_fini()
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}
	mutex_destroy(&devpoll_lock);
	kmem_free(devpolltbl, sizeof (caddr_t) * dptblsize);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
dpattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	if (ddi_create_minor_node(devi, "poll", S_IFCHR, 0, DDI_PSEUDO, NULL)
	    == DDI_FAILURE) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}
	dpdevi = devi;
	return (DDI_SUCCESS);
}

static int
dpdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ddi_remove_minor_node(devi, NULL);
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
dpinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dpdevi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

/*
 * dp_pcache_poll has similar logic to pcache_poll() in poll.c. The major
 * differences are: (1) /dev/poll requires scanning the bitmap starting
 * where it stopped last time, instead of always starting from 0;
 * (2) since the user may not have cleaned up cached fds when they were
 * closed, some polldats in the cache may refer to closed or reused fds.
 * We need to check for those cases.
 *
 * NOTE: Upon closing an fd, automatic poll cache cleanup is done for
 *	 poll(2) caches but NOT for /dev/poll caches. So expect some
 *	 stale entries!
 */
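
/*
 * A userland consumer avoids such stale entries by writing a POLLREMOVE
 * entry when it closes a cached fd. A minimal sketch (illustrative only;
 * `dpfd' and `fd' are hypothetical descriptors):
 *
 *	pollfd_t pfd;
 *	pfd.fd = fd;
 *	pfd.events = POLLREMOVE;
 *	(void) write(dpfd, &pfd, sizeof (pfd));	-- drop fd from the cache
 *	(void) close(fd);
 */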
static int
dp_pcache_poll(dp_entry_t *dpep, void *dpbuf,
    pollcache_t *pcp, nfds_t nfds, int *fdcntp)
{
	int		start, ostart, end;
	int		fdcnt, fd;
	boolean_t	done;
	file_t		*fp;
	short		revent;
	boolean_t	no_wrap;
	pollhead_t	*php;
	polldat_t	*pdp;
	pollfd_t	*pfdp;
	epoll_event_t	*epoll;
	int		error = 0;
	short		mask = POLLRDHUP | POLLWRBAND;
	boolean_t	is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pcp->pc_bitmap == NULL) {
		/*
		 * No need to search because no poll fd
		 * has been cached.
		 */
		return (error);
	}

	if (is_epoll) {
		pfdp = NULL;
		epoll = (epoll_event_t *)dpbuf;
	} else {
		pfdp = (pollfd_t *)dpbuf;
		epoll = NULL;
	}
retry:
	start = ostart = pcp->pc_mapstart;
	end = pcp->pc_mapend;
	php = NULL;

	if (start == 0) {
		/*
		 * Started from the very beginning; no need to wrap around.
		 */
		no_wrap = B_TRUE;
	} else {
		no_wrap = B_FALSE;
	}
	done = B_FALSE;
	fdcnt = 0;
	while ((fdcnt < nfds) && !done) {
		php = NULL;
		revent = 0;
		/*
		 * Examine the bitmap in a circular fashion
		 * to avoid starvation. Always resume from
		 * the last stop. Scan till the end of the map,
		 * then wrap around.
		 */
		fd = bt_getlowbit(pcp->pc_bitmap, start, end);
		ASSERT(fd <= end);
		if (fd >= 0) {
			if (fd == end) {
				if (no_wrap) {
					done = B_TRUE;
				} else {
					start = 0;
					end = ostart - 1;
					no_wrap = B_TRUE;
				}
			} else {
				start = fd + 1;
			}
			pdp = pcache_lookup_fd(pcp, fd);
repoll:
			ASSERT(pdp != NULL);
			ASSERT(pdp->pd_fd == fd);
			if (pdp->pd_fp == NULL) {
				/*
				 * The fd is POLLREMOVed. This fd is
				 * logically no longer cached. So move
				 * on to the next one.
				 */
				continue;
			}
			if ((fp = getf(fd)) == NULL) {
				/*
				 * The fd has been closed, but the user has
				 * not done a POLLREMOVE on this fd yet.
				 * Instead of cleaning it up here implicitly,
				 * we return POLLNVAL. This is consistent
				 * with poll(2) polling a closed fd, and will
				 * hopefully remind the user to do a
				 * POLLREMOVE.
				 */
				if (!is_epoll && pfdp != NULL) {
					pfdp[fdcnt].fd = fd;
					pfdp[fdcnt].revents = POLLNVAL;
					fdcnt++;
					continue;
				}

				/*
				 * In the epoll compatibility case, we actually
				 * perform the implicit removal to remain
				 * closer to the epoll semantics.
				 */
				if (is_epoll) {
					pdp->pd_fp = NULL;
					pdp->pd_events = 0;

					if (pdp->pd_php != NULL) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pdp->pd_php = NULL;
					}

					BT_CLEAR(pcp->pc_bitmap, fd);
					continue;
				}
			}

			if (fp != pdp->pd_fp) {
				/*
				 * The user is polling on a cached fd which
				 * was closed and then reused. Unfortunately
				 * there is no good way to inform the user.
				 * If the file struct is also reused, we may
				 * not be able to detect the fd reuse at all.
				 * As long as this does not cause a system
				 * failure and/or a memory leak, we will play
				 * along. The man page states that if the
				 * user does not clean up closed fds, polling
				 * results will be indeterministic.
				 *
				 * XXX - perhaps log the detection of fd
				 * reuse?
				 */
				pdp->pd_fp = fp;
			}
			/*
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it via the thread_t struct. On the
			 * other hand, changing the VOP_POLL interface
			 * will require every driver/file system poll
			 * routine to change. May want to revisit the
			 * tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pdp->pd_events, 0,
			    &revent, &php, NULL);
			curthread->t_pollcache = NULL;
			releasef(fd);
			if (error != 0) {
				break;
			}

			/*
			 * Layered devices (e.g. console driver)
			 * may change the vnode and thus the pollhead
			 * pointer out from underneath us.
			 */
			if (php != NULL && pdp->pd_php != NULL &&
			    php != pdp->pd_php) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = php;
				pollhead_insert(php, pdp);
				/*
				 * The bit should still be set.
				 */
				ASSERT(BT_TEST(pcp->pc_bitmap, fd));
				goto retry;
			}

			if (revent != 0) {
				if (pfdp != NULL) {
					pfdp[fdcnt].fd = fd;
					pfdp[fdcnt].events = pdp->pd_events;
					pfdp[fdcnt].revents = revent;
				} else if (epoll != NULL) {
					epoll_event_t *ep = &epoll[fdcnt];

					ASSERT(epoll != NULL);
					ep->data.u64 = pdp->pd_epolldata;

					/*
					 * If any of the event bits are set for
					 * which poll and epoll representations
					 * differ, swizzle in the native epoll
					 * values.
					 */
					if (revent & mask) {
						ep->events = (revent & ~mask) |
						    ((revent & POLLRDHUP) ?
						    EPOLLRDHUP : 0) |
						    ((revent & POLLWRBAND) ?
						    EPOLLWRBAND : 0);
					} else {
						ep->events = revent;
					}

					/*
					 * We define POLLWRNORM to be POLLOUT,
					 * but epoll has separate definitions
					 * for them; if POLLOUT is set and the
					 * user has asked for EPOLLWRNORM, set
					 * that as well.
					 */
					if ((revent & POLLOUT) &&
					    (pdp->pd_events & EPOLLWRNORM)) {
						ep->events |= EPOLLWRNORM;
					}
				} else {
					pollstate_t *ps =
					    curthread->t_pollstate;
					/*
					 * The devpoll handle itself is being
					 * polled. Notify the caller of any
					 * readable event(s), leaving as much
					 * state as possible untouched.
					 */
					VERIFY(fdcnt == 0);
					VERIFY(ps != NULL);

					/*
					 * If a call to pollunlock() fails
					 * during VOP_POLL, skip over the fd
					 * and continue polling.
					 *
					 * Otherwise, report that there is an
					 * event pending.
					 */
					if ((ps->ps_flags & POLLSTATE_ULFAIL)
					    != 0) {
						ps->ps_flags &=
						    ~POLLSTATE_ULFAIL;
						continue;
					} else {
						fdcnt++;
						break;
					}
				}

				/*
				 * If POLLET is set, clear the bit in the
				 * bitmap -- which effectively latches the
				 * edge on a pollwakeup() from the driver.
				 */
				if (pdp->pd_events & POLLET)
					BT_CLEAR(pcp->pc_bitmap, fd);

				/*
				 * If POLLONESHOT is set, perform the implicit
				 * POLLREMOVE.
				 */
				if (pdp->pd_events & POLLONESHOT) {
					pdp->pd_fp = NULL;
					pdp->pd_events = 0;

					if (pdp->pd_php != NULL) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pdp->pd_php = NULL;
					}

					BT_CLEAR(pcp->pc_bitmap, fd);
				}

				fdcnt++;
			} else if (php != NULL) {
				/*
				 * We clear a bit or cache a poll fd if
				 * the driver returns a poll head ptr,
				 * which is expected in the case of 0
				 * revents. Some buggy drivers may return
				 * a NULL php pointer with 0 revents. In
				 * that case, we just treat the driver as
				 * "noncacheable" and do not clear the bit
				 * in the bitmap.
				 */
				if ((pdp->pd_php != NULL) &&
				    ((pcp->pc_flag & PC_POLLWAKE) == 0)) {
					BT_CLEAR(pcp->pc_bitmap, fd);
				}
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
					/*
					 * An event of interest may have
					 * arrived between the VOP_POLL() and
					 * the pollhead_insert(); check again.
					 */
					goto repoll;
				}
			}
		} else {
			/*
			 * No bit set in the range. Check for wrap around.
			 */
			if (!no_wrap) {
				start = 0;
				end = ostart - 1;
				no_wrap = B_TRUE;
			} else {
				done = B_TRUE;
			}
		}
	}

	if (!done) {
		pcp->pc_mapstart = start;
	}
	ASSERT(*fdcntp == 0);
	*fdcntp = fdcnt;
	return (error);
}

/*ARGSUSED*/
static int
dpopen(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	minor_t		minordev;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;

	ASSERT(devpoll_init);
	ASSERT(dptblsize <= MAXMIN);
	mutex_enter(&devpoll_lock);
	for (minordev = 0; minordev < dptblsize; minordev++) {
		if (devpolltbl[minordev] == NULL) {
			devpolltbl[minordev] = (dp_entry_t *)RESERVED;
			break;
		}
	}
	if (minordev == dptblsize) {
		dp_entry_t	**newtbl;
		size_t		oldsize;

		/*
		 * Used up every entry in the existing devpoll table.
		 * Grow the table by DEVPOLLSIZE.
		 */
		if ((oldsize = dptblsize) >= MAXMIN) {
			mutex_exit(&devpoll_lock);
			return (ENXIO);
		}
		dptblsize += DEVPOLLSIZE;
		if (dptblsize > MAXMIN) {
			dptblsize = MAXMIN;
		}
		newtbl = kmem_zalloc(sizeof (caddr_t) * dptblsize, KM_SLEEP);
		bcopy(devpolltbl, newtbl, sizeof (caddr_t) * oldsize);
		kmem_free(devpolltbl, sizeof (caddr_t) * oldsize);
		devpolltbl = newtbl;
		devpolltbl[minordev] = (dp_entry_t *)RESERVED;
	}
	mutex_exit(&devpoll_lock);

	dpep = kmem_zalloc(sizeof (dp_entry_t), KM_SLEEP);
	/*
	 * Allocate a pollcache skeleton here. Delay allocating bitmap
	 * structures until dpwrite() time, since we don't know the
	 * optimal size yet. We also delay setting the pid until either
	 * dpwrite() or an attempt to poll on the instance, allowing parents
	 * to create instances of /dev/poll for their children. (In the
	 * epoll compatibility case, this check isn't performed to maintain
	 * semantic compatibility.)
	 */
	pcp = pcache_alloc();
	dpep->dpe_pcache = pcp;
	pcp->pc_pid = -1;
	*devp = makedevice(getmajor(*devp), minordev); /* clone the driver */
	mutex_enter(&devpoll_lock);
	ASSERT(minordev < dptblsize);
	ASSERT(devpolltbl[minordev] == (dp_entry_t *)RESERVED);
	devpolltbl[minordev] = dpep;
	mutex_exit(&devpoll_lock);
	return (0);
}

/*
 * Writing to /dev/poll adds fds to or removes fds from the cached poll fd
 * set, or changes the poll events for a watched fd.
 */
/*ARGSUSED*/
static int
dpwrite(dev_t dev, struct uio *uiop, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	pollfd_t	*pollfdp, *pfdp;
	dvpoll_epollfd_t *epfdp;
	uintptr_t	limit;
	int		error, size;
	ssize_t		uiosize;
	size_t		copysize;
	nfds_t		pollfdnum;
	struct pollhead	*php = NULL;
	polldat_t	*pdp;
	int		fd;
	file_t		*fp;
	boolean_t	is_epoll, fds_added = B_FALSE;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);

	mutex_enter(&dpep->dpe_lock);
	pcp = dpep->dpe_pcache;
	is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0;
	size = (is_epoll) ? sizeof (dvpoll_epollfd_t) : sizeof (pollfd_t);
	mutex_exit(&dpep->dpe_lock);

	if (!is_epoll && curproc->p_pid != pcp->pc_pid) {
		if (pcp->pc_pid != -1) {
			return (EACCES);
		}

		pcp->pc_pid = curproc->p_pid;
	}

	uiosize = uiop->uio_resid;
	pollfdnum = uiosize / size;

	/*
	 * For epoll-enabled handles, restrict the allowed write size to 2.
	 * This corresponds to an epoll_ctl(3C) performing an EPOLL_CTL_MOD
	 * operation, which is expanded into two operations (DEL and ADD).
	 *
	 * All other operations performed through epoll_ctl(3C) will consist of
	 * a single entry.
	 */
	if (is_epoll && pollfdnum > 2) {
		return (EINVAL);
	}
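
	/*
	 * For illustration, an EPOLL_CTL_MOD issued via the epoll
	 * compatibility layer would arrive here as a two-entry write along
	 * these lines (a hedged sketch of the caller's side, not code from
	 * libc; `dpfd', `fd', `new_events', and `new_data' are hypothetical):
	 *
	 *	dvpoll_epollfd_t ops[2];
	 *	ops[0].dpep_pollfd.fd = fd;
	 *	ops[0].dpep_pollfd.events = POLLREMOVE;	-- the DEL half
	 *	ops[1].dpep_pollfd.fd = fd;
	 *	ops[1].dpep_pollfd.events = new_events;	-- the ADD half
	 *	ops[1].dpep_data = new_data;
	 *	(void) write(dpfd, ops, sizeof (ops));
	 */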

	/*
	 * We want to make sure that pollfdnum isn't large enough to DoS us,
	 * but we also don't want to grab p_lock unnecessarily -- so we
	 * perform the full check against our resource limits if and only if
	 * pollfdnum is larger than the known-to-be-sane value of UINT8_MAX.
	 */
	if (pollfdnum > UINT8_MAX) {
		mutex_enter(&curproc->p_lock);
		if (pollfdnum >
		    (uint_t)rctl_enforced_value(rctlproc_legacy[RLIMIT_NOFILE],
		    curproc->p_rctls, curproc)) {
			(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
			    curproc->p_rctls, curproc, RCA_SAFE);
			mutex_exit(&curproc->p_lock);
			return (EINVAL);
		}
		mutex_exit(&curproc->p_lock);
	}

	/*
	 * Copy in the pollfd array. Walk through the array and add
	 * each polled fd to the cached set.
	 */
	pollfdp = kmem_alloc(uiosize, KM_SLEEP);
	limit = (uintptr_t)pollfdp + (pollfdnum * size);

	/*
	 * Although /dev/poll uses the write(2) interface to cache fds, it's
	 * not supposed to function as a seekable device. To prevent the
	 * offset from growing and eventually exceeding the maximum, reset
	 * the offset here for every call.
	 */
	uiop->uio_loffset = 0;

	/*
	 * Use uiocopy instead of uiomove when populating pollfdp, keeping
	 * uio_resid untouched for now. Write syscalls will translate EINTR
	 * into a success if they detect "successfully transferred" data via
	 * an updated uio_resid. Falsely suppressing such errors is
	 * disastrous.
	 */
	if ((error = uiocopy((caddr_t)pollfdp, uiosize, UIO_WRITE, uiop,
	    &copysize)) != 0) {
		kmem_free(pollfdp, uiosize);
		return (error);
	}

	/*
	 * We are about to enter the core portion of dpwrite(). Make sure this
	 * write has exclusive access in this portion of the code, i.e., no
	 * other writers in this code.
	 *
	 * Waiting for all readers to drop their references to the dpe is
	 * unnecessary since the pollcache itself is protected by pc_lock.
	 */
	mutex_enter(&dpep->dpe_lock);
	dpep->dpe_writerwait++;
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0) {
		ASSERT(dpep->dpe_refcnt != 0);

		/*
		 * The epoll API does not allow EINTR as a result when making
		 * modifications to the set of polled fds. Given that write
		 * activity is relatively quick and the size of accepted writes
		 * is limited above to two entries, a signal-ignorant wait is
		 * used here to avoid the EINTR.
		 */
		if (is_epoll) {
			cv_wait(&dpep->dpe_cv, &dpep->dpe_lock);
			continue;
		}

		/*
		 * Non-epoll writers to /dev/poll handles can tolerate EINTR.
		 */
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			dpep->dpe_writerwait--;
			mutex_exit(&dpep->dpe_lock);
			kmem_free(pollfdp, uiosize);
			return (EINTR);
		}
	}
	dpep->dpe_writerwait--;
	dpep->dpe_flag |= DP_WRITER_PRESENT;
	dpep->dpe_refcnt++;

	if (!is_epoll && (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0) {
		/*
		 * The epoll compat mode was enabled while we were waiting to
		 * establish write access. It is not safe to continue since
		 * state was prepared for non-epoll operation.
		 */
		error = EBUSY;
		goto bypass;
	}
	mutex_exit(&dpep->dpe_lock);

	/*
	 * Since dpwrite() may recursively walk an added /dev/poll handle,
	 * pollstate_enter() deadlock and loop detection must be used.
	 */
	(void) pollstate_create();
	VERIFY(pollstate_enter(pcp) == PSE_SUCCESS);

	if (pcp->pc_bitmap == NULL) {
		pcache_create(pcp, pollfdnum);
	}
	for (pfdp = pollfdp; (uintptr_t)pfdp < limit;
	    pfdp = (pollfd_t *)((uintptr_t)pfdp + size)) {
		fd = pfdp->fd;
		if ((uint_t)fd >= P_FINFO(curproc)->fi_nfiles) {
			/*
			 * epoll semantics demand that we return EBADF if our
			 * specified fd is invalid.
			 */
			if (is_epoll) {
				error = EBADF;
				break;
			}

			continue;
		}

		pdp = pcache_lookup_fd(pcp, fd);
		if (pfdp->events != POLLREMOVE) {

			fp = NULL;

			if (pdp == NULL) {
				/*
				 * If we're in epoll compatibility mode, check
				 * that the fd is valid before allocating
				 * anything for it; epoll semantics demand that
				 * we return EBADF if our specified fd is
				 * invalid.
				 */
				if (is_epoll) {
					if ((fp = getf(fd)) == NULL) {
						error = EBADF;
						break;
					}
				}

				pdp = pcache_alloc_fd(0);
				pdp->pd_fd = fd;
				pdp->pd_pcache = pcp;
				pcache_insert_fd(pcp, pdp, pollfdnum);
			} else {
				/*
				 * epoll semantics demand that we error out if
				 * a file descriptor is added twice, which we
				 * check (imperfectly) by checking if we both
				 * have the file descriptor cached and the
				 * file pointer that corresponds to the file
				 * descriptor matches our cached value. If
				 * there is a pointer mismatch, the file
				 * descriptor was closed without being removed.
				 * The converse is clearly not true, however,
				 * so to narrow the window by which a spurious
				 * EEXIST may be returned, we also check if
				 * this fp has been added to an epoll control
				 * descriptor in the past; if it hasn't, we
				 * know that this is due to fp reuse -- it's
				 * not a true EEXIST case. (By performing this
				 * additional check, we limit the window of
				 * spurious EEXIST to situations where a single
				 * file descriptor is being used across two or
				 * more epoll control descriptors -- and even
				 * then, the file descriptor must be closed and
				 * reused in a relatively tight time span.)
				 */
				if (is_epoll) {
					if (pdp->pd_fp != NULL &&
					    (fp = getf(fd)) != NULL &&
					    fp == pdp->pd_fp &&
					    (fp->f_flag2 & FEPOLLED)) {
						error = EEXIST;
						releasef(fd);
						break;
					}

					/*
					 * We have decided that the cached
					 * information was stale: it either
					 * didn't match, or the fp had never
					 * actually been epoll()'d on before.
					 * We need to now clear our pd_events
					 * to assure that we don't mistakenly
					 * operate on cached event disposition.
					 */
					pdp->pd_events = 0;
				}
			}

			if (is_epoll) {
				epfdp = (dvpoll_epollfd_t *)pfdp;
				pdp->pd_epolldata = epfdp->dpep_data;
			}

			ASSERT(pdp->pd_fd == fd);
			ASSERT(pdp->pd_pcache == pcp);
			if (fd >= pcp->pc_mapsize) {
				mutex_exit(&pcp->pc_lock);
				pcache_grow_map(pcp, fd);
				mutex_enter(&pcp->pc_lock);
			}
			if (fd > pcp->pc_mapend) {
				pcp->pc_mapend = fd;
			}
			if (fp == NULL && (fp = getf(fd)) == NULL) {
				/*
				 * The fd is not valid. Since we can't pass
				 * this error back in the write() call, set
				 * the bit in the bitmap to force the DP_POLL
				 * ioctl to examine it.
				 */
				BT_SET(pcp->pc_bitmap, fd);
				pdp->pd_events |= pfdp->events;
				continue;
			}

			/*
			 * To (greatly) reduce EEXIST false positives, we
			 * denote that this fp has been epoll()'d. We do this
			 * regardless of epoll compatibility mode, as the flag
			 * is harmless if not in epoll compatibility mode.
			 */
			fp->f_flag2 |= FEPOLLED;

			/*
			 * Don't do VOP_POLL for an already cached fd with
			 * the same poll events.
			 */
			if ((pdp->pd_events == pfdp->events) &&
			    (pdp->pd_fp == fp)) {
				/*
				 * The events are already cached.
				 */
				releasef(fd);
				continue;
			}

			/*
			 * Do VOP_POLL and cache this poll fd.
			 *
			 * XXX - pollrelock() logic needs to know which
			 * pollcache lock to grab. It'd be a cleaner
			 * solution if we could pass pcp as an argument
			 * in the VOP_POLL interface instead of implicitly
			 * passing it via the thread_t struct. On the
			 * other hand, changing the VOP_POLL interface
			 * will require every driver/file system poll
			 * routine to change. May want to revisit the
			 * tradeoff later.
			 */
			curthread->t_pollcache = pcp;
			error = VOP_POLL(fp->f_vnode, pfdp->events, 0,
			    &pfdp->revents, &php, NULL);
			curthread->t_pollcache = NULL;
			/*
			 * We always set the bit when this fd is cached;
			 * this forces the first DP_POLL to poll this fd.
			 * Real performance gain comes from subsequent
			 * DP_POLL. We also attempt a pollhead_insert();
			 * if it's not possible, we'll do it in dpioctl().
			 */
			BT_SET(pcp->pc_bitmap, fd);
			if (error != 0) {
				releasef(fd);
				break;
			}
			pdp->pd_fp = fp;
			pdp->pd_events |= pfdp->events;
			if (php != NULL) {
				if (pdp->pd_php == NULL) {
					pollhead_insert(php, pdp);
					pdp->pd_php = php;
				} else {
					if (pdp->pd_php != php) {
						pollhead_delete(pdp->pd_php,
						    pdp);
						pollhead_insert(php, pdp);
						pdp->pd_php = php;
					}
				}
			}
			fds_added = B_TRUE;
			releasef(fd);
		} else {
			if (pdp == NULL || pdp->pd_fp == NULL) {
				if (is_epoll) {
					/*
					 * As with the add case (above), epoll
					 * semantics demand that we error out
					 * in this case.
					 */
					error = ENOENT;
					break;
				}

				continue;
			}
			ASSERT(pdp->pd_fd == fd);
			pdp->pd_fp = NULL;
			pdp->pd_events = 0;
			ASSERT(pdp->pd_thread == NULL);
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
			}
			BT_CLEAR(pcp->pc_bitmap, fd);
		}
	}
	/*
	 * Wake any pollcache waiters so they can check the new descriptors.
	 *
	 * Any fds added to a recursion-capable pollcache could themselves be
	 * /dev/poll handles. To ensure that proper event propagation occurs,
	 * parent pollcaches are woken too, so that they can create any needed
	 * pollcache links.
	 */
	if (fds_added) {
		cv_broadcast(&pcp->pc_cv);
		pcache_wake_parents(pcp);
	}
	pollstate_exit(pcp);
	mutex_enter(&dpep->dpe_lock);
bypass:
	dpep->dpe_flag &= ~DP_WRITER_PRESENT;
	dpep->dpe_refcnt--;
	cv_broadcast(&dpep->dpe_cv);
	mutex_exit(&dpep->dpe_lock);
	kmem_free(pollfdp, uiosize);
	if (error == 0) {
		/*
		 * The state of uio_resid is updated only after the pollcache
		 * is successfully modified.
		 */
		uioskip(uiop, copysize);
	}
	return (error);
}

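/*
 * DP_SIGMASK_RESTORE undoes the temporary signal mask installed for
 * DP_PPOLL: unless a signal is pending delivery (lwp_cursig != 0, in which
 * case the mask is restored as part of delivery via T_TOMASK), the original
 * mask saved in lwp_sigoldmask is reinstated and the T_TOMASK flag cleared.
 */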
#define	DP_SIGMASK_RESTORE(ksetp) {					\
	if (ksetp != NULL) {						\
		mutex_enter(&p->p_lock);				\
		if (lwp->lwp_cursig == 0) {				\
			t->t_hold = lwp->lwp_sigoldmask;		\
			t->t_flag &= ~T_TOMASK;				\
		}							\
		mutex_exit(&p->p_lock);					\
	}								\
}

/*ARGSUSED*/
static int
dpioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	hrtime_t	now;
	int		error = 0;
	boolean_t	is_epoll;
	STRUCT_DECL(dvpoll, dvpoll);

	if (cmd == DP_POLL || cmd == DP_PPOLL) {
		/* do this now, before we sleep on DP_WRITER_PRESENT */
		now = gethrtime();
	}

	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	mutex_exit(&devpoll_lock);
	ASSERT(dpep != NULL);
	pcp = dpep->dpe_pcache;

	mutex_enter(&dpep->dpe_lock);
	is_epoll = (dpep->dpe_flag & DP_ISEPOLLCOMPAT) != 0;

	if (cmd == DP_EPOLLCOMPAT) {
		if (dpep->dpe_refcnt != 0) {
			/*
			 * We can't turn on epoll compatibility while there
			 * are outstanding operations.
			 */
			mutex_exit(&dpep->dpe_lock);
			return (EBUSY);
		}

		/*
		 * epoll compatibility is a one-way street: there's no way
		 * to turn it off for a particular open.
		 */
		dpep->dpe_flag |= DP_ISEPOLLCOMPAT;
		mutex_exit(&dpep->dpe_lock);

		return (0);
	}
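
	/*
	 * A consumer enables epoll compatibility immediately after open,
	 * before any other operation has taken a reference. Roughly how
	 * epoll_create(3C) is layered atop this driver (a hedged sketch,
	 * not the actual libc source):
	 *
	 *	int epfd = open("/dev/poll", O_RDWR);
	 *	if (ioctl(epfd, DP_EPOLLCOMPAT, 0) != 0) {
	 *		(void) close(epfd);
	 *		return (-1);
	 *	}
	 *	return (epfd);
	 */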

	if (!is_epoll && curproc->p_pid != pcp->pc_pid) {
		if (pcp->pc_pid != -1) {
			mutex_exit(&dpep->dpe_lock);
			return (EACCES);
		}

		pcp->pc_pid = curproc->p_pid;
	}

	/* Wait until all writers have cleared the handle before continuing */
	while ((dpep->dpe_flag & DP_WRITER_PRESENT) != 0 ||
	    (dpep->dpe_writerwait != 0)) {
		if (!cv_wait_sig_swap(&dpep->dpe_cv, &dpep->dpe_lock)) {
			mutex_exit(&dpep->dpe_lock);
			return (EINTR);
		}
	}
	dpep->dpe_refcnt++;
	mutex_exit(&dpep->dpe_lock);

	switch (cmd) {
	case DP_POLL:
	case DP_PPOLL:
	{
		pollstate_t	*ps;
		nfds_t		nfds;
		int		fdcnt = 0;
		size_t		size, fdsize, dpsize;
		hrtime_t	deadline = 0;
		k_sigset_t	*ksetp = NULL;
		k_sigset_t	kset;
		sigset_t	set;
		kthread_t	*t = curthread;
		klwp_t		*lwp = ttolwp(t);
		struct proc	*p = ttoproc(curthread);

		STRUCT_INIT(dvpoll, mode);

		/*
		 * The dp_setp member is only required/consumed for DP_PPOLL,
		 * which otherwise uses the same structure as DP_POLL.
		 */
		if (cmd == DP_POLL) {
			dpsize = (uintptr_t)STRUCT_FADDR(dvpoll, dp_setp) -
			    (uintptr_t)STRUCT_FADDR(dvpoll, dp_fds);
		} else {
			ASSERT(cmd == DP_PPOLL);
			dpsize = STRUCT_SIZE(dvpoll);
		}

		if ((mode & FKIOCTL) != 0) {
			/* Kernel-internal ioctl call */
			bcopy((caddr_t)arg, STRUCT_BUF(dvpoll), dpsize);
			error = 0;
		} else {
			error = copyin((caddr_t)arg, STRUCT_BUF(dvpoll),
			    dpsize);
		}

		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}

		deadline = STRUCT_FGET(dvpoll, dp_timeout);
		if (deadline > 0) {
			/*
			 * Convert the deadline from relative milliseconds
			 * to absolute nanoseconds. They must wait for at
			 * least a tick.
			 */
			deadline = MSEC2NSEC(deadline);
			deadline = MAX(deadline, nsec_per_tick);
			deadline += now;
		}

		if (cmd == DP_PPOLL) {
			void *setp = STRUCT_FGETP(dvpoll, dp_setp);

			if (setp != NULL) {
				if ((mode & FKIOCTL) != 0) {
					/* Use the signal set directly */
					ksetp = (k_sigset_t *)setp;
				} else {
					if (copyin(setp, &set, sizeof (set))) {
						DP_REFRELE(dpep);
						return (EFAULT);
					}
					sigutok(&set, &kset);
					ksetp = &kset;
				}

				mutex_enter(&p->p_lock);
				schedctl_finish_sigblock(t);
				lwp->lwp_sigoldmask = t->t_hold;
				t->t_hold = *ksetp;
				t->t_flag |= T_TOMASK;

				/*
				 * Like ppoll() with a non-NULL sigset, we'll
				 * call cv_reltimedwait_sig() just to check for
				 * signals. This call will return immediately
				 * with either 0 (signalled) or -1 (no signal).
				 * There are some conditions whereby we can
				 * get 0 from cv_reltimedwait_sig() without
				 * a true signal (e.g., a directed stop), so
				 * we restore our signal mask in the unlikely
				 * event that lwp_cursig is 0.
				 */
				if (!cv_reltimedwait_sig(&t->t_delay_cv,
				    &p->p_lock, 0, TR_CLOCK_TICK)) {
					if (lwp->lwp_cursig == 0) {
						t->t_hold = lwp->lwp_sigoldmask;
						t->t_flag &= ~T_TOMASK;
					}

					mutex_exit(&p->p_lock);

					DP_REFRELE(dpep);
					return (EINTR);
				}

				mutex_exit(&p->p_lock);
			}
		}

		if ((nfds = STRUCT_FGET(dvpoll, dp_nfds)) == 0) {
			/*
			 * We are just using DP_POLL to sleep, so we don't
			 * need any of the devpoll apparatus. Do not check
			 * for signals if we have a zero timeout.
			 */
			DP_REFRELE(dpep);
			if (deadline == 0) {
				DP_SIGMASK_RESTORE(ksetp);
				return (0);
			}

			mutex_enter(&curthread->t_delay_lock);
			while ((error =
			    cv_timedwait_sig_hrtime(&curthread->t_delay_cv,
			    &curthread->t_delay_lock, deadline)) > 0)
				continue;
			mutex_exit(&curthread->t_delay_lock);

			DP_SIGMASK_RESTORE(ksetp);

			return (error == 0 ? EINTR : 0);
		}

		if (is_epoll) {
			size = nfds * (fdsize = sizeof (epoll_event_t));
		} else {
			size = nfds * (fdsize = sizeof (pollfd_t));
		}

		/*
		 * XXX It would be nice not to have to alloc each time, but it
		 * requires another per thread structure hook. This can be
		 * implemented later if data suggests that it's necessary.
		 */
		ps = pollstate_create();

		if (ps->ps_dpbufsize < size) {
			/*
			 * If nfds is larger than twice the current maximum
			 * open file count, we'll silently clamp it. This
			 * only limits our exposure to allocating an
			 * inordinate amount of kernel memory; it doesn't
			 * otherwise affect the semantics. (We have this
			 * check at twice the maximum instead of merely the
			 * maximum because some applications pass an nfds that
			 * is only slightly larger than their limit.)
			 */
			mutex_enter(&p->p_lock);
			if ((nfds >> 1) > p->p_fno_ctl) {
				nfds = p->p_fno_ctl;
				size = nfds * fdsize;
			}
			mutex_exit(&p->p_lock);

			if (ps->ps_dpbufsize < size) {
				kmem_free(ps->ps_dpbuf, ps->ps_dpbufsize);
				ps->ps_dpbuf = kmem_zalloc(size, KM_SLEEP);
				ps->ps_dpbufsize = size;
			}
		}

		VERIFY(pollstate_enter(pcp) == PSE_SUCCESS);
		for (;;) {
			pcp->pc_flag &= ~PC_POLLWAKE;

			/*
			 * Mark all child pcachelinks as stale.
			 * Those which are still part of the tree will be
			 * marked as valid during the poll.
			 */
			pcachelink_mark_stale(pcp);

			error = dp_pcache_poll(dpep, ps->ps_dpbuf,
			    pcp, nfds, &fdcnt);
			if (fdcnt > 0 || error != 0)
				break;

			/* Purge still-stale child pcachelinks */
			pcachelink_purge_stale(pcp);

			/*
			 * A pollwake has happened since we last polled the
			 * cache; poll again.
			 */
			if (pcp->pc_flag & PC_POLLWAKE)
				continue;

			/*
			 * Sleep until we are notified, signaled, or timed out.
			 */
			if (deadline == 0) {
				/* immediate timeout; do not check signals */
				break;
			}

			error = cv_timedwait_sig_hrtime(&pcp->pc_cv,
			    &pcp->pc_lock, deadline);

			/*
			 * If we were awakened by a signal or timeout then
			 * break the loop, else poll again.
			 */
			if (error <= 0) {
				error = (error == 0) ? EINTR : 0;
				break;
			} else {
				error = 0;
			}
		}
		pollstate_exit(pcp);

		DP_SIGMASK_RESTORE(ksetp);

		if (error == 0 && fdcnt > 0) {
			/*
			 * It should be noted that FKIOCTL does not influence
			 * the copyout (vs bcopy) of dp_fds at this time.
			 */
			if (copyout(ps->ps_dpbuf,
			    STRUCT_FGETP(dvpoll, dp_fds), fdcnt * fdsize)) {
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = fdcnt;
		}
		break;
	}

	case DP_ISPOLLED:
	{
		pollfd_t	pollfd;
		polldat_t	*pdp;

		STRUCT_INIT(dvpoll, mode);
		error = copyin((caddr_t)arg, &pollfd, sizeof (pollfd_t));
		if (error) {
			DP_REFRELE(dpep);
			return (EFAULT);
		}
		mutex_enter(&pcp->pc_lock);
		if (pcp->pc_hash == NULL) {
			/*
			 * No need to search because no poll fd
			 * has been cached.
			 */
			mutex_exit(&pcp->pc_lock);
			DP_REFRELE(dpep);
			return (0);
		}
		if (pollfd.fd < 0) {
			mutex_exit(&pcp->pc_lock);
			break;
		}
		pdp = pcache_lookup_fd(pcp, pollfd.fd);
		if ((pdp != NULL) && (pdp->pd_fd == pollfd.fd) &&
		    (pdp->pd_fp != NULL)) {
			pollfd.revents = pdp->pd_events;
			if (copyout(&pollfd, (caddr_t)arg,
			    sizeof (pollfd_t))) {
				mutex_exit(&pcp->pc_lock);
				DP_REFRELE(dpep);
				return (EFAULT);
			}
			*rvalp = 1;
		}
		mutex_exit(&pcp->pc_lock);
		break;
	}

	default:
		DP_REFRELE(dpep);
		return (EINVAL);
	}
	DP_REFRELE(dpep);
	return (error);
}

/*
 * Overview of Recursive Polling
 *
 * It is possible for /dev/poll to poll for events on file descriptors which
 * are themselves /dev/poll handles. Pending events in the child handle are
 * represented as readable data via the POLLIN flag. To limit the surface
 * area, this recursion is presently allowed only on /dev/poll handles which
 * have been placed in epoll mode via the DP_EPOLLCOMPAT ioctl. Recursion
 * depth is limited to 5 in order to be consistent with Linux epoll.
 *
 * Extending dppoll() for VOP_POLL:
 *
 * The recursive /dev/poll implementation begins by extending dppoll() to
 * report when resources contained in the pollcache have relevant event state.
 * At the highest level, it means calling dp_pcache_poll() so it indicates if
 * fd events are present without consuming them or altering the pollcache
 * bitmap. This ensures that a subsequent DP_POLL operation on the bitmap will
 * yield the initiating event. Additionally, the VOP_POLL should return in
 * such a way that dp_pcache_poll() does not clear the parent bitmap entry
 * which corresponds to the child /dev/poll fd. This means that child
 * pollcaches will be checked during every poll, which facilitates the
 * wake-up behavior detailed below.
 *
 * Pollcache Links and Wake Events:
 *
 * Recursive /dev/poll avoids complicated pollcache locking constraints during
 * pollwakeup events by eschewing the traditional pollhead mechanism in favor
 * of a different approach. For each pollcache at the root of a recursive
 * /dev/poll "tree", pcachelink_t structures are established to all child
 * /dev/poll pollcaches. During pollnotify() in a child pollcache, the
 * linked list of pcachelink_t entries is walked, where those marked as valid
 * incur a cv_broadcast to their parent pollcache. Most notably, these
 * pcachelink_t cv wakeups are performed without acquiring pc_lock on the
 * parent pollcache (which would require careful deadlock avoidance). This
 * still allows the woken poll on the parent to discover the pertinent events
 * due to the fact that bitmap entries for the child pollcache are always
 * maintained by the dppoll() logic above.
 *
 * Depth Limiting and Loop Prevention:
 *
 * As each pollcache is encountered (either via DP_POLL or dppoll()), depth and
 * loop constraints are enforced via pollstate_enter(). The pollcache_t
 * pointer is compared against any existing entries in ps_pc_stack and is added
 * to the end if no match (and therefore loop) is found. Once poll operations
 * for a given pollcache_t are complete, pollstate_exit() clears the pointer
 * from the list. The pollstate_enter() and pollstate_exit() functions are
 * responsible for acquiring and releasing pc_lock, respectively.
 *
 * Deadlock Safety:
 *
 * Descending through a tree of recursive /dev/poll handles involves the tricky
 * business of sequentially entering multiple pollcache locks. This tree
 * topology cannot define a lock acquisition order in such a way that it is
 * immune to deadlocks between threads. The pollstate_enter() and
 * pollstate_exit() functions provide an interface for recursive /dev/poll
 * operations to safely lock pollcaches while failing gracefully in the face of
 * deadlocking topologies. (See pollstate_contend() for more detail about how
 * deadlocks are detected and resolved.)
 */
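
/*
 * For illustration, the recursion described above corresponds to userland
 * usage along these lines (a hedged sketch using the epoll compatibility
 * interfaces; error handling omitted):
 *
 *	int inner = epoll_create(1);
 *	int outer = epoll_create(1);
 *	struct epoll_event ev;
 *	ev.events = EPOLLIN;
 *	ev.data.fd = inner;
 *	(void) epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
 *
 * An event arriving in `inner' is then observed as EPOLLIN readability on
 * `inner' when waiting on `outer'.
 */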

/*ARGSUSED*/
static int
dppoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		res, rc = 0;

	minor = getminor(dev);
	mutex_enter(&devpoll_lock);
	ASSERT(minor < dptblsize);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	mutex_exit(&devpoll_lock);

	mutex_enter(&dpep->dpe_lock);
	if ((dpep->dpe_flag & DP_ISEPOLLCOMPAT) == 0) {
		/* Poll recursion is not yet supported for non-epoll handles */
		*reventsp = POLLERR;
		mutex_exit(&dpep->dpe_lock);
		return (0);
	} else {
		dpep->dpe_refcnt++;
		pcp = dpep->dpe_pcache;
		mutex_exit(&dpep->dpe_lock);
	}

	res = pollstate_enter(pcp);
	if (res == PSE_SUCCESS) {
		nfds_t		nfds = 1;
		int		fdcnt = 0;
		pollstate_t	*ps = curthread->t_pollstate;

		rc = dp_pcache_poll(dpep, NULL, pcp, nfds, &fdcnt);
		if (rc == 0) {
			*reventsp = (fdcnt > 0) ? POLLIN : 0;
		}
		pcachelink_assoc(pcp, ps->ps_pc_stack[0]);
		pollstate_exit(pcp);
	} else {
		switch (res) {
		case PSE_FAIL_DEPTH:
			rc = EINVAL;
			break;
		case PSE_FAIL_LOOP:
		case PSE_FAIL_DEADLOCK:
			rc = ELOOP;
			break;
		default:
			/*
			 * If anything else has gone awry, such as being polled
			 * from an unexpected context, fall back to the
			 * recursion-intolerant response.
			 */
			*reventsp = POLLERR;
			rc = 0;
			break;
		}
	}

	DP_REFRELE(dpep);
	return (rc);
}

/*
 * devpoll close should do enough cleanup before the pollcache is deleted,
 * i.e., it should ensure that no one still references the pollcache
 * afterward. There is no "permission" check in here; any process holding
 * the last reference to this /dev/poll fd can close it.
 */
/*ARGSUSED*/
static int
dpclose(dev_t dev, int flag, int otyp, cred_t *credp)
{
	minor_t		minor;
	dp_entry_t	*dpep;
	pollcache_t	*pcp;
	int		i;
	polldat_t	**hashtbl;
	polldat_t	*pdp;

	minor = getminor(dev);

	mutex_enter(&devpoll_lock);
	dpep = devpolltbl[minor];
	ASSERT(dpep != NULL);
	devpolltbl[minor] = NULL;
	mutex_exit(&devpoll_lock);
	pcp = dpep->dpe_pcache;
	ASSERT(pcp != NULL);
	/*
	 * At this point, no other lwp can access this pollcache via the
	 * /dev/poll fd. This pollcache is going away, so do the clean
	 * up without the pc_lock.
	 */
	hashtbl = pcp->pc_hash;
	for (i = 0; i < pcp->pc_hashsize; i++) {
		for (pdp = hashtbl[i]; pdp; pdp = pdp->pd_hashnext) {
			if (pdp->pd_php != NULL) {
				pollhead_delete(pdp->pd_php, pdp);
				pdp->pd_php = NULL;
				pdp->pd_fp = NULL;
			}
		}
	}
	/*
	 * pollwakeup() may still interact with this pollcache. Wait until
	 * it is done.
	 */
	mutex_enter(&pcp->pc_no_exit);
	ASSERT(pcp->pc_busy >= 0);
	while (pcp->pc_busy > 0)
		cv_wait(&pcp->pc_busy_cv, &pcp->pc_no_exit);
	mutex_exit(&pcp->pc_no_exit);

	/* Clean up any pollcache links created via recursive /dev/poll */
	if (pcp->pc_parents != NULL || pcp->pc_children != NULL) {
		/*
		 * Because of the locking rules for pcachelink manipulation,
		 * acquiring pc_lock is required for this step.
		 */
		mutex_enter(&pcp->pc_lock);
		pcachelink_purge_all(pcp);
		mutex_exit(&pcp->pc_lock);
	}

	pcache_destroy(pcp);
	ASSERT(dpep->dpe_refcnt == 0);
	kmem_free(dpep, sizeof (dp_entry_t));
	return (0);
}

static void
pcachelink_locked_rele(pcachelink_t *pl)
{
	ASSERT(MUTEX_HELD(&pl->pcl_lock));
	VERIFY(pl->pcl_refcnt >= 1);

	pl->pcl_refcnt--;
	if (pl->pcl_refcnt == 0) {
		VERIFY(pl->pcl_state == PCL_INVALID);
		ASSERT(pl->pcl_parent_pc == NULL);
		ASSERT(pl->pcl_child_pc == NULL);
		ASSERT(pl->pcl_parent_next == NULL);
		ASSERT(pl->pcl_child_next == NULL);

		pl->pcl_state = PCL_FREE;
		mutex_destroy(&pl->pcl_lock);
		kmem_free(pl, sizeof (pcachelink_t));
	} else {
		mutex_exit(&pl->pcl_lock);
	}
}

/*
 * Associate parent and child pollcaches via a pcachelink_t. If an existing
 * link (stale or valid) between the two is found, it will be reused. If a
 * suitable link is not found for reuse, a new one will be allocated.
 */
static void
pcachelink_assoc(pollcache_t *child, pollcache_t *parent)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&child->pc_lock));
	ASSERT(MUTEX_HELD(&parent->pc_lock));

	/* Search for an existing link we can reuse. */
	plpn = &child->pc_parents;
	for (pl = child->pc_parents; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		if (pl->pcl_state == PCL_INVALID) {
			/* Clean any invalid links while walking the list */
			*plpn = pl->pcl_parent_next;
			pl->pcl_child_pc = NULL;
			pl->pcl_parent_next = NULL;
			pcachelink_locked_rele(pl);
		} else if (pl->pcl_parent_pc == parent) {
			/* Successfully found parent link */
			ASSERT(pl->pcl_state == PCL_VALID ||
			    pl->pcl_state == PCL_STALE);
			pl->pcl_state = PCL_VALID;
			mutex_exit(&pl->pcl_lock);
			return;
		} else {
			plpn = &pl->pcl_parent_next;
			mutex_exit(&pl->pcl_lock);
		}
	}

	/* No existing link to the parent was found. Create a fresh one. */
	pl = kmem_zalloc(sizeof (pcachelink_t), KM_SLEEP);
	mutex_init(&pl->pcl_lock, NULL, MUTEX_DEFAULT, NULL);

	pl->pcl_parent_pc = parent;
	pl->pcl_child_next = parent->pc_children;
	parent->pc_children = pl;
	pl->pcl_refcnt++;

	pl->pcl_child_pc = child;
	pl->pcl_parent_next = child->pc_parents;
	child->pc_parents = pl;
	pl->pcl_refcnt++;

	pl->pcl_state = PCL_VALID;
}

/*
 * Mark all child links in a pollcache as stale. Any invalid child links found
 * during iteration are purged.
 */
static void
pcachelink_mark_stale(pollcache_t *pcp)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	plpn = &pcp->pc_children;
	for (pl = pcp->pc_children; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		if (pl->pcl_state == PCL_INVALID) {
			/*
			 * Remove any invalid links while we are going to the
			 * trouble of walking the list.
			 */
			*plpn = pl->pcl_child_next;
			pl->pcl_parent_pc = NULL;
			pl->pcl_child_next = NULL;
			pcachelink_locked_rele(pl);
		} else {
			pl->pcl_state = PCL_STALE;
			plpn = &pl->pcl_child_next;
			mutex_exit(&pl->pcl_lock);
		}
	}
}

/*
 * Purge all stale (or invalid) child links from a pollcache.
 */
static void
pcachelink_purge_stale(pollcache_t *pcp)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	plpn = &pcp->pc_children;
	for (pl = pcp->pc_children; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		switch (pl->pcl_state) {
		case PCL_STALE:
			pl->pcl_state = PCL_INVALID;
			/* FALLTHROUGH */
		case PCL_INVALID:
			*plpn = pl->pcl_child_next;
			pl->pcl_parent_pc = NULL;
			pl->pcl_child_next = NULL;
			pcachelink_locked_rele(pl);
			break;
		default:
			plpn = &pl->pcl_child_next;
			mutex_exit(&pl->pcl_lock);
		}
	}
}

/*
 * Purge all child and parent links from a pollcache, regardless of status.
 */
static void
pcachelink_purge_all(pollcache_t *pcp)
{
	pcachelink_t	*pl, **plpn;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));

	plpn = &pcp->pc_parents;
	for (pl = pcp->pc_parents; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		pl->pcl_state = PCL_INVALID;
		*plpn = pl->pcl_parent_next;
		pl->pcl_child_pc = NULL;
		pl->pcl_parent_next = NULL;
		pcachelink_locked_rele(pl);
	}

	plpn = &pcp->pc_children;
	for (pl = pcp->pc_children; pl != NULL; pl = *plpn) {
		mutex_enter(&pl->pcl_lock);
		pl->pcl_state = PCL_INVALID;
		*plpn = pl->pcl_child_next;
		pl->pcl_parent_pc = NULL;
		pl->pcl_child_next = NULL;
		pcachelink_locked_rele(pl);
	}

	ASSERT(pcp->pc_parents == NULL);
	ASSERT(pcp->pc_children == NULL);
}