/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2020 Joyent, Inc.
 */

/*
 * This file contains all the functions required for the interaction of
 * event sources with the event port file system.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/bitmap.h>
#include <sys/rctl.h>
#include <sys/atomic.h>
#include <sys/poll_impl.h>
#include <sys/port_impl.h>

/*
 * Maximum number of elements allowed to be passed in a single call of a
 * port function (port_sendn(), port_getn()). We need to allocate kernel
 * memory for all of them at once, so we can't let it scale without limit.
 */
uint_t port_max_list = PORT_MAX_LIST;
port_control_t port_control;	/* Event port framework main structure */

/*
 * Block other threads from using a port.
 * We enter holding portq->portq_mutex but
 * we may drop and reacquire this lock.
 * Callers must deal with this fact.
 */
void
port_block(port_queue_t *portq)
{
	ASSERT(MUTEX_HELD(&portq->portq_mutex));

	while (portq->portq_flags & PORTQ_BLOCKED)
		cv_wait(&portq->portq_block_cv, &portq->portq_mutex);
	portq->portq_flags |= PORTQ_BLOCKED;
}

/*
 * Undo port_block(portq).
 */
void
port_unblock(port_queue_t *portq)
{
	ASSERT(MUTEX_HELD(&portq->portq_mutex));

	portq->portq_flags &= ~PORTQ_BLOCKED;
	cv_signal(&portq->portq_block_cv);
}

/*
 * Called from pollwakeup() (PORT_SOURCE_FD source) to determine
 * whether the port's fd needs to be notified of poll events. If so,
 * we mark the port to indicate that pollwakeup() is referencing
 * it, so that the port_t does not disappear. pollwakeup()
 * calls port_pollwkdone() after notifying. In port_pollwkdone(),
 * we release the hold on the port_t (clear PORTQ_POLLWK_PEND).
 */
int
port_pollwkup(port_t *pp)
{
	int events = 0;
	port_queue_t *portq;
	portq = &pp->port_queue;
	mutex_enter(&portq->portq_mutex);

	/*
	 * Normally, we should not have a situation where PORTQ_POLLIN
	 * and PORTQ_POLLWK_PEND are set at the same time, but it is
	 * possible. So, in pollwakeup() we ensure that no new fds get
	 * added to the pollhead between the time it notifies poll events
	 * and calls port_pollwkdone(), where we clear the
	 * PORTQ_POLLWK_PEND flag.
	 */
	if (portq->portq_flags & PORTQ_POLLIN &&
	    !(portq->portq_flags & PORTQ_POLLWK_PEND)) {
		portq->portq_flags &= ~PORTQ_POLLIN;
		portq->portq_flags |= PORTQ_POLLWK_PEND;
		events = POLLIN;
	}
	mutex_exit(&portq->portq_mutex);
	return (events);
}

void
port_pollwkdone(port_t *pp)
{
	port_queue_t *portq;
	portq = &pp->port_queue;
	ASSERT(portq->portq_flags & PORTQ_POLLWK_PEND);
	mutex_enter(&portq->portq_mutex);
	portq->portq_flags &= ~PORTQ_POLLWK_PEND;
	cv_signal(&pp->port_cv);
	mutex_exit(&portq->portq_mutex);
}
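
/*
 * A minimal sketch (an assumption, not code from this file) of how a caller
 * in the pollwakeup() path is expected to bracket a PORT_SOURCE_FD
 * notification, where "pp" is the port_t whose fd sits on the pollhead
 * being woken:
 *
 *	int events = port_pollwkup(pp);
 *	if (events != 0) {
 *		(deliver POLLIN to the pollers of the port's fd)
 *		port_pollwkdone(pp);
 *	}
 *
 * port_pollwkup() takes the PORTQ_POLLWK_PEND hold described above and
 * port_pollwkdone() releases it again.
 */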


/*
 * The port_send_event() function is used by all event sources to submit
 * triggered events to a port. All the data required for event management
 * is already stored in the port_kevent_t structure.
 * The event port internal data is stored in the port_kevent_t structure
 * at allocation time (see port_alloc_event()). The data related to
 * the event itself and to the event source management is stored in the
 * port_kevent_t structure between allocation time and submit time
 * (see port_init_event()).
 *
 * This function is often called from interrupt level.
 */
void
port_send_event(port_kevent_t *pkevp)
{
	port_queue_t *portq;

	portq = &pkevp->portkev_port->port_queue;
	mutex_enter(&portq->portq_mutex);

	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		/* Event already in the port queue */
		if (pkevp->portkev_source == PORT_SOURCE_FD) {
			mutex_exit(&pkevp->portkev_lock);
		}
		mutex_exit(&portq->portq_mutex);
		return;
	}

	/* put event in the port queue */
	list_insert_tail(&portq->portq_list, pkevp);
	portq->portq_nent++;

	/*
	 * Remove the PORTQ_WAIT_EVENTS flag to indicate
	 * that new events are available.
	 */
	portq->portq_flags &= ~PORTQ_WAIT_EVENTS;
	pkevp->portkev_flags |= PORT_KEV_DONEQ;	/* event enqueued */

	if (pkevp->portkev_source == PORT_SOURCE_FD) {
		mutex_exit(&pkevp->portkev_lock);
	}

	/* Check if thread is in port_close() waiting for outstanding events */
	if (portq->portq_flags & PORTQ_CLOSE) {
		/* Check if all outstanding events are already in port queue */
		if (pkevp->portkev_port->port_curr <= portq->portq_nent)
			cv_signal(&portq->portq_closecv);
	}

	if (portq->portq_getn == 0) {
		/*
		 * No thread retrieving events -> check if enough events are
		 * available to satisfy waiting threads.
		 */
		if (portq->portq_thread &&
		    (portq->portq_nent >= portq->portq_nget))
			cv_signal(&portq->portq_thread->portget_cv);
	}

	/*
	 * If some thread is polling the port's fd, then notify it.
	 * For the PORT_SOURCE_FD source, we don't need to call pollwakeup()
	 * here as that would result in a recursive call (the PORT_SOURCE_FD
	 * source is pollwakeup() itself). Instead, pollwakeup() itself will
	 * notify the port if it is being polled.
	 */
	if (pkevp->portkev_source != PORT_SOURCE_FD &&
	    portq->portq_flags & PORTQ_POLLIN) {
		port_t *pp;

		portq->portq_flags &= ~PORTQ_POLLIN;
		/*
		 * Need to save port_t for calling pollwakeup since port_getn()
		 * may end up freeing pkevp once portq_mutex is dropped.
		 *
		 * For that matter, if PORTQ_CLOSE is set, all of the port_t
		 * may be freed upon dropping portq_mutex.
		 */
		pp = (portq->portq_flags & PORTQ_CLOSE) ?
		    NULL : pkevp->portkev_port;
		mutex_exit(&portq->portq_mutex);
		if (pp != NULL)
			pollwakeup(&pp->port_pollhd, POLLIN);
	} else {
		mutex_exit(&portq->portq_mutex);
	}
}
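
/*
 * A minimal sketch (an assumption about a typical in-kernel event source,
 * not code taken from this file) of the allocate/initialize/submit
 * lifecycle described above. The object, cookie, callback and argument
 * names are hypothetical; allocation is typically done early (e.g. at
 * system call entry), and port_send_event() may then run at interrupt
 * level:
 *
 *	port_kevent_t *pkevp;
 *	int error;
 *
 *	error = port_alloc_event(port, PORT_ALLOC_DEFAULT, source, &pkevp);
 *	if (error != 0)
 *		return (error);
 *	port_init_event(pkevp, (uintptr_t)my_object, my_cookie,
 *	    my_callback, my_arg);
 *	...
 *	pkevp->portkev_events = events;
 *	port_send_event(pkevp);
 */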

/*
 * The port_alloc_event() function has to be used by all event sources
 * to request a slot for event notification.
 * The slot reservation could be denied because of lack of resources.
 * For that reason the event source should allocate an event slot as early
 * as possible and be prepared to get an error code instead of the
 * port event pointer.
 * All current event sources allocate an event slot during system call
 * entry. They return an error code to the application if an event slot
 * could not be reserved.
 * It is also recommended to associate the event source with the port
 * before any other port function is used.
 * The port argument is a file descriptor obtained by the application as
 * a return value of port_create().
 * Possible values of flags are:
 * PORT_ALLOC_DEFAULT
 *	This is the standard type of port event. port_get(n) will free this
 *	type of event structure as soon as the events are delivered to the
 *	application.
 * PORT_ALLOC_PRIVATE
 *	This type of event is reserved for the private use of the event
 *	source. The port_get(n) function will deliver events of this type to
 *	the application but it will not free the event structure itself.
 *	The event source must free this structure using port_free_event().
 * PORT_ALLOC_CACHED
 *	This type of event is used when the event source maintains its own
 *	cache.
 *	The port_get(n) function will deliver events of this type to
 *	the application but it will not free the event structure itself.
 *	The event source must free this structure using port_free_event().
 */
int
port_alloc_event(int port, int flags, int source, port_kevent_t **pkevpp)
{
	port_t *pp;
	file_t *fp;
	port_kevent_t *pkevp;
	int err;

	if ((fp = getf(port)) == NULL)
		return (EBADF);

	if (fp->f_vnode->v_type != VPORT) {
		releasef(port);
		return (EBADFD);
	}

	pkevp = kmem_cache_alloc(port_control.pc_cache, KM_NOSLEEP);
	if (pkevp == NULL) {
		releasef(port);
		return (ENOMEM);
	}

	/*
	 * port_max_events is controlled by the resource control
	 * process.port-max-events
	 */
	pp = VTOEP(fp->f_vnode);
	mutex_enter(&pp->port_queue.portq_mutex);

	/* Two possible error conditions. */
	if (pp->port_queue.portq_flags & PORTQ_CLOSE)
		err = EBADFD;	/* Mid-close. */
	else if (pp->port_curr >= pp->port_max_events)
		err = EAGAIN;	/* Resources unavailable. */
	else
		err = 0;	/* Good to go! */

	if (err != 0) {
		mutex_exit(&pp->port_queue.portq_mutex);
		kmem_cache_free(port_control.pc_cache, pkevp);
		releasef(port);
		return (err);
	}
	pp->port_curr++;
	mutex_exit(&pp->port_queue.portq_mutex);

	bzero(pkevp, sizeof (port_kevent_t));
	mutex_init(&pkevp->portkev_lock, NULL, MUTEX_DEFAULT, NULL);
	pkevp->portkev_source = source;
	pkevp->portkev_flags = flags;
	pkevp->portkev_pid = curproc->p_pid;
	pkevp->portkev_port = pp;
	*pkevpp = pkevp;
	releasef(port);
	return (0);
}

/*
 * This function is faster than the standard port_alloc_event() and
 * can be used when the event source has already allocated an event from
 * a port.
 */
int
port_dup_event(port_kevent_t *pkevp, port_kevent_t **pkevdupp, int flags)
{
	int error;

	error = port_alloc_event_local(pkevp->portkev_port,
	    pkevp->portkev_source, flags, pkevdupp);
	if (error == 0)
		(*pkevdupp)->portkev_pid = pkevp->portkev_pid;
	return (error);
}

/*
 * port_alloc_event_local() is reserved for internal use only.
 * It does the same job as port_alloc_event() but takes the event port
 * pointer as the first argument.
 * The check of the validity of the port file descriptor is skipped here.
 */
int
port_alloc_event_local(port_t *pp, int source, int flags,
    port_kevent_t **pkevpp)
{
	port_kevent_t *pkevp;
	int err;

	pkevp = kmem_cache_alloc(port_control.pc_cache, KM_NOSLEEP);
	if (pkevp == NULL)
		return (ENOMEM);

	mutex_enter(&pp->port_queue.portq_mutex);

	/* Two possible error conditions. */
	if (pp->port_queue.portq_flags & PORTQ_CLOSE)
		err = EBADFD;	/* Mid-close. */
	else if (pp->port_curr >= pp->port_max_events)
		err = EAGAIN;	/* Resources unavailable. */
	else
		err = 0;	/* Good to go! */

	if (err != 0) {
		mutex_exit(&pp->port_queue.portq_mutex);
		kmem_cache_free(port_control.pc_cache, pkevp);
		return (err);
	}
	pp->port_curr++;
	mutex_exit(&pp->port_queue.portq_mutex);

	bzero(pkevp, sizeof (port_kevent_t));
	mutex_init(&pkevp->portkev_lock, NULL, MUTEX_DEFAULT, NULL);
	pkevp->portkev_flags = flags;
	pkevp->portkev_port = pp;
	pkevp->portkev_source = source;
	pkevp->portkev_pid = curproc->p_pid;
	*pkevpp = pkevp;
	return (0);
}

/*
 * port_alloc_event_block() has the same functionality as port_alloc_event(),
 * except that
 * - it blocks if not enough event slots are available, and
 * - it blocks if not enough memory is available.
 * Currently port_dispatch() uses this function to increase the
 * reliability of event delivery for library event sources.
 */
int
port_alloc_event_block(port_t *pp, int source, int flags,
    port_kevent_t **pkevpp)
{
	port_kevent_t *pkevp =
	    kmem_cache_alloc(port_control.pc_cache, KM_SLEEP);

	mutex_enter(&pp->port_queue.portq_mutex);
	/*
	 * Mid-close check needs to happen immediately before any
	 * resource checking or cv_wait()-ing of any sort.
	 */
	if (pp->port_queue.portq_flags & PORTQ_CLOSE) {
		mutex_exit(&pp->port_queue.portq_mutex);
		kmem_cache_free(port_control.pc_cache, pkevp);
		return (EBADFD);
	}

	while (pp->port_curr >= pp->port_max_events) {
		if (!cv_wait_sig(&pp->port_cv, &pp->port_queue.portq_mutex)) {
			/* signal detected */
			mutex_exit(&pp->port_queue.portq_mutex);
			kmem_cache_free(port_control.pc_cache, pkevp);
			return (EINTR);
		}

		/* Oh, and we have to re-check the close state. */
		if (pp->port_queue.portq_flags & PORTQ_CLOSE) {
			mutex_exit(&pp->port_queue.portq_mutex);
			kmem_cache_free(port_control.pc_cache, pkevp);
			return (EBADFD);
		}
	}
	pp->port_curr++;
	mutex_exit(&pp->port_queue.portq_mutex);

	bzero(pkevp, sizeof (port_kevent_t));
	mutex_init(&pkevp->portkev_lock, NULL, MUTEX_DEFAULT, NULL);
	pkevp->portkev_flags = flags;
	pkevp->portkev_port = pp;
	pkevp->portkev_source = source;
	pkevp->portkev_pid = curproc->p_pid;
	*pkevpp = pkevp;
	return (0);
}
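
/*
 * A minimal sketch (an assumption modelled on the port_dispatch() use case
 * mentioned above, with hypothetical object/event values) of using the
 * blocking allocator when the caller may sleep and wants delivery to be
 * reliable; EINTR is returned on a signal and EBADFD on a closing port:
 *
 *	error = port_alloc_event_block(pp, source, PORT_ALLOC_DEFAULT,
 *	    &pkevp);
 *	if (error != 0)
 *		return (error);
 *	port_init_event(pkevp, object, user, NULL, NULL);
 *	pkevp->portkev_events = events;
 *	port_send_event(pkevp);
 */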

/*
 * Take an event out of the port queue
 */
static void
port_remove_event_doneq(port_kevent_t *pkevp, port_queue_t *portq)
{
	ASSERT(MUTEX_HELD(&portq->portq_mutex));
	list_remove(&portq->portq_list, pkevp);
	portq->portq_nent--;
	pkevp->portkev_flags &= ~PORT_KEV_DONEQ;
}

/*
 * The port_remove_done_event() function takes a fired event out of the
 * port queue.
 * Currently this function is required to cancel a fired event when the
 * application delivers new association data (see port_associate_fd()).
 */
int
port_remove_done_event(port_kevent_t *pkevp)
{
	port_queue_t *portq;
	int removed = 0;

	portq = &pkevp->portkev_port->port_queue;
	mutex_enter(&portq->portq_mutex);
	/* wait for port_get() or port_getn() */
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		/* event still in port queue */
		if (portq->portq_getn) {
			/*
			 * There could still be fired events in the temporary
			 * queue; push those events back to the port queue and
			 * remove the requested event afterwards.
			 */
			port_push_eventq(portq);
		}
		/* now remove event from the port queue */
		port_remove_event_doneq(pkevp, portq);
		removed = 1;
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
	return (removed);
}

/*
 * Return a port event back to the kmem_cache.
 * If the event is currently in the port queue, the event itself will only
 * be marked as invalid. The port_get(n) function will not deliver such
 * events to the application and will return them back to the kmem_cache.
 */
void
port_free_event(port_kevent_t *pkevp)
{
	port_queue_t *portq;
	port_t *pp;

	pp = pkevp->portkev_port;
	if (pp == NULL)
		return;
	if (pkevp->portkev_flags & PORT_ALLOC_PRIVATE) {
		port_free_event_local(pkevp, B_TRUE);
		return;
	}

	portq = &pp->port_queue;
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		pkevp->portkev_flags |= PORT_KEV_FREE;
		pkevp->portkev_callback = NULL;
		port_unblock(portq);
		mutex_exit(&portq->portq_mutex);
		return;
	}
	port_unblock(portq);

	if (pkevp->portkev_flags & PORT_KEV_CACHED) {
		mutex_exit(&portq->portq_mutex);
		return;
	}

	if (--pp->port_curr < pp->port_max_events)
		cv_signal(&pp->port_cv);
	if (portq->portq_flags & PORTQ_CLOSE) {
		/*
		 * Another thread is closing the event port.
		 * That thread will sleep until all allocated event
		 * structures are returned to the event port framework.
		 * The portq_mutex is used to synchronize the status
		 * of the allocated event structures (port_curr).
		 */
		if (pp->port_curr <= portq->portq_nent)
			cv_signal(&portq->portq_closecv);
	}

	/*
	 * We're holding portq_mutex and we decremented port_curr, so don't
	 * have port_free_event_local() do it for us.
	 */
	port_free_event_local(pkevp, B_FALSE);
}

/*
 * This event port internal function is used by port_free_event() and
 * other port internal functions to return event structures back to the
 * kmem_cache.
 *
 * If the caller already holds the portq_mutex, indicate by setting
 * lock_and_decrement to B_FALSE. This function MUST drop portq_mutex as
 * it can call pollwakeup() at its end.
 */
void
port_free_event_local(port_kevent_t *pkevp, boolean_t lock_and_decrement)
{
	port_t *pp = pkevp->portkev_port;
	port_queue_t *portq = &pp->port_queue;
	int wakeup;

	pkevp->portkev_callback = NULL;
	pkevp->portkev_flags = 0;
	pkevp->portkev_port = NULL;
	mutex_destroy(&pkevp->portkev_lock);
	kmem_cache_free(port_control.pc_cache, pkevp);

	if (lock_and_decrement) {
		/*
		 * If we're entering here from outside port_free_event(),
		 * grab the mutex. We enter here if we hadn't already
		 * decremented-and-signaled port_curr.
		 */
		mutex_enter(&portq->portq_mutex);
		if (--pp->port_curr < pp->port_max_events)
			cv_signal(&pp->port_cv);
	}
	ASSERT(MUTEX_HELD(&portq->portq_mutex));

	/*
	 * Don't send the POLLOUT if we're mid-close. We can only send it if
	 * we've dropped the mutex, and if we're closing, dropping the mutex
	 * could lead to another thread freeing pp->port_pollhd and the rest
	 * of *pp.
	 */
	wakeup = ((portq->portq_flags & (PORTQ_POLLOUT | PORTQ_CLOSE)) ==
	    PORTQ_POLLOUT);
	portq->portq_flags &= ~PORTQ_POLLOUT;
	mutex_exit(&portq->portq_mutex);

	/* Submit a POLLOUT event if requested */
	if (wakeup)
		pollwakeup(&pp->port_pollhd, POLLOUT);
}

/*
 * port_init_event(port_kevent_t *pkevp, uintptr_t object, void *user,
 *	int (*port_callback)(void *, int *, pid_t, int, void *), void *sysarg);
 * This function initializes most of the "wired" elements of the port
 * event structure. It is normally used just after the allocation
 * of the port event structure.
 * pkevp	: pointer to the port event structure
 * object	: object associated with this event structure
 * user		: user defined pointer delivered with the association function
 * port_callback:
 *	Address of the callback function which will be called
 *	- just before the event is delivered to the application.
 *	  The callback function is called in user context and can be
 *	  used for copyouts, e.g.
 *	- on close() or dissociation of the event. The sub-system
 *	  must immediately remove every existing association of
 *	  some object with this event.
 * sysarg	: event source proprietary data
 */
void
port_init_event(port_kevent_t *pkevp, uintptr_t object, void *user,
    int (*port_callback)(void *, int *, pid_t, int, void *),
    void *sysarg)
{
	pkevp->portkev_object = object;
	pkevp->portkev_user = user;
	pkevp->portkev_callback = port_callback;
	pkevp->portkev_arg = sysarg;
}
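
/*
 * A minimal sketch (hypothetical names; an assumption, not a callback taken
 * from this file) of the callback shape expected by port_init_event().
 * "flag" is one of the PORT_CALLBACK_* values and "events" points to the
 * event value that would be delivered:
 *
 *	static int
 *	my_source_callback(void *arg, int *events, pid_t pid, int flag,
 *	    void *evp)
 *	{
 *		if (flag == PORT_CALLBACK_DISSOCIATE) {
 *			(drop the source's references to the object)
 *		}
 *		return (0);
 *	}
 *
 *	port_init_event(pkevp, (uintptr_t)my_object, my_cookie,
 *	    my_source_callback, my_source_arg);
 */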

/*
 * This routine removes a portfd_t from the fd cache's hash table.
 */
void
port_pcache_remove_fd(port_fdcache_t *pcp, portfd_t *pfd)
{
	polldat_t *lpdp;
	polldat_t *cpdp;
	portfd_t **bucket;
	polldat_t *pdp = PFTOD(pfd);

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	bucket = PORT_FD_BUCKET(pcp, pdp->pd_fd);
	cpdp = PFTOD(*bucket);
	if (pdp == cpdp) {
		*bucket = PDTOF(pdp->pd_hashnext);
		if (--pcp->pc_fdcount == 0) {
			/*
			 * signal the thread which may have blocked in
			 * port_close_sourcefd() on lastclose waiting
			 * for pc_fdcount to drop to 0.
			 */
			cv_signal(&pcp->pc_lclosecv);
		}
		kmem_free(pfd, sizeof (portfd_t));
		return;
	}

	while (cpdp != NULL) {
		lpdp = cpdp;
		cpdp = cpdp->pd_hashnext;
		if (cpdp == pdp) {
			/* polldat struct found */
			lpdp->pd_hashnext = pdp->pd_hashnext;
			if (--pcp->pc_fdcount == 0) {
				/*
				 * signal the thread which may have blocked in
				 * port_close_sourcefd() on lastclose waiting
				 * for pc_fdcount to drop to 0.
				 */
				cv_signal(&pcp->pc_lclosecv);
			}
			break;
		}
	}
	ASSERT(cpdp != NULL);
	kmem_free(pfd, sizeof (portfd_t));
}

/*
 * The port_push_eventq() function is used to move all remaining events
 * from the temporary queue used in port_get(n) to the standard port
 * queue.
 */
void
port_push_eventq(port_queue_t *portq)
{
	/*
	 * Append temporary portq_get_list to the port queue. On return
	 * the temporary portq_get_list is empty.
	 */
	list_move_tail(&portq->portq_list, &portq->portq_get_list);
	portq->portq_nent += portq->portq_tnent;
	portq->portq_tnent = 0;
}

/*
 * The port_remove_fd_object() function frees all resources associated with
 * the delivered portfd_t structure. Returns 1 if the port_kevent was found
 * and removed from the port queue.
 */
int
port_remove_fd_object(portfd_t *pfd, port_t *pp, port_fdcache_t *pcp)
{
	port_queue_t *portq;
	polldat_t *pdp = PFTOD(pfd);
	port_kevent_t *pkevp;
	int error;
	int removed = 0;

	ASSERT(MUTEX_HELD(&pcp->pc_lock));
	if (pdp->pd_php != NULL) {
		pollhead_delete(pdp->pd_php, pdp);
		pdp->pd_php = NULL;
	}
	pkevp = pdp->pd_portev;
	portq = &pp->port_queue;
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		if (portq->portq_getn && portq->portq_tnent) {
			/*
			 * move events from the temporary "get" queue
			 * back to the port queue
			 */
			port_push_eventq(portq);
		}
		/* cleanup merged port queue */
		port_remove_event_doneq(pkevp, portq);
		removed = 1;
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
	if (pkevp->portkev_callback) {
		(void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
		    &error, pkevp->portkev_pid, PORT_CALLBACK_DISSOCIATE,
		    pkevp);
	}
	port_free_event_local(pkevp, B_TRUE);

	/* remove polldat struct */
	port_pcache_remove_fd(pcp, pfd);
	return (removed);
}

/*
 * The port_close_pfd() function dissociates a file descriptor from a port
 * and removes all allocated resources.
 * close(2) detects in the uf_entry_t structure that the fd is associated
 * with a port (at least one port).
 * The fd can be associated with several ports.
 */
void
port_close_pfd(portfd_t *pfd)
{
	port_t *pp;
	port_fdcache_t *pcp;

	/*
	 * the portfd_t passed in should be for this proc.
	 */
	ASSERT(curproc->p_pid == PFTOD(pfd)->pd_portev->portkev_pid);
	pp = PFTOD(pfd)->pd_portev->portkev_port;
	pcp = pp->port_queue.portq_pcp;
	mutex_enter(&pcp->pc_lock);
	(void) port_remove_fd_object(pfd, pp, pcp);
	mutex_exit(&pcp->pc_lock);
}

/*
 * The port_associate_ksource() function associates an event source with a
 * port.
 * On port_close() all associated sources are requested to free all local
 * resources associated with the event port.
 * The association of a source with a port can only be done once. Further
 * calls of this function will only increment the reference counter.
 * The allocated port_source_t structure is removed from the port as soon as
 * the reference counter becomes 0.
 */
/* ARGSUSED */
int
port_associate_ksource(int port, int source, port_source_t **portsrc,
    void (*port_src_close)(void *, int, pid_t, int), void *arg,
    int (*port_src_associate)(port_kevent_t *, int, int, uintptr_t, void *))
{
	port_t *pp;
	file_t *fp;
	port_source_t **ps;
	port_source_t *pse;

	if ((fp = getf(port)) == NULL)
		return (EBADF);

	if (fp->f_vnode->v_type != VPORT) {
		releasef(port);
		return (EBADFD);
	}
	pp = VTOEP(fp->f_vnode);

	mutex_enter(&pp->port_queue.portq_source_mutex);
	if (pp->port_queue.portq_scache == NULL) {
		/*
		 * This event-port is mid-close.
		 * By the time portq_scache is freed, PORTQ_CLOSE has
		 * already been set.
		 */
		ASSERT(pp->port_queue.portq_flags & PORTQ_CLOSE);
		mutex_exit(&pp->port_queue.portq_source_mutex);
		releasef(port);
		/* Equivalent of a bad file descriptor at this point. */
		return (EBADFD);
	}
	ps = &pp->port_queue.portq_scache[PORT_SHASH(source)];
	for (pse = *ps; pse != NULL; pse = pse->portsrc_next) {
		if (pse->portsrc_source == source)
			break;
	}

	if (pse == NULL) {
		/* Create association of the event source with the port */
		pse = kmem_zalloc(sizeof (port_source_t), KM_NOSLEEP);
		if (pse == NULL) {
			mutex_exit(&pp->port_queue.portq_source_mutex);
			releasef(port);
			return (ENOMEM);
		}
		pse->portsrc_source = source;
		pse->portsrc_close = port_src_close;
		pse->portsrc_closearg = arg;
		pse->portsrc_cnt = 1;
		if (*ps)
			pse->portsrc_next = (*ps)->portsrc_next;
		*ps = pse;
	} else {
		/* entry already exists, just increment the reference count */
		pse->portsrc_cnt++;
	}
	mutex_exit(&pp->port_queue.portq_source_mutex);
	releasef(port);
	if (portsrc)
		*portsrc = pse;
	return (0);
}
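
/*
 * A minimal sketch (hypothetical close routine and argument; an assumption,
 * not code taken from an existing source) of a kernel event source
 * registering itself with a port so that it is called back on port_close().
 * The parameter meanings in the close routine follow the port_src_close()
 * signature above and are assumed here:
 *
 *	static void
 *	my_source_close(void *arg, int port, pid_t pid, int lastclose)
 *	{
 *		(tear down any per-port state held by the source)
 *	}
 *
 *	error = port_associate_ksource(port, source, &portsrc,
 *	    my_source_close, my_arg, NULL);
 */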

/*
 * The port_dissociate_ksource() function dissociates an event source from
 * a port.
 */
int
port_dissociate_ksource(int port, int source, port_source_t *ps)
{
	port_t *pp;
	file_t *fp;
	port_source_t **psh;

	if (ps == NULL)
		return (EINVAL);

	if ((fp = getf(port)) == NULL)
		return (EBADF);

	if (fp->f_vnode->v_type != VPORT) {
		releasef(port);
		return (EBADFD);
	}
	pp = VTOEP(fp->f_vnode);

	mutex_enter(&pp->port_queue.portq_source_mutex);
	if (--ps->portsrc_cnt == 0) {
		/* last association removed -> free source structure */
		if (ps->portsrc_prev == NULL) {
			/* first entry */
			psh = &pp->port_queue.portq_scache[PORT_SHASH(source)];
			*psh = ps->portsrc_next;
			if (ps->portsrc_next)
				ps->portsrc_next->portsrc_prev = NULL;
		} else {
			ps->portsrc_prev->portsrc_next = ps->portsrc_next;
			if (ps->portsrc_next)
				ps->portsrc_next->portsrc_prev =
				    ps->portsrc_prev;
		}
		kmem_free(ps, sizeof (port_source_t));
	}
	mutex_exit(&pp->port_queue.portq_source_mutex);
	releasef(port);
	return (0);
}

void
free_fopdata(vnode_t *vp)
{
	portfop_vp_t *pvp;
	pvp = vp->v_fopdata;
	ASSERT(pvp->pvp_femp == NULL);
	mutex_destroy(&pvp->pvp_mutex);
	list_destroy(&pvp->pvp_pfoplist);
	kmem_free(pvp, sizeof (*pvp));
	vp->v_fopdata = NULL;
}