Print this page
re #13613 rb4516 Tunables needs volatile keyword
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/os/timer.c
+++ new/usr/src/uts/common/os/timer.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
29 30 */
30 31
31 32 #include <sys/timer.h>
32 33 #include <sys/systm.h>
33 34 #include <sys/param.h>
34 35 #include <sys/kmem.h>
35 36 #include <sys/debug.h>
36 37 #include <sys/policy.h>
37 38 #include <sys/port_impl.h>
38 39 #include <sys/port_kernel.h>
39 40 #include <sys/contract/process_impl.h>
40 41
41 42 static kmem_cache_t *clock_timer_cache;
42 43 static clock_backend_t *clock_backend[CLOCK_MAX];
43 44 static int timer_port_callback(void *, int *, pid_t, int, void *);
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
44 45 static void timer_close_port(void *, int, pid_t, int);
45 46
46 47 #define CLOCK_BACKEND(clk) \
47 48 ((clk) < CLOCK_MAX && (clk) >= 0 ? clock_backend[(clk)] : NULL)
48 49
49 50 /*
50 51 * Tunable to increase the maximum number of POSIX timers per-process. This
51 52 * may _only_ be tuned in /etc/system or by patching the kernel binary; it
52 53 * _cannot_ be tuned on a running system.
53 54 */
54 -int timer_max = _TIMER_MAX;
55 +volatile int timer_max = _TIMER_MAX;
55 56
/*
 * timer_lock() locks the specified interval timer.  It doesn't look at the
 * ITLK_REMOVE bit; it's up to callers to look at this if they need to
 * care.  p_lock must be held on entry; it may be dropped and reacquired
 * (inside cv_wait()), but timer_lock() will always return with p_lock held.
 *
 * Note that timer_create() doesn't call timer_lock(); it creates timers
 * with the ITLK_LOCKED bit explicitly set.
 */
static void
timer_lock(proc_t *p, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	while (it->it_lock & ITLK_LOCKED) {
		/*
		 * Advertise ourselves as a blocker so that
		 * timer_delete_locked() knows it must wait for us to drain
		 * before tearing the timer down.
		 */
		it->it_blockers++;
		cv_wait(&it->it_cv, &p->p_lock);
		it->it_blockers--;
	}

	it->it_lock |= ITLK_LOCKED;
}
78 79
/*
 * timer_unlock() unlocks the specified interval timer, waking up any
 * waiters.  p_lock must be held on entry; it will not be dropped by
 * timer_unlock().
 */
static void
timer_unlock(proc_t *p, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(it->it_lock & ITLK_LOCKED);
	it->it_lock &= ~ITLK_LOCKED;
	/* Wake exactly one waiter; others re-check ITLK_LOCKED in their loop. */
	cv_signal(&it->it_cv);
}
92 93
/*
 * timer_delete_locked() takes a proc pointer, timer ID and locked interval
 * timer, and deletes the specified timer.  It must be called with p_lock
 * held, and cannot be called on a timer which already has ITLK_REMOVE set;
 * the caller must check this.  timer_delete_locked() will set the ITLK_REMOVE
 * bit and will iteratively unlock and lock the interval timer until all
 * blockers have seen the ITLK_REMOVE and cleared out.  It will then zero
 * out the specified entry in the p_itimer array, and call into the clock
 * backend to complete the deletion.
 *
 * This function will always return with p_lock held.
 */
static void
timer_delete_locked(proc_t *p, timer_t tid, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(!(it->it_lock & ITLK_REMOVE));
	ASSERT(it->it_lock & ITLK_LOCKED);

	it->it_lock |= ITLK_REMOVE;

	/*
	 * If there are threads waiting to lock this timer, we'll unlock
	 * the timer, and block on the cv.  Threads blocking our removal will
	 * have the opportunity to run; when they see the ITLK_REMOVE flag
	 * set, they will immediately unlock the timer.
	 */
	while (it->it_blockers) {
		timer_unlock(p, it);
		cv_wait(&it->it_cv, &p->p_lock);
		timer_lock(p, it);
	}

	ASSERT(p->p_itimer[tid] == it);
	p->p_itimer[tid] = NULL;

	/*
	 * No one is blocked on this timer, and no one will be (we've set
	 * p_itimer[tid] to be NULL; no one can find it).  Now we call into
	 * the clock backend to delete the timer; it is up to the backend to
	 * guarantee that timer_fire() has completed (and will never again
	 * be called) for this timer.
	 */
	mutex_exit(&p->p_lock);

	it->it_backend->clk_timer_delete(it);

	/*
	 * Unlocked re-check of it_portev is a fast-path optimization; the
	 * authoritative check is repeated under it_mutex below.
	 */
	if (it->it_portev) {
		mutex_enter(&it->it_mutex);
		if (it->it_portev) {
			port_kevent_t *pev;
			/* dissociate timer from the event port */
			(void) port_dissociate_ksource(it->it_portfd,
			    PORT_SOURCE_TIMER, (port_source_t *)it->it_portsrc);
			pev = (port_kevent_t *)it->it_portev;
			it->it_portev = NULL;
			it->it_flags &= ~IT_PORT;
			it->it_pending = 0;
			mutex_exit(&it->it_mutex);
			(void) port_remove_done_event(pev);
			port_free_event(pev);
		} else {
			mutex_exit(&it->it_mutex);
		}
	}

	mutex_enter(&p->p_lock);

	/*
	 * We need to be careful freeing the sigqueue for this timer;
	 * if a signal is pending, the sigqueue needs to be freed
	 * synchronously in siginfofree().  The need to free the sigqueue
	 * in siginfofree() is indicated by setting sq_func to NULL.
	 */
	if (it->it_pending > 0) {
		it->it_sigq->sq_func = NULL;
	} else {
		kmem_free(it->it_sigq, sizeof (sigqueue_t));
	}

	ASSERT(it->it_blockers == 0);
	kmem_cache_free(clock_timer_cache, it);
}
176 177
/*
 * timer_grab() and its companion routine, timer_release(), are wrappers
 * around timer_lock()/_unlock() which allow the timer_*(3R) routines to
 * (a) share error handling code and (b) not grab p_lock themselves.  Routines
 * which are called with p_lock held (e.g. timer_lwpbind(), timer_lwpexit())
 * must call timer_lock()/_unlock() explicitly.
 *
 * timer_grab() takes a proc and a timer ID, and returns a pointer to a
 * locked interval timer.  p_lock must _not_ be held on entry; timer_grab()
 * may acquire p_lock, but will always return with p_lock dropped.
 *
 * If timer_grab() fails, it will return NULL.  timer_grab() will fail if
 * one or more of the following is true:
 *
 * (a)	The specified timer ID is out of range.
 *
 * (b)	The specified timer ID does not correspond to a timer ID returned
 *	from timer_create(3R).
 *
 * (c)	The specified timer ID is currently being removed.
 *
 */
static itimer_t *
timer_grab(proc_t *p, timer_t tid)
{
	itimer_t **itp, *it;

	/* Range check against the (boot-time tunable) per-process maximum. */
	if (tid >= timer_max || tid < 0)
		return (NULL);

	mutex_enter(&p->p_lock);

	if ((itp = p->p_itimer) == NULL || (it = itp[tid]) == NULL) {
		mutex_exit(&p->p_lock);
		return (NULL);
	}

	timer_lock(p, it);

	if (it->it_lock & ITLK_REMOVE) {
		/*
		 * Someone is removing this timer; it will soon be invalid.
		 */
		timer_unlock(p, it);
		mutex_exit(&p->p_lock);
		return (NULL);
	}

	mutex_exit(&p->p_lock);

	return (it);
}
229 230
/*
 * timer_release() releases a timer acquired with timer_grab().  p_lock
 * should not be held on entry; timer_release() will acquire p_lock but
 * will drop it before returning.
 */
static void
timer_release(proc_t *p, itimer_t *it)
{
	mutex_enter(&p->p_lock);
	timer_unlock(p, it);
	mutex_exit(&p->p_lock);
}
242 243
/*
 * timer_delete_grabbed() deletes a timer acquired with timer_grab().
 * p_lock should not be held on entry; timer_delete_grabbed() will acquire
 * p_lock, but will drop it before returning.
 */
static void
timer_delete_grabbed(proc_t *p, timer_t tid, itimer_t *it)
{
	mutex_enter(&p->p_lock);
	timer_delete_locked(p, tid, it);
	mutex_exit(&p->p_lock);
}
255 256
256 257 void
257 258 clock_timer_init()
258 259 {
259 260 clock_timer_cache = kmem_cache_create("timer_cache",
260 261 sizeof (itimer_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
261 262 }
262 263
/*
 * Register the backend for the given clock type.  Each clock ID may be
 * registered exactly once (asserted below); backends are never removed.
 */
void
clock_add_backend(clockid_t clock, clock_backend_t *backend)
{
	ASSERT(clock >= 0 && clock < CLOCK_MAX);
	ASSERT(clock_backend[clock] == NULL);

	clock_backend[clock] = backend;
}
271 272
272 273 clock_backend_t *
273 274 clock_get_backend(clockid_t clock)
274 275 {
275 276 if (clock < 0 || clock >= CLOCK_MAX)
276 277 return (NULL);
277 278
278 279 return (clock_backend[clock]);
279 280 }
280 281
/*
 * clock_settime(3RT) system call handler: validate privilege, copy the
 * user timespec in (handling 32-bit callers), sanity-check it, and hand
 * it to the clock backend.  Returns 0 or sets errno (EINVAL/EPERM/EFAULT
 * or a backend-specific error).
 */
int
clock_settime(clockid_t clock, timespec_t *tp)
{
	timespec_t t;
	clock_backend_t *backend;
	int error;

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	/* Setting the system time requires privilege. */
	if (secpolicy_settime(CRED()) != 0)
		return (set_errno(EPERM));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(tp, &t, sizeof (timespec_t)) != 0)
			return (set_errno(EFAULT));
	} else {
		/* 32-bit caller in a 64-bit kernel: widen the timespec. */
		timespec32_t t32;

		if (copyin(tp, &t32, sizeof (timespec32_t)) != 0)
			return (set_errno(EFAULT));

		TIMESPEC32_TO_TIMESPEC(&t, &t32);
	}

	/* Reject malformed timespecs (e.g. out-of-range tv_nsec). */
	if (itimerspecfix(&t))
		return (set_errno(EINVAL));

	error = backend->clk_clock_settime(&t);

	if (error)
		return (set_errno(error));

	return (0);
}
316 317
/*
 * clock_gettime(3RT) system call handler: query the clock backend and
 * copy the result out to the user (handling 32-bit callers, including
 * EOVERFLOW when the value doesn't fit in a timespec32_t).
 */
int
clock_gettime(clockid_t clock, timespec_t *tp)
{
	timespec_t t;
	clock_backend_t *backend;
	int error;

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	error = backend->clk_clock_gettime(&t);

	if (error)
		return (set_errno(error));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(&t, tp, sizeof (timespec_t)) != 0)
			return (set_errno(EFAULT));
	} else {
		timespec32_t t32;

		/* The 64-bit value may not be representable in 32 bits. */
		if (TIMESPEC_OVERFLOW(&t))
			return (set_errno(EOVERFLOW));
		TIMESPEC_TO_TIMESPEC32(&t32, &t);

		if (copyout(&t32, tp, sizeof (timespec32_t)) != 0)
			return (set_errno(EFAULT));
	}

	return (0);
}
348 349
/*
 * clock_getres(3RT) system call handler: return the resolution of the
 * specified clock.  A NULL tp is a successful no-op per the standard.
 */
int
clock_getres(clockid_t clock, timespec_t *tp)
{
	timespec_t t;
	clock_backend_t *backend;
	int error;

	/*
	 * Strangely, the standard defines clock_getres() with a NULL tp
	 * to do nothing (regardless of the validity of the specified
	 * clock_id).  Go figure.
	 */
	if (tp == NULL)
		return (0);

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	error = backend->clk_clock_getres(&t);

	if (error)
		return (set_errno(error));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(&t, tp, sizeof (timespec_t)) != 0)
			return (set_errno(EFAULT));
	} else {
		timespec32_t t32;

		if (TIMESPEC_OVERFLOW(&t))
			return (set_errno(EOVERFLOW));
		TIMESPEC_TO_TIMESPEC32(&t32, &t);

		if (copyout(&t32, tp, sizeof (timespec32_t)) != 0)
			return (set_errno(EFAULT));
	}

	return (0);
}
388 389
/*
 * sq_func callback invoked (via siginfofree()) when this timer's queued
 * signal is delivered or discarded: fold the accumulated pending count
 * into it_overrun and re-arm it_pending for the next firing.
 */
void
timer_signal(sigqueue_t *sigq)
{
	itimer_t *it = (itimer_t *)sigq->sq_backptr;

	/*
	 * There are some conditions during a fork or an exit when we can
	 * call siginfofree() without p_lock held.  To prevent a race
	 * between timer_signal() and timer_fire() with regard to it_pending,
	 * we therefore acquire it_mutex in both paths.
	 */
	mutex_enter(&it->it_mutex);
	ASSERT(it->it_pending > 0);
	/* Every firing beyond the first queued one is an overrun. */
	it->it_overrun = it->it_pending - 1;
	it->it_pending = 0;
	mutex_exit(&it->it_mutex);
}
406 407
/*
 * This routine is called from the clock backend when a timer expires.
 * Depending on the timer's notification mode it either queues the
 * pre-allocated signal, sends the pre-allocated port event, or simply
 * counts the expiry in it_pending.
 */
static void
timer_fire(itimer_t *it)
{
	proc_t *p;
	int proc_lock_held;

	if (it->it_flags & IT_SIGNAL) {
		/*
		 * See the comment in timer_signal() for why it is not
		 * sufficient to only grab p_lock here.  Because p_lock can be
		 * held on entry to timer_signal(), the lock ordering is
		 * necessarily p_lock before it_mutex.
		 */
		p = it->it_proc;
		proc_lock_held = 1;
		mutex_enter(&p->p_lock);
	} else {
		/*
		 * IT_PORT:
		 * If a timer was ever programmed to send events to a port,
		 * the IT_PORT flag will remain set until:
		 * a) the timer is deleted (see timer_delete_locked()) or
		 * b) the port is being closed (see timer_close_port()).
		 * Both cases are synchronized with the it_mutex.
		 * We don't need to use the p_lock because it is only
		 * required in the IT_SIGNAL case.
		 * If IT_PORT was set and the port is being closed then
		 * the timer notification is set to NONE.  In such a case
		 * the timer itself and the it_pending counter remain active
		 * until the application deletes the counter or the process
		 * exits.
		 */
		proc_lock_held = 0;
	}
	mutex_enter(&it->it_mutex);

	if (it->it_pending > 0) {
		/* Notification already outstanding: just count the overrun. */
		if (it->it_pending < INT_MAX)
			it->it_pending++;
		mutex_exit(&it->it_mutex);
	} else {
		if (it->it_flags & IT_PORT) {
			it->it_pending = 1;
			port_send_event((port_kevent_t *)it->it_portev);
			mutex_exit(&it->it_mutex);
		} else if (it->it_flags & IT_SIGNAL) {
			it->it_pending = 1;
			mutex_exit(&it->it_mutex);
			sigaddqa(p, NULL, it->it_sigq);
		} else {
			/* SIGEV_NONE (or port detached): nothing to deliver. */
			mutex_exit(&it->it_mutex);
		}
	}

	if (proc_lock_held)
		mutex_exit(&p->p_lock);
}
468 469
/*
 * timer_create(3RT) system call handler.  Copies in the (optionally
 * abbreviated) sigevent, allocates an itimer_t and sigqueue_t, claims the
 * first free slot in the per-process p_itimer array (sized by the boot-time
 * tunable timer_max), wires up signal- or event-port-based notification,
 * and finally asks the clock backend to create the underlying timer.
 * The new timer ID is copied out through tid.  Returns 0 or sets errno.
 */
int
timer_create(clockid_t clock, struct sigevent *evp, timer_t *tid)
{
	struct sigevent ev;
	proc_t *p = curproc;
	clock_backend_t *backend;
	itimer_t *it, **itp;
	sigqueue_t *sigq;
	cred_t *cr = CRED();
	int error = 0;
	timer_t i;
	port_notify_t tim_pnevp;
	port_kevent_t *pkevp = NULL;

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	if (evp != NULL) {
		/*
		 * short copyin() for binary compatibility
		 * fetch oldsigevent to determine how much to copy in.
		 */
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyin(evp, &ev, sizeof (struct oldsigevent)))
				return (set_errno(EFAULT));

			if (ev.sigev_notify == SIGEV_PORT ||
			    ev.sigev_notify == SIGEV_THREAD) {
				if (copyin(ev.sigev_value.sival_ptr, &tim_pnevp,
				    sizeof (port_notify_t)))
					return (set_errno(EFAULT));
			}
#ifdef	_SYSCALL32_IMPL
		} else {
			struct sigevent32 ev32;
			port_notify32_t tim_pnevp32;

			if (copyin(evp, &ev32, sizeof (struct oldsigevent32)))
				return (set_errno(EFAULT));
			ev.sigev_notify = ev32.sigev_notify;
			ev.sigev_signo = ev32.sigev_signo;
			/*
			 * See comment in sigqueue32() on handling of 32-bit
			 * sigvals in a 64-bit kernel.
			 */
			ev.sigev_value.sival_int = ev32.sigev_value.sival_int;
			if (ev.sigev_notify == SIGEV_PORT ||
			    ev.sigev_notify == SIGEV_THREAD) {
				if (copyin((void *)(uintptr_t)
				    ev32.sigev_value.sival_ptr,
				    (void *)&tim_pnevp32,
				    sizeof (port_notify32_t)))
					return (set_errno(EFAULT));
				tim_pnevp.portnfy_port =
				    tim_pnevp32.portnfy_port;
				tim_pnevp.portnfy_user =
				    (void *)(uintptr_t)tim_pnevp32.portnfy_user;
			}
#endif
		}
		switch (ev.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			if (ev.sigev_signo < 1 || ev.sigev_signo >= NSIG)
				return (set_errno(EINVAL));
			break;
		case SIGEV_THREAD:
		case SIGEV_PORT:
			break;
		default:
			return (set_errno(EINVAL));
		}
	} else {
		/*
		 * Use the clock's default sigevent (this is a structure copy).
		 */
		ev = backend->clk_default;
	}

	/*
	 * We'll allocate our timer and sigqueue now, before we grab p_lock.
	 * If we can't find an empty slot, we'll free them before returning.
	 */
	it = kmem_cache_alloc(clock_timer_cache, KM_SLEEP);
	bzero(it, sizeof (itimer_t));
	mutex_init(&it->it_mutex, NULL, MUTEX_DEFAULT, NULL);
	sigq = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	mutex_enter(&p->p_lock);

	/*
	 * If this is this process' first timer, we need to attempt to allocate
	 * an array of timerstr_t pointers.  We drop p_lock to perform the
	 * allocation; if we return to discover that p_itimer is non-NULL,
	 * we will free our allocation and drive on.
	 */
	if ((itp = p->p_itimer) == NULL) {
		mutex_exit(&p->p_lock);
		itp = kmem_zalloc(timer_max * sizeof (itimer_t *), KM_SLEEP);
		mutex_enter(&p->p_lock);

		if (p->p_itimer == NULL)
			p->p_itimer = itp;
		else {
			/* Lost the race: another thread installed the array. */
			kmem_free(itp, timer_max * sizeof (itimer_t *));
			itp = p->p_itimer;
		}
	}

	/* Find the lowest free timer ID. */
	for (i = 0; i < timer_max && itp[i] != NULL; i++)
		continue;

	if (i == timer_max) {
		/*
		 * We couldn't find a slot.  Drop p_lock, free the preallocated
		 * timer and sigqueue, and return an error.
		 */
		mutex_exit(&p->p_lock);
		kmem_cache_free(clock_timer_cache, it);
		kmem_free(sigq, sizeof (sigqueue_t));

		return (set_errno(EAGAIN));
	}

	ASSERT(i < timer_max && itp[i] == NULL);

	/*
	 * If we develop other notification mechanisms, this will need
	 * to call into (yet another) backend.
	 */
	sigq->sq_info.si_signo = ev.sigev_signo;
	if (evp == NULL)
		sigq->sq_info.si_value.sival_int = i;
	else
		sigq->sq_info.si_value = ev.sigev_value;
	sigq->sq_info.si_code = SI_TIMER;
	sigq->sq_info.si_pid = p->p_pid;
	sigq->sq_info.si_ctid = PRCTID(p);
	sigq->sq_info.si_zoneid = getzoneid();
	sigq->sq_info.si_uid = crgetruid(cr);
	sigq->sq_func = timer_signal;
	sigq->sq_next = NULL;
	sigq->sq_backptr = it;
	it->it_sigq = sigq;
	it->it_backend = backend;
	it->it_lock = ITLK_LOCKED;	/* timers are born locked */
	itp[i] = it;


	if (ev.sigev_notify == SIGEV_THREAD ||
	    ev.sigev_notify == SIGEV_PORT) {
		int port;

		/*
		 * This timer is programmed to use event port notification when
		 * the timer fires:
		 * - allocate a port event structure and prepare it to be sent
		 *   to the port as soon as the timer fires.
		 * - when the timer fires :
		 *   - if event structure was already sent to the port then this
		 *	is a timer fire overflow => increment overflow counter.
		 *   - otherwise send pre-allocated event structure to the port.
		 * - the events field of the port_event_t structure counts the
		 *   number of timer fired events.
		 * - The event structured is allocated using the
		 *   PORT_ALLOC_CACHED flag.
		 *   This flag indicates that the timer itself will manage and
		 *   free the event structure when required.
		 */

		it->it_flags |= IT_PORT;
		port = tim_pnevp.portnfy_port;

		/* associate timer as event source with the port */
		error = port_associate_ksource(port, PORT_SOURCE_TIMER,
		    (port_source_t **)&it->it_portsrc, timer_close_port,
		    (void *)it, NULL);
		if (error) {
			itp[i] = NULL;		/* clear slot */
			mutex_exit(&p->p_lock);
			kmem_cache_free(clock_timer_cache, it);
			kmem_free(sigq, sizeof (sigqueue_t));
			return (set_errno(error));
		}

		/* allocate an event structure/slot */
		error = port_alloc_event(port, PORT_ALLOC_SCACHED,
		    PORT_SOURCE_TIMER, &pkevp);
		if (error) {
			(void) port_dissociate_ksource(port, PORT_SOURCE_TIMER,
			    (port_source_t *)it->it_portsrc);
			itp[i] = NULL;		/* clear slot */
			mutex_exit(&p->p_lock);
			kmem_cache_free(clock_timer_cache, it);
			kmem_free(sigq, sizeof (sigqueue_t));
			return (set_errno(error));
		}

		/* initialize event data */
		port_init_event(pkevp, i, tim_pnevp.portnfy_user,
		    timer_port_callback, it);
		it->it_portev = pkevp;
		it->it_portfd = port;
	} else {
		if (ev.sigev_notify == SIGEV_SIGNAL)
			it->it_flags |= IT_SIGNAL;
	}

	mutex_exit(&p->p_lock);

	/*
	 * Call on the backend to verify the event argument (or return
	 * EINVAL if this clock type does not support timers).
	 */
	if ((error = backend->clk_timer_create(it, timer_fire)) != 0)
		goto err;

	it->it_lwp = ttolwp(curthread);
	it->it_proc = p;

	if (copyout(&i, tid, sizeof (timer_t)) != 0) {
		error = EFAULT;
		goto err;
	}

	/*
	 * If we're here, then we have successfully created the timer; we
	 * just need to release the timer and return.
	 */
	timer_release(p, it);

	return (0);

err:
	/*
	 * If we're here, an error has occurred late in the timer creation
	 * process.  We need to regrab p_lock, and delete the incipient timer.
	 * Since we never unlocked the timer (it was born locked), it's
	 * impossible for a removal to be pending.
	 */
	ASSERT(!(it->it_lock & ITLK_REMOVE));
	timer_delete_grabbed(p, i, it);

	return (set_errno(error));
}
715 716
/*
 * timer_gettime(3RT) system call handler: query the backend for the
 * timer's current value/interval and copy it out (handling 32-bit
 * callers, including EOVERFLOW when the value won't fit).
 */
int
timer_gettime(timer_t tid, itimerspec_t *val)
{
	proc_t *p = curproc;
	itimer_t *it;
	itimerspec_t when;
	int error;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	error = it->it_backend->clk_timer_gettime(it, &when);

	/* Release before the copyout; `when` is a local copy. */
	timer_release(p, it);

	if (error == 0) {
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyout(&when, val, sizeof (itimerspec_t)))
				error = EFAULT;
		} else {
			if (ITIMERSPEC_OVERFLOW(&when))
				error = EOVERFLOW;
			else {
				itimerspec32_t w32;

				ITIMERSPEC_TO_ITIMERSPEC32(&w32, &when)
				if (copyout(&w32, val, sizeof (itimerspec32_t)))
					error = EFAULT;
			}
		}
	}

	return (error ? set_errno(error) : 0);
}
750 751
/*
 * timer_settime(3RT) system call handler: optionally report the previous
 * setting through oval, copy in and validate the new itimerspec, then ask
 * the backend to (re)arm the timer.
 */
int
timer_settime(timer_t tid, int flags, itimerspec_t *val, itimerspec_t *oval)
{
	itimerspec_t when;
	itimer_t *it;
	proc_t *p = curproc;
	int error;

	if (oval != NULL) {
		/* timer_gettime() also performs the copyout to oval. */
		if ((error = timer_gettime(tid, oval)) != 0)
			return (error);
	}

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(val, &when, sizeof (itimerspec_t)))
			return (set_errno(EFAULT));
	} else {
		itimerspec32_t w32;

		if (copyin(val, &w32, sizeof (itimerspec32_t)))
			return (set_errno(EFAULT));

		ITIMERSPEC32_TO_ITIMERSPEC(&when, &w32);
	}

	/*
	 * A bad it_interval only matters when the timer is actually being
	 * armed (it_value non-zero), hence the timerspecisset() term.
	 */
	if (itimerspecfix(&when.it_value) ||
	    (itimerspecfix(&when.it_interval) &&
	    timerspecisset(&when.it_value))) {
		return (set_errno(EINVAL));
	}

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	error = it->it_backend->clk_timer_settime(it, flags, &when);

	timer_release(p, it);

	return (error ? set_errno(error) : 0);
}
791 792
/*
 * timer_delete(3RT) system call handler: look up and tear down the
 * specified timer.  EINVAL if the ID is invalid or already being removed.
 */
int
timer_delete(timer_t tid)
{
	proc_t *p = curproc;
	itimer_t *it;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	timer_delete_grabbed(p, tid, it);

	return (0);
}
805 806
/*
 * timer_getoverrun(3RT) system call handler: return the overrun count
 * recorded at the last signal delivery for the specified timer.
 */
int
timer_getoverrun(timer_t tid)
{
	int overrun;
	proc_t *p = curproc;
	itimer_t *it;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	/*
	 * The it_overrun field is protected by p_lock; we need to acquire
	 * it before looking at the value.
	 */
	mutex_enter(&p->p_lock);
	overrun = it->it_overrun;
	mutex_exit(&p->p_lock);

	timer_release(p, it);

	return (overrun);
}
828 829
/*
 * Detach all of the exiting LWP's timers from it.
 * Entered/exited with p_lock held, but will repeatedly drop and regrab p_lock
 * (inside timer_lock()/timer_unlock()).
 */
void
timer_lwpexit(void)
{
	timer_t i;
	proc_t *p = curproc;
	klwp_t *lwp = ttolwp(curthread);
	itimer_t *it, **itp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((itp = p->p_itimer) == NULL)
		return;

	for (i = 0; i < timer_max; i++) {
		if ((it = itp[i]) == NULL)
			continue;

		timer_lock(p, it);

		if ((it->it_lock & ITLK_REMOVE) || it->it_lwp != lwp) {
			/*
			 * This timer is either being removed or it isn't
			 * associated with this lwp.
			 */
			timer_unlock(p, it);
			continue;
		}

		/*
		 * The LWP that created this timer is going away.  To the user,
		 * our behavior here is explicitly undefined.  We will simply
		 * null out the it_lwp field; if the LWP was bound to a CPU,
		 * the cyclic will stay bound to that CPU until the process
		 * exits.
		 */
		it->it_lwp = NULL;
		timer_unlock(p, it);
	}
}
871 872
872 873 /*
873 874 * Called to notify of an LWP binding change. Entered/exited with p_lock
874 875 * held, but will repeatedly drop and regrab p_lock.
875 876 */
876 877 void
877 878 timer_lwpbind()
878 879 {
879 880 timer_t i;
880 881 proc_t *p = curproc;
881 882 klwp_t *lwp = ttolwp(curthread);
882 883 itimer_t *it, **itp;
883 884
884 885 ASSERT(MUTEX_HELD(&p->p_lock));
885 886
886 887 if ((itp = p->p_itimer) == NULL)
887 888 return;
888 889
889 890 for (i = 0; i < timer_max; i++) {
890 891 if ((it = itp[i]) == NULL)
891 892 continue;
892 893
893 894 timer_lock(p, it);
894 895
895 896 if (!(it->it_lock & ITLK_REMOVE) && it->it_lwp == lwp) {
896 897 /*
897 898 * Drop p_lock and jump into the backend.
898 899 */
899 900 mutex_exit(&p->p_lock);
900 901 it->it_backend->clk_timer_lwpbind(it);
901 902 mutex_enter(&p->p_lock);
902 903 }
903 904
904 905 timer_unlock(p, it);
905 906 }
906 907 }
907 908
/*
 * Delete every timer and free the p_itimer array at process exit.
 * This function should only be called if p_itimer is non-NULL.
 */
void
timer_exit(void)
{
	timer_t i;
	proc_t *p = curproc;

	ASSERT(p->p_itimer != NULL);

	/* timer_delete() tolerates (and skips) already-empty slots. */
	for (i = 0; i < timer_max; i++)
		(void) timer_delete(i);

	kmem_free(p->p_itimer, timer_max * sizeof (itimer_t *));
	p->p_itimer = NULL;
}
925 926
/*
 * timer_port_callback() is a callback function which is associated with the
 * timer event and is activated just before the event is delivered to the user.
 * The timer uses this function to update/set the overflow counter and
 * to reenable the use of the event structure.
 */

/* ARGSUSED */
static int
timer_port_callback(void *arg, int *events, pid_t pid, int flag, void *evp)
{
	itimer_t *it = arg;

	mutex_enter(&it->it_mutex);
	if (curproc != it->it_proc) {
		/* can not deliver timer events to another proc */
		mutex_exit(&it->it_mutex);
		return (EACCES);
	}
	*events = it->it_pending;	/* 1 = 1 event, >1 # of overflows */
	it->it_pending = 0;		/* reinit overflow counter */
	/*
	 * This function can also be activated when the port is being closed
	 * and a timer event is already submitted to the port.
	 * In such a case the event port framework will use the
	 * close-callback function to notify the events sources.
	 * The timer close-callback function is timer_close_port() which
	 * will free all allocated resources (including the allocated
	 * port event structure).
	 * For that reason we don't need to check the value of flag here.
	 */
	mutex_exit(&it->it_mutex);
	return (0);
}
960 961
/*
 * port is being closed ... free all allocated port event structures
 * The delivered arg currently correspond to the first timer associated with
 * the port and it is not useable in this case.
 * We have to scan the list of activated timers in the current proc and
 * compare them with the delivered port id.
 */

/* ARGSUSED */
static void
timer_close_port(void *arg, int port, pid_t pid, int lastclose)
{
	proc_t *p = curproc;
	timer_t tid;
	itimer_t *it;

	for (tid = 0; tid < timer_max; tid++) {
		if ((it = timer_grab(p, tid)) == NULL)
			continue;
		/*
		 * Unlocked it_portev check is a fast-path filter; the
		 * authoritative port comparison is done under it_mutex.
		 */
		if (it->it_portev) {
			mutex_enter(&it->it_mutex);
			if (it->it_portfd == port) {
				port_kevent_t *pev;
				pev = (port_kevent_t *)it->it_portev;
				it->it_portev = NULL;
				it->it_flags &= ~IT_PORT;
				mutex_exit(&it->it_mutex);
				(void) port_remove_done_event(pev);
				port_free_event(pev);
			} else {
				mutex_exit(&it->it_mutex);
			}
		}
		timer_release(p, it);
	}
}
|
↓ open down ↓ |
932 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX