Print this page
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/intel/fs/proc/prmachdep.c
+++ new/usr/src/uts/intel/fs/proc/prmachdep.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * Copyright 2023 Oxide Computer Company
32 32 */
33 33
34 34 #include <sys/types.h>
35 35 #include <sys/t_lock.h>
36 36 #include <sys/param.h>
37 37 #include <sys/cred.h>
38 38 #include <sys/debug.h>
39 39 #include <sys/inline.h>
40 40 #include <sys/kmem.h>
41 41 #include <sys/proc.h>
42 42 #include <sys/regset.h>
43 43 #include <sys/privregs.h>
44 44 #include <sys/sysmacros.h>
45 45 #include <sys/systm.h>
46 46 #include <sys/vfs.h>
47 47 #include <sys/vnode.h>
48 48 #include <sys/psw.h>
49 49 #include <sys/pcb.h>
50 50 #include <sys/buf.h>
51 51 #include <sys/signal.h>
52 52 #include <sys/user.h>
53 53 #include <sys/cpuvar.h>
54 54 #include <sys/stdalign.h>
55 55
56 56 #include <sys/fault.h>
57 57 #include <sys/syscall.h>
58 58 #include <sys/procfs.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/stack.h>
61 61 #include <sys/debugreg.h>
62 62 #include <sys/copyops.h>
63 63
64 64 #include <sys/vmem.h>
65 65 #include <sys/mman.h>
66 66 #include <sys/vmparam.h>
67 67 #include <sys/fp.h>
68 68 #include <sys/archsystm.h>
69 69 #include <sys/vmsystm.h>
70 70 #include <vm/hat.h>
71 71 #include <vm/as.h>
72 72 #include <vm/seg.h>
73 73 #include <vm/seg_kmem.h>
74 74 #include <vm/seg_kp.h>
75 75 #include <vm/page.h>
76 76
77 77 #include <sys/sysi86.h>
78 78
79 79 #include <fs/proc/prdata.h>
80 80
int prnwatch = 10000;	/* maximum number of watched areas */
82 82
83 83 /*
84 84 * Force a thread into the kernel if it is not already there.
85 85 * This is a no-op on uniprocessors.
86 86 */
87 87 /* ARGSUSED */
88 88 void
89 89 prpokethread(kthread_t *t)
90 90 {
91 91 if (t->t_state == TS_ONPROC && t->t_cpu != CPU)
92 92 poke_cpu(t->t_cpu->cpu_id);
93 93 }
94 94
95 95 /*
96 96 * Return general registers.
97 97 */
void
prgetprregs(klwp_t *lwp, prgregset_t prp)
{
	/*
	 * NOTE(review): caller must not hold p_lock here — presumably
	 * because getgregs() may block; confirm against getgregs().
	 */
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	getgregs(lwp, prp);
}
105 105
106 106 /*
107 107 * Set general registers.
108 108 * (Note: This can be an alias to setgregs().)
109 109 */
void
prsetprregs(klwp_t *lwp, prgregset_t prp, int initial)
{
	/*
	 * For a brand-new lwp, start from a clean user-mode flags word
	 * before applying the caller-supplied register set.
	 */
	if (initial)		/* set initial values */
		lwptoregs(lwp)->r_ps = PSL_USER;
	(void) setgregs(lwp, prp);
}
117 117
118 118 #ifdef _SYSCALL32_IMPL
119 119
120 120 /*
121 121 * Convert prgregset32 to native prgregset
122 122 */
void
prgregset_32ton(klwp_t *lwp, prgregset32_t src, prgregset_t dst)
{
	struct regs *rp = lwptoregs(lwp);

	/*
	 * The 32-bit register set has no fsbase/gsbase slots; take the
	 * live values from the lwp's pcb instead.
	 */
	dst[REG_GSBASE] = lwp->lwp_pcb.pcb_gsbase;
	dst[REG_FSBASE] = lwp->lwp_pcb.pcb_fsbase;

	/* Segment selectors are only 16 bits wide; truncate explicitly. */
	dst[REG_DS] = (uint16_t)src[DS];
	dst[REG_ES] = (uint16_t)src[ES];

	dst[REG_GS] = (uint16_t)src[GS];
	dst[REG_FS] = (uint16_t)src[FS];
	dst[REG_SS] = (uint16_t)src[SS];
	dst[REG_RSP] = (uint32_t)src[UESP];
	/*
	 * Only the user-modifiable flag bits may come from the caller;
	 * everything else is preserved from the current live rFLAGS.
	 */
	dst[REG_RFL] =
	    (rp->r_ps & ~PSL_USERMASK) | (src[EFL] & PSL_USERMASK);
	dst[REG_CS] = (uint16_t)src[CS];
	dst[REG_RIP] = (uint32_t)src[EIP];
	dst[REG_ERR] = (uint32_t)src[ERR];
	dst[REG_TRAPNO] = (uint32_t)src[TRAPNO];
	dst[REG_RAX] = (uint32_t)src[EAX];
	dst[REG_RCX] = (uint32_t)src[ECX];
	dst[REG_RDX] = (uint32_t)src[EDX];
	dst[REG_RBX] = (uint32_t)src[EBX];
	dst[REG_RBP] = (uint32_t)src[EBP];
	dst[REG_RSI] = (uint32_t)src[ESI];
	dst[REG_RDI] = (uint32_t)src[EDI];
	/* The %r8-%r15 registers have no 32-bit counterpart; clear them. */
	dst[REG_R8] = dst[REG_R9] = dst[REG_R10] = dst[REG_R11] =
	    dst[REG_R12] = dst[REG_R13] = dst[REG_R14] = dst[REG_R15] = 0;
}
154 154
155 155 /*
156 156 * Return 32-bit general registers
157 157 */
void
prgetprregs32(klwp_t *lwp, prgregset32_t prp)
{
	/* Same locking constraint as prgetprregs(). */
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));
	getgregs32(lwp, prp);
}
164 164
165 165 #endif /* _SYSCALL32_IMPL */
166 166
167 167 /*
168 168 * Get the syscall return values for the lwp.
169 169 */
int
prgetrvals(klwp_t *lwp, long *rval1, long *rval2)
{
	struct regs *r = lwptoregs(lwp);

	/*
	 * By the x86 syscall return convention, a set carry flag means
	 * the call failed and r_r0 (%rax) holds the errno to return.
	 */
	if (r->r_ps & PS_C)
		return (r->r_r0);
	if (lwp->lwp_eosys == JUSTRETURN) {
		/* No syscall return values to report. */
		*rval1 = 0;
		*rval2 = 0;
	} else if (lwp_getdatamodel(lwp) != DATAMODEL_NATIVE) {
		/*
		 * XX64 Not sure we -really- need to do this, because the
		 * syscall return already masks off the bottom values ..?
		 */
		*rval1 = r->r_r0 & (uint32_t)0xffffffffu;
		*rval2 = r->r_r1 & (uint32_t)0xffffffffu;
	} else {
		*rval1 = r->r_r0;
		*rval2 = r->r_r1;
	}
	return (0);
}
193 193
194 194 /*
195 195 * Does the system support floating-point, either through hardware
196 196 * or by trapping and emulating floating-point machine instructions?
197 197 */
int
prhasfp(void)
{
	extern int fp_kind;

	/* Non-zero whenever any kind of FPU support is present. */
	return (fp_kind != FP_NO);
}
205 205
206 206 /*
207 207 * Get floating-point registers.
208 208 */
void
prgetprfpregs(klwp_t *lwp, prfpregset_t *pfp)
{
	/* Zero first so any fields getfpregs() doesn't fill read as 0. */
	bzero(pfp, sizeof (prfpregset_t));
	getfpregs(lwp, pfp);
}
215 215
216 216 #if defined(_SYSCALL32_IMPL)
void
prgetprfpregs32(klwp_t *lwp, prfpregset32_t *pfp)
{
	/* Zero first so any fields getfpregs32() doesn't fill read as 0. */
	bzero(pfp, sizeof (*pfp));
	getfpregs32(lwp, pfp);
}
223 223 #endif /* _SYSCALL32_IMPL */
224 224
225 225 /*
226 226 * Set floating-point registers.
227 227 * (Note: This can be an alias to setfpregs().)
228 228 */
void
prsetprfpregs(klwp_t *lwp, prfpregset_t *pfp)
{
	/* Thin wrapper: all validation is done by setfpregs(). */
	setfpregs(lwp, pfp);
}
234 234
235 235 #if defined(_SYSCALL32_IMPL)
void
prsetprfpregs32(klwp_t *lwp, prfpregset32_t *pfp)
{
	/* Thin wrapper: all validation is done by setfpregs32(). */
	setfpregs32(lwp, pfp);
}
241 241 #endif /* _SYSCALL32_IMPL */
242 242
243 243 /*
244 244 * This is a general function that the main part of /proc and the rest of the
245 245 * system uses to ask does a given process actually have extended state. Right
246 246 * now, this question is not process-specific, but rather CPU specific. We look
247 247 * at whether xsave has been enabled to determine that. While strictly speaking
248 248 * one could make the argument that all amd64 CPUs support fxsave and we could
249 249 * emulate something that only supports that, we don't think that makes sense.
250 250 */
int
prhasx(proc_t *p)
{
	/* Process-independent today: keyed purely off CPU xsave support. */
	return (fpu_xsave_enabled());
}
256 256
257 257 /*
258 258 * Return the minimum size that we need to determine the full size of a
259 259 * prxregset_t.
260 260 */
boolean_t
prwriteminxreg(size_t *sizep)
{
	/*
	 * The header is enough: prwritesizexreg() derives the remaining
	 * length from the pr_size field it contains.
	 */
	*sizep = sizeof (prxregset_hdr_t);
	return (B_TRUE);
}
267 267
268 268 /*
269 269 * This routine services both ILP32 and LP64 callers. We cannot assume anything
270 270 * about the alignment of argp and must bcopy things to known structures that we
271 271 * care about. We are guaranteed we have prxregset_hdr_t bytes because we asked
272 272 * for them above.
273 273 */
boolean_t
prwritesizexreg(const void *argp, size_t *sizep)
{
	prxregset_hdr_t hdr;

	/*
	 * While it's tempting to validate everything here, the only thing we
	 * care about is that we understand the type and the size meets our
	 * constraints:
	 *
	 *  o  We actually have an item of type PR_TYPE_XSAVE, otherwise we
	 *     don't know what this is.
	 *  o  The indicated size actually contains at least the
	 *     prxregset_hdr_t.
	 *  o  The indicated size isn't larger than what the FPU tells us is
	 *     allowed.
	 *
	 * We do not check if the rest of the structure makes semantic sense at
	 * this point. We save all other validation for the normal set function
	 * as that's when we'll have the rest of our data.
	 */
	bcopy(argp, &hdr, sizeof (hdr));
	if (hdr.pr_type != PR_TYPE_XSAVE ||
	    hdr.pr_size > fpu_proc_xregs_max_size() ||
	    hdr.pr_size < sizeof (prxregset_hdr_t)) {
		return (B_FALSE);
	}

	/* Report how many payload bytes remain beyond the header. */
	*sizep = hdr.pr_size - sizeof (prxregset_hdr_t);
	return (B_TRUE);
}
305 305
306 306 /*
307 307 * Get the size of the extra registers. The ultimate size here depends on a
308 308 * combination of a few different things. Right now the xregs always have our
309 309 * header, the illumos-specific XCR information, the xsave information, and then
310 310 * otherwise this varies based on the items that the CPU supports.
311 311 *
312 312 * The ultimate size here is going to be:
313 313 *
314 314 * o 1x prxregset_hdr_t
315 315 * o n prxregset_info_t structures
316 316 * o The individual data for each one
317 317 */
size_t
prgetprxregsize(proc_t *p)
{
	uint32_t size;

	/* Only the total size is needed; skip the info/kind out-params. */
	fpu_proc_xregs_info(p, NULL, &size, NULL);
	return (size);
}
326 326
327 327 /*
328 328 * Get extra registers.
329 329 */
void
prgetprxregs(klwp_t *lwp, prxregset_t *prx)
{
	/* The FPU code fills in the complete xregs structure. */
	fpu_proc_xregs_get(lwp, prx);
}
335 335
336 336 /*
337 337 * Set extra registers.
338 338 *
339 339 * We've been given a regset to set. Before we hand it off to the FPU, we have
340 340 * to go through and make sure that the different parts of this actually make
341 341 * sense. The kernel has guaranteed us through the functions above that we have
342 342 * the number of bytes that the header indicates are present. In particular we
343 343 * need to validate:
344 344 *
345 345 * o The information in the header is reasonable: we have a known type, flags
346 346 * and padding are zero, and there is at least one info structure.
347 347 * o Each of the info structures has a valid type, size, and fits within the
348 348 * data we were given.
349 349 * o We do not validate or modify the actual data in the different pieces for
350 350 * validity. That is considered something that the FPU does. Similarly if
351 351 * something is read-only or not used, that is something that it checks.
352 352 *
353 353 * While we would like to return something other than EINVAL, the /proc APIs
354 354 * pretty much lead that to being the primary errno for all sorts of situations.
355 355 */
int
prsetprxregs(klwp_t *lwp, prxregset_t *prx)
{
	size_t infosz;
	prxregset_hdr_t *hdr = (prxregset_hdr_t *)prx;

	/* The header itself must be well-formed before pr_ninfo is trusted. */
	if (hdr->pr_type != PR_TYPE_XSAVE || hdr->pr_flags != 0 ||
	    hdr->pr_pad[0] != 0 || hdr->pr_pad[1] != 0 || hdr->pr_pad[2] != 0 ||
	    hdr->pr_pad[3] != 0 || hdr->pr_ninfo == 0) {
		return (EINVAL);
	}

	/* The header plus all info structures must fit in pr_size. */
	infosz = hdr->pr_ninfo * sizeof (prxregset_info_t) +
	    sizeof (prxregset_hdr_t);
	if (infosz > hdr->pr_size) {
		return (EINVAL);
	}

	for (uint32_t i = 0; i < hdr->pr_ninfo; i++) {
		uint32_t exp_size;
		size_t need_len, exp_align;
		const prxregset_info_t *info = &hdr->pr_info[i];

		/* Map each known info type to its expected size/alignment. */
		switch (info->pri_type) {
		case PRX_INFO_XCR:
			exp_size = sizeof (prxregset_xcr_t);
			exp_align = alignof (prxregset_xcr_t);
			break;
		case PRX_INFO_XSAVE:
			exp_size = sizeof (prxregset_xsave_t);
			exp_align = alignof (prxregset_xsave_t);
			break;
		case PRX_INFO_YMM:
			exp_size = sizeof (prxregset_ymm_t);
			exp_align = alignof (prxregset_ymm_t);
			break;
		case PRX_INFO_OPMASK:
			exp_size = sizeof (prxregset_opmask_t);
			exp_align = alignof (prxregset_opmask_t);
			break;
		case PRX_INFO_ZMM:
			exp_size = sizeof (prxregset_zmm_t);
			exp_align = alignof (prxregset_zmm_t);
			break;
		case PRX_INFO_HI_ZMM:
			exp_size = sizeof (prxregset_hi_zmm_t);
			exp_align = alignof (prxregset_hi_zmm_t);
			break;
		default:
			return (EINVAL);
		}

		if (info->pri_flags != 0 || info->pri_size != exp_size) {
			return (EINVAL);
		}

		if ((info->pri_offset % exp_align) != 0) {
			return (EINVAL);
		}

		/*
		 * No bytes of this item's entry should overlap with the
		 * information area. If users want to overlap the actual data
		 * information for some odd reason, we don't check that and let
		 * them do what they want. However, the total data for this
		 * region must actually fit. Because exp_size and pri_offset are
		 * uint32_t's, we can sum them without overflow worries in an
		 * LP64 environment.
		 *
		 * While we try to guarantee alignment when writing this
		 * structure out to userland, that is in no way a requirement
		 * and users are allowed to start these structures wherever
		 * they want. Hence that is not checked here.
		 */
		need_len = (size_t)exp_size + (size_t)info->pri_offset;
		if (info->pri_offset < infosz ||
		    need_len > (size_t)hdr->pr_size) {
			return (EINVAL);
		}
	}

	/* Structure is sound; semantic validation belongs to the FPU code. */
	return (fpu_proc_xregs_set(lwp, prx));
}
439 439
440 440 /*
441 441 * Return the base (lower limit) of the process stack.
442 442 */
443 443 caddr_t
444 444 prgetstackbase(proc_t *p)
445 445 {
446 446 return (p->p_usrstack - p->p_stksize);
447 447 }
448 448
449 449 /*
450 450 * Return the "addr" field for pr_addr in prpsinfo_t.
451 451 * This is a vestige of the past, so whatever we return is OK.
452 452 */
453 453 caddr_t
454 454 prgetpsaddr(proc_t *p)
455 455 {
456 456 return ((caddr_t)p);
457 457 }
458 458
459 459 /*
460 460 * Arrange to single-step the lwp.
461 461 */
void
prstep(klwp_t *lwp, int watchstep)
{
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	/*
	 * flag LWP so that its r_efl trace bit (PS_T) will be set on
	 * next return to usermode.
	 */
	lwp->lwp_pcb.pcb_flags |= REQUEST_STEP;
	lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;

	/* Distinguish watchpoint stepping from ordinary single-stepping. */
	if (watchstep)
		lwp->lwp_pcb.pcb_flags |= WATCH_STEP;
	else
		lwp->lwp_pcb.pcb_flags |= NORMAL_STEP;

	aston(lwptot(lwp));	/* let trap() set PS_T in rp->r_efl */
}
481 481
482 482 /*
483 483 * Undo prstep().
484 484 */
void
prnostep(klwp_t *lwp)
{
	/* May be called on the current lwp, or on another with p_lock free. */
	ASSERT(ttolwp(curthread) == lwp ||
	    MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	/*
	 * flag LWP so that its r_efl trace bit (PS_T) will be cleared on
	 * next return to usermode.
	 */
	lwp->lwp_pcb.pcb_flags |= REQUEST_NOSTEP;

	/* Clear every outstanding stepping state at once. */
	lwp->lwp_pcb.pcb_flags &=
	    ~(REQUEST_STEP|NORMAL_STEP|WATCH_STEP|DEBUG_PENDING);

	aston(lwptot(lwp));	/* let trap() clear PS_T in rp->r_efl */
}
502 502
503 503 /*
504 504 * Return non-zero if a single-step is in effect.
505 505 */
int
prisstep(klwp_t *lwp)
{
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	/* Any pending step/debug state counts as "single-step in effect". */
	return ((lwp->lwp_pcb.pcb_flags &
	    (NORMAL_STEP|WATCH_STEP|DEBUG_PENDING)) != 0);
}
514 514
515 515 /*
516 516 * Set the PC to the specified virtual address.
517 517 */
void
prsvaddr(klwp_t *lwp, caddr_t vaddr)
{
	struct regs *r = lwptoregs(lwp);

	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	/* No canonicality/validity check here; a bad PC faults in usermode. */
	r->r_pc = (uintptr_t)vaddr;
}
527 527
528 528 /*
529 529 * Map address "addr" in address space "as" into a kernel virtual address.
530 530 * The memory is guaranteed to be resident and locked down.
531 531 */
caddr_t
prmapin(struct as *as, caddr_t addr, int writing)
{
	page_t *pp;
	caddr_t kaddr;
	pfn_t pfnum;

	/*
	 * XXX - Because of past mistakes, we have bits being returned
	 * by getpfnum that are actually the page type bits of the pte.
	 * When the object we are trying to map is a memory page with
	 * a page structure everything is ok and we can use the optimal
	 * method, ppmapin. Otherwise, we have to do something special.
	 */
	pfnum = hat_getpfnum(as->a_hat, addr);
	if (pf_is_memory(pfnum)) {
		pp = page_numtopp_nolock(pfnum);
		if (pp != NULL) {
			ASSERT(PAGE_LOCKED(pp));
			kaddr = ppmapin(pp, writing ?
			    (PROT_READ | PROT_WRITE) : PROT_READ, (caddr_t)-1);
			/* Preserve the intra-page offset of the target. */
			return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
		}
	}

	/*
	 * Oh well, we didn't have a page struct for the object we were
	 * trying to map in; ppmapin doesn't handle devices, but allocating a
	 * heap address allows ppmapout to free virtual space when done.
	 */
	kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_devload(kas.a_hat, kaddr, MMU_PAGESIZE, pfnum,
	    writing ? (PROT_READ | PROT_WRITE) : PROT_READ, 0);

	/* Same offset preservation as the memory-page path above. */
	return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
}
569 569
570 570 /*
571 571 * Unmap address "addr" in address space "as"; inverse of prmapin().
572 572 */
/* ARGSUSED */
void
prmapout(struct as *as, caddr_t addr, caddr_t vaddr, int writing)
{
	extern void ppmapout(caddr_t);

	/* Round down to the page boundary that prmapin() mapped. */
	vaddr = (caddr_t)((uintptr_t)vaddr & PAGEMASK);
	ppmapout(vaddr);
}
582 582
583 583 /*
584 584 * Make sure the lwp is in an orderly state
585 585 * for inspection by a debugger through /proc.
586 586 *
587 587 * This needs to be called only once while the current thread remains in the
588 588 * kernel and needs to be called while holding no resources (mutex locks, etc).
589 589 *
590 590 * As a hedge against these conditions, if prstop() is called repeatedly
591 591 * before prunstop() is called, it does nothing and just returns.
592 592 *
593 593 * prunstop() must be called before the thread returns to user level.
594 594 */
/* ARGSUSED */
void
prstop(int why, int what)
{
	klwp_t *lwp = ttolwp(curthread);
	struct regs *r = lwptoregs(lwp);

	/* Hedge: do nothing if already called since the last prunstop(). */
	if (lwp->lwp_pcb.pcb_flags & PRSTOP_CALLED)
		return;

	/*
	 * Make sure we don't deadlock on a recursive call
	 * to prstop(). stop() tests the lwp_nostop flag.
	 */
	ASSERT(lwp->lwp_nostop == 0);
	lwp->lwp_nostop = 1;

	/*
	 * Snapshot the current user instruction for prfetchinstr();
	 * mark it invalid if the copyin fails.
	 */
	if (copyin_nowatch((caddr_t)r->r_pc, &lwp->lwp_pcb.pcb_instr,
	    sizeof (lwp->lwp_pcb.pcb_instr)) == 0)
		lwp->lwp_pcb.pcb_flags |= INSTR_VALID;
	else {
		lwp->lwp_pcb.pcb_flags &= ~INSTR_VALID;
		lwp->lwp_pcb.pcb_instr = 0;
	}

	(void) save_syscall_args();
	ASSERT(lwp->lwp_nostop == 1);
	lwp->lwp_nostop = 0;

	lwp->lwp_pcb.pcb_flags |= PRSTOP_CALLED;
	aston(curthread);	/* so prunstop() will be called */
}
627 627
628 628 /*
629 629 * Inform prstop() that it should do its work again
630 630 * the next time it is called.
631 631 */
void
prunstop(void)
{
	/* Re-arm prstop() for the current lwp. */
	ttolwp(curthread)->lwp_pcb.pcb_flags &= ~PRSTOP_CALLED;
}
637 637
638 638 /*
639 639 * Fetch the user-level instruction on which the lwp is stopped.
640 640 * It was saved by the lwp itself, in prstop().
641 641 * Return non-zero if the instruction is valid.
642 642 */
int
prfetchinstr(klwp_t *lwp, ulong_t *ip)
{
	/* Narrow to instr_t first, then widen: returns the saved opcode. */
	*ip = (ulong_t)(instr_t)lwp->lwp_pcb.pcb_instr;
	return (lwp->lwp_pcb.pcb_flags & INSTR_VALID);
}
649 649
650 650 /*
651 651 * Called from trap() when a load or store instruction
652 652 * falls in a watched page but is not a watchpoint.
653 653 * We emulate the instruction in the kernel.
654 654 */
655 655 /* ARGSUSED */
656 656 int
657 657 pr_watch_emul(struct regs *rp, caddr_t addr, enum seg_rw rw)
658 658 {
659 659 #ifdef SOMEDAY
660 660 int res;
661 661 proc_t *p = curproc;
662 662 char *badaddr = (caddr_t)(-1);
663 663 int mapped;
664 664
665 665 /* prevent recursive calls to pr_watch_emul() */
666 666 ASSERT(!(curthread->t_flag & T_WATCHPT));
667 667 curthread->t_flag |= T_WATCHPT;
668 668
669 669 watch_disable_addr(addr, 8, rw);
670 670 res = do_unaligned(rp, &badaddr);
671 671 watch_enable_addr(addr, 8, rw);
672 672
673 673 curthread->t_flag &= ~T_WATCHPT;
674 674 if (res == SIMU_SUCCESS) {
675 675 /* adjust the pc */
676 676 return (1);
677 677 }
678 678 #endif
679 679 return (0);
680 680 }
681 681
682 682 /*
683 683 * Return the number of active entries in the local descriptor table.
684 684 */
int
prnldt(proc_t *p)
{
	int limit, i, n;
	user_desc_t *udp;

	/* p_ldtlock protects both p_ldt and p_ldtlimit. */
	ASSERT(MUTEX_HELD(&p->p_ldtlock));

	/*
	 * Currently 64 bit processes cannot have private LDTs.
	 */
	ASSERT(p->p_model != DATAMODEL_LP64 || p->p_ldt == NULL);

	if (p->p_ldt == NULL)
		return (0);
	n = 0;
	limit = p->p_ldtlimit;
	ASSERT(limit >= 0 && limit < MAXNLDT);

	/*
	 * Count all present user descriptors.
	 */
	for (i = LDT_UDBASE, udp = &p->p_ldt[i]; i <= limit; i++, udp++)
		if (udp->usd_type != 0 || udp->usd_dpl != 0 || udp->usd_p != 0)
			n++;
	return (n);
}
712 712
713 713 /*
714 714 * Fetch the active entries from the local descriptor table.
715 715 */
void
prgetldt(proc_t *p, struct ssd *ssd)
{
	int i, limit;
	user_desc_t *udp;

	/*
	 * p_ldtlock protects p_ldt/p_ldtlimit. The ssd buffer must hold
	 * at least prnldt() entries, counted under the same lock hold.
	 */
	ASSERT(MUTEX_HELD(&p->p_ldtlock));

	if (p->p_ldt == NULL)
		return;

	limit = p->p_ldtlimit;
	ASSERT(limit >= 0 && limit < MAXNLDT);

	/*
	 * All present user descriptors.
	 */
	for (i = LDT_UDBASE, udp = &p->p_ldt[i]; i <= limit; i++, udp++)
		if (udp->usd_type != 0 || udp->usd_dpl != 0 ||
		    udp->usd_p != 0)
			usd_to_ssd(udp, ssd++, SEL_LDT(i));
}
|
↓ open down ↓ |
737 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX