Print this page
Initial LX fix for IPD 38
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/intel/brand/lx/lx_archdep.c
+++ new/usr/src/uts/intel/brand/lx/lx_archdep.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2018 Joyent, Inc.
14 14 */
15 15
16 16 /*
17 17 * LX brand Intel-specific routines.
18 18 */
19 19
20 20 #include <sys/types.h>
21 21 #include <sys/sunddi.h>
22 22 #include <sys/ddi.h>
23 23 #include <sys/brand.h>
24 24 #include <sys/lx_brand.h>
25 25 #include <sys/lx_misc.h>
26 26 #include <sys/privregs.h>
27 27 #include <sys/pcb.h>
28 28 #include <sys/archsystm.h>
29 29 #include <sys/stack.h>
30 30 #include <sys/sdt.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/psw.h>
33 33 #include <lx_errno.h>
34 34
35 35 /*
36 36 * Argument constants for fix_segreg.
37 37 * See usr/src/uts/intel/ia32/os/archdep.c for the originals.
38 38 */
39 39 #define IS_CS 1
40 40 #define IS_NOT_CS 0
41 41
42 42 extern greg_t fix_segreg(greg_t, int, model_t);
43 43
44 44
45 45 #define LX_REG(ucp, r) ((ucp)->uc_mcontext.gregs[(r)])
46 46
47 47 #define PSLMERGE(oldval, newval) \
48 48 (((oldval) & ~PSL_USERMASK) | ((newval) & PSL_USERMASK))
49 49
50 50 #ifdef __amd64
/*
 * 64-bit native user_regs_struct.
 *
 * This mirrors the Linux 64-bit user_regs_struct layout consumed by
 * ptrace(2) users (gdb, strace); field order and widths are ABI —
 * do not reorder or resize.
 */
typedef struct lx_user_regs64 {
	int64_t	lxur_r15;
	int64_t	lxur_r14;
	int64_t	lxur_r13;
	int64_t	lxur_r12;
	int64_t	lxur_rbp;
	int64_t	lxur_rbx;
	int64_t	lxur_r11;
	int64_t	lxur_r10;
	int64_t	lxur_r9;
	int64_t	lxur_r8;
	int64_t	lxur_rax;
	int64_t	lxur_rcx;
	int64_t	lxur_rdx;
	int64_t	lxur_rsi;
	int64_t	lxur_rdi;
	int64_t	lxur_orig_rax;	/* syscall number at a syscall stop */
	int64_t	lxur_rip;
	int64_t	lxur_xcs;	/* emulated; see LX_CS_* below */
	int64_t	lxur_rflags;
	int64_t	lxur_rsp;
	int64_t	lxur_xss;
	int64_t	lxur_xfs_base;
	int64_t	lxur_xgs_base;
	int64_t	lxur_xds;
	int64_t	lxur_xes;
	int64_t	lxur_xfs;
	int64_t	lxur_xgs;
} lx_user_regs64_t;
81 81
/*
 * 64-bit native user_fpregs_struct.
 *
 * Matches the Linux FXSAVE-style FP state layout; field order and widths
 * are ABI — do not reorder or resize.
 */
typedef struct lx_user_fpregs64 {
	uint16_t	lxufp_cwd;	/* FPU control word */
	uint16_t	lxufp_swd;	/* FPU status word */
	uint16_t	lxufp_ftw;	/* FPU tag word */
	uint16_t	lxufp_fop;	/* last opcode */
	uint64_t	lxufp_rip;	/* last instruction pointer */
	uint64_t	lxufp_rdp;	/* last data pointer */
	uint32_t	lxufp_mxcsr;
	uint32_t	lxufp_mxcr_mask;
	/* 8*16 bytes for each FP-reg = 128 bytes */
	uint32_t	lxufp_st_space[32];
	/* 16*16 bytes for each XMM-reg = 256 bytes */
	uint32_t	lxufp_xmm_space[64];
	uint32_t	lxufp_padding[24];
} lx_user_fpregs64_t;
98 98
/*
 * 64-bit native user_struct.
 *
 * The aggregate "struct user" exposed via PTRACE_PEEKUSER; the register
 * area must come first (see lx_peekuser64).  Layout is ABI.
 */
typedef struct lx_user64 {
	lx_user_regs64_t	lxu_regs;
	int32_t			lxu_fpvalid;
	int32_t			lxu_pad0;
	lx_user_fpregs64_t	lxu_i387;
	uint64_t		lxu_tsize;
	uint64_t		lxu_dsize;
	uint64_t		lxu_ssize;
	uint64_t		lxu_start_code;
	uint64_t		lxu_start_stack;
	int64_t			lxu_signal;
	int32_t			lxu_reserved;
	int32_t			lxu_pad1;
	/* help gdb to locate user_regs structure */
	caddr_t			lxu_ar0;
	/* help gdb to locate user_fpregs structure */
	caddr_t			lxu_fpstate;
	uint64_t		lxu_magic;
	char			lxu_comm[32];
	uint64_t		lxu_debugreg[8];
	uint64_t		lxu_error_code;
	uint64_t		lxu_fault_address;
} lx_user64_t;
123 123
124 124 #endif /* __amd64 */
125 125
/*
 * 32-bit native user_regs_struct.
 *
 * Mirrors the Linux i386 user_regs_struct layout; field order and widths
 * are ABI — do not reorder or resize.
 */
typedef struct lx_user_regs32 {
	int32_t	lxur_ebx;
	int32_t	lxur_ecx;
	int32_t	lxur_edx;
	int32_t	lxur_esi;
	int32_t	lxur_edi;
	int32_t	lxur_ebp;
	int32_t	lxur_eax;
	int32_t	lxur_xds;
	int32_t	lxur_xes;
	int32_t	lxur_xfs;
	int32_t	lxur_xgs;
	int32_t	lxur_orig_eax;	/* syscall number at a syscall stop */
	int32_t	lxur_eip;
	int32_t	lxur_xcs;	/* emulated; see LX_CS_* below */
	int32_t	lxur_eflags;
	int32_t	lxur_esp;
	int32_t	lxur_xss;
} lx_user_regs32_t;
146 146
/*
 * 32-bit native user_fpregs_struct.
 *
 * Classic i387 FSAVE-style FP state layout (27 words); ABI — do not
 * reorder or resize.
 */
typedef struct lx_user_fpregs32 {
	int32_t	lxufp_cwd;	/* FPU control word */
	int32_t	lxufp_swd;	/* FPU status word */
	int32_t	lxufp_twd;	/* FPU tag word */
	int32_t	lxufp_fip;	/* last instruction pointer */
	int32_t	lxufp_fcs;	/* last instruction %cs */
	int32_t	lxufp_foo;	/* last operand offset */
	int32_t	lxufp_fos;	/* last operand selector */
	int32_t	lxufp_st_space[20];
} lx_user_fpregs32_t;
158 158
/*
 * 32-bit native user_fpxregs_struct.
 *
 * FXSAVE-style extended FP/SSE state for 32-bit processes; ABI — do not
 * reorder or resize.
 */
typedef struct lx_user_fpxregs32 {
	uint16_t	lxufpx_cwd;	/* FPU control word */
	uint16_t	lxufpx_swd;	/* FPU status word */
	uint16_t	lxufpx_twd;	/* FPU tag word */
	uint16_t	lxufpx_fop;	/* last opcode */
	int32_t		lxufpx_fip;
	int32_t		lxufpx_fcs;
	int32_t		lxufpx_foo;
	int32_t		lxufpx_fos;
	int32_t		lxufpx_mxcsr;
	int32_t		lxufpx_reserved;
	/* 8*16 bytes for each FP-reg = 128 bytes */
	int32_t		lxufpx_st_space[32];
	/* 8*16 bytes for each XMM-reg = 128 bytes */
	int32_t		lxufpx_xmm_space[32];
	int32_t		lxufpx_padding[56];
} lx_user_fpxregs32_t;
177 177
/*
 * 32-bit native user_struct.
 *
 * The aggregate "struct user" exposed via PTRACE_PEEKUSER for 32-bit
 * tracees; the register area must come first (see lx_peekuser32).
 * Layout is ABI.
 */
typedef struct lx_user32 {
	lx_user_regs32_t	lxu_regs;
	int32_t			lxu_fpvalid;
	lx_user_fpregs32_t	lxu_i387;
	uint32_t		lxu_tsize;
	uint32_t		lxu_dsize;
	uint32_t		lxu_ssize;
	uint32_t		lxu_start_code;
	uint32_t		lxu_start_stack;
	int32_t			lxu_signal;
	int32_t			lxu_reserved;
	/* help gdb to locate user_regs structure */
	caddr32_t		lxu_ar0;
	/* help gdb to locate user_fpregs structure */
	caddr32_t		lxu_fpstate;
	uint32_t		lxu_magic;
	char			lxu_comm[32];
	int32_t			lxu_debugreg[8];
} lx_user32_t;
196 196
/*
 * Certain versions of strace (on centos6 for example) use the %cs value to
 * determine what kind of process is being traced. Here is a sample comment:
 *	Check CS register value. On x86-64 linux it is:
 *	    0x33 for long mode (64 bit and x32))
 *	    0x23 for compatibility mode (32 bit)
 *	    %ds = 0x2b for x32 mode (x86-64 in 32 bit)
 * We can't change the %cs value in the ucp (see setgregs and _sys_rtt) so we
 * emulate the expected value for ptrace use.
 */
#define	LX_CS_64BIT	0x33
#define	LX_CS_32BIT	0x23

extern int getsetcontext(int, void *);
#if defined(_SYSCALL32_IMPL)
extern int getsetcontext32(int, void *);
#endif
214 214
/*
 * Transfer a ucontext between kernel memory (kucp) and the target process's
 * address space (ucp), reading or writing according to "writing".  The
 * target process may or may not be the current process.
 *
 * Caller must hold p->p_lock; the lock is held again on return.  Returns 0
 * on success or the non-zero uread()/uwrite() error.
 */
static int
lx_rw_uc(proc_t *p, void *ucp, void *kucp, size_t ucsz, boolean_t writing)
{
	int error = 0;
	size_t rem = ucsz;	/* bytes still to transfer */
	off_t pos = 0;		/* offset of the next chunk */

	VERIFY(MUTEX_HELD(&p->p_lock));

	/*
	 * Grab P_PR_LOCK so that we can drop p_lock while doing I/O.
	 */
	sprlock_proc(p);

	/*
	 * Drop p_lock while we do I/O to avoid deadlock with the clock thread.
	 */
	mutex_exit(&p->p_lock);
	while (rem != 0) {
		uintptr_t addr = (uintptr_t)ucp + pos;
		/* Transfer at most up to the next page boundary per pass. */
		size_t len = MIN(rem, PAGESIZE - (addr & PAGEOFFSET));

		if (writing) {
			error = uwrite(p, (caddr_t)kucp + pos, len, addr);
		} else {
			error = uread(p, (caddr_t)kucp + pos, len, addr);
		}

		if (error != 0) {
			break;
		}

		rem -= len;
		pos += len;
	}
	mutex_enter(&p->p_lock);

	/*
	 * sprunlock() releases P_PR_LOCK and drops p_lock as well, so
	 * re-acquire p_lock afterwards to restore the caller's invariant.
	 */
	sprunlock(p);
	mutex_enter(&p->p_lock);

	return (error);
}
257 257
/*
 * Read a ucontext_t from the target process, which may or may not be
 * the current process.
 *
 * Caller must hold p->p_lock (see lx_rw_uc).  Returns 0 on success.
 */
static int
lx_read_uc(proc_t *p, void *ucp, void *kucp, size_t ucsz)
{
	return (lx_rw_uc(p, ucp, kucp, ucsz, B_FALSE));
}
267 267
/*
 * Write a ucontext_t to the target process, which may or may not be
 * the current process.
 *
 * Caller must hold p->p_lock (see lx_rw_uc).  Returns 0 on success.
 */
static int
lx_write_uc(proc_t *p, void *ucp, void *kucp, size_t ucsz)
{
	return (lx_rw_uc(p, ucp, kucp, ucsz, B_TRUE));
}
277 277
/*
 * Fetch the given branded LWP's FPU state in 32-bit Linux user_fpregs
 * format.  On amd64 the 32-bit compatibility getter is used; on i386 the
 * native one.
 */
static void
lx_getfpregs32(lx_lwp_data_t *lwpd, lx_user_fpregs32_t *lfp)
{
#ifdef __amd64
	fpregset32_t fp;
	getfpregs32(lwpd->br_lwp, &fp);
#else /* __i386 */
	fpregset_t fp;
	getfpregs(lwpd->br_lwp, &fp);
#endif /* __amd64 */

	/*
	 * The fpchip_state.state field should correspond to all 27 fields in
	 * the 32-bit structure.
	 */
	bcopy(&fp.fp_reg_set.fpchip_state.state, lfp, sizeof (*lfp));
}
295 295
296 296 static void
297 297 lx_setfpregs32(lx_lwp_data_t *lwpd, lx_user_fpregs32_t *lfp)
298 298 {
299 299 #ifdef __amd64
300 300 fpregset32_t fp;
301 301 #else /* __i386 */
302 302 fpregset_t fp;
303 303 #endif /* __amd64 */
304 304
305 305 /*
306 306 * The fpchip_state field should correspond to all 27 fields in the
307 307 * native 32-bit structure.
308 308 */
309 309 bcopy(lfp, &fp.fp_reg_set.fpchip_state.state, sizeof (*lfp));
310 310
311 311 #ifdef __amd64
312 312 setfpregs32(lwpd->br_lwp, &fp);
313 313 #else /* __i386 */
314 314 setfpregs(lwpd->br_lwp, &fp);
315 315 #endif /* __amd64 */
316 316 }
317 317
/*
 * Fill a 32-bit Linux user_regs_struct from a ucontext32_t stored in the
 * traced process at "ucp".  Returns 0 on success, -1 if the ucontext could
 * not be read.
 */
static int
lx_get_user_regs32_uc(klwp_t *lwp, void *ucp, lx_user_regs32_t *lxrp)
{
	proc_t *p = lwptoproc(lwp);
	ucontext32_t uc;

	if (lx_read_uc(p, ucp, &uc, sizeof (uc)) != 0) {
		return (-1);
	}

	lxrp->lxur_ebx = LX_REG(&uc, EBX);
	lxrp->lxur_ecx = LX_REG(&uc, ECX);
	lxrp->lxur_edx = LX_REG(&uc, EDX);
	lxrp->lxur_esi = LX_REG(&uc, ESI);
	lxrp->lxur_edi = LX_REG(&uc, EDI);
	lxrp->lxur_ebp = LX_REG(&uc, EBP);
	lxrp->lxur_eax = LX_REG(&uc, EAX);
	/* Not at a syscall stop, so there is no "original" syscall number. */
	lxrp->lxur_orig_eax = 0;

	lxrp->lxur_eip = LX_REG(&uc, EIP);
	lxrp->lxur_eflags = LX_REG(&uc, EFL);
	lxrp->lxur_esp = LX_REG(&uc, UESP);
	lxrp->lxur_xss = LX_REG(&uc, SS);

	/* emulated %cs, see defines */
	lxrp->lxur_xcs = LX_CS_32BIT;
	lxrp->lxur_xds = LX_REG(&uc, DS);
	lxrp->lxur_xes = LX_REG(&uc, ES);
	lxrp->lxur_xfs = LX_REG(&uc, FS);
	lxrp->lxur_xgs = LX_REG(&uc, GS);
	return (0);
}
350 350
/*
 * Produce the 32-bit Linux user_regs_struct view of the given branded LWP.
 * The registers are pulled either from a stored ucontext or from the LWP's
 * saved regs, depending on lx_regs_location().  The tracee must be a
 * 32-bit process.  Returns 0 on success, -1 on failure.
 */
static int
lx_get_user_regs32(lx_lwp_data_t *lwpd, lx_user_regs32_t *lxrp)
{
	klwp_t *lwp = lwpd->br_lwp;
	struct regs *rp = lwptoregs(lwp);
	void *ucp;
#ifdef __amd64
	struct pcb *pcb = &lwp->lwp_pcb;
#endif

	VERIFY(lwp_getdatamodel(lwp) == DATAMODEL_ILP32);

	switch (lx_regs_location(lwpd, &ucp, B_FALSE)) {
	case LX_REG_LOC_UNAVAIL:
		return (-1);

	case LX_REG_LOC_UCP:
		return (lx_get_user_regs32_uc(lwp, ucp, lxrp));

	case LX_REG_LOC_LWP:
		/* transformation below */
		break;

	default:
		VERIFY(0);
		break;
	}

#ifdef __amd64
	lxrp->lxur_ebx = (int32_t)rp->r_rbx;
	lxrp->lxur_ecx = (int32_t)rp->r_rcx;
	lxrp->lxur_edx = (int32_t)rp->r_rdx;
	lxrp->lxur_esi = (int32_t)rp->r_rsi;
	lxrp->lxur_edi = (int32_t)rp->r_rdi;
	lxrp->lxur_ebp = (int32_t)rp->r_rbp;
	lxrp->lxur_eax = (int32_t)rp->r_rax;
	lxrp->lxur_orig_eax = 0;
	lxrp->lxur_eip = (int32_t)rp->r_rip;
	lxrp->lxur_eflags = (int32_t)rp->r_rfl;
	lxrp->lxur_esp = (int32_t)rp->r_rsp;
	lxrp->lxur_xss = (int32_t)rp->r_ss;

	/*
	 * Segment selectors may be pending in the PCB rather than present in
	 * the saved regs; prefer the PCB copy when an update is outstanding.
	 */
	kpreempt_disable();
	if (PCB_NEED_UPDATE_SEGS(pcb)) {
		lxrp->lxur_xds = pcb->pcb_ds;
		lxrp->lxur_xes = pcb->pcb_es;
		lxrp->lxur_xfs = pcb->pcb_fs;
		lxrp->lxur_xgs = pcb->pcb_gs;
	} else {
		lxrp->lxur_xds = rp->r_ds;
		lxrp->lxur_xes = rp->r_es;
		lxrp->lxur_xfs = rp->r_fs;
		lxrp->lxur_xgs = rp->r_gs;
	}
	kpreempt_enable();
#else /* __i386 */
	lxrp->lxur_ebx = rp->r_ebx;
	lxrp->lxur_ecx = rp->r_ecx;
	lxrp->lxur_edx = rp->r_edx;
	lxrp->lxur_esi = rp->r_esi;
	lxrp->lxur_edi = rp->r_edi;
	lxrp->lxur_ebp = rp->r_ebp;
	lxrp->lxur_eax = rp->r_eax;
	lxrp->lxur_orig_eax = 0;
	lxrp->lxur_eip = rp->r_eip;
	lxrp->lxur_eflags = rp->r_efl;
	lxrp->lxur_esp = rp->r_esp;
	lxrp->lxur_xss = rp->r_ss;

	lxrp->lxur_xds = rp->r_ds;
	lxrp->lxur_xes = rp->r_es;
	lxrp->lxur_xfs = rp->r_fs;
	lxrp->lxur_xgs = rp->r_gs;
#endif /* __amd64 */

	/* emulated %cs, see defines */
	lxrp->lxur_xcs = LX_CS_32BIT;

	/*
	 * At a syscall stop, Linux reports the syscall number in orig_eax;
	 * on entry, %eax additionally holds a negative errno since the real
	 * return value does not exist yet.
	 */
	if (lwpd->br_ptrace_whatstop == LX_PR_SYSENTRY) {
		lxrp->lxur_eax = (int32_t)-lx_errno(ENOTSUP, EINVAL);
		lxrp->lxur_orig_eax = (int32_t)lwpd->br_syscall_num;
	} else if (lwpd->br_ptrace_whatstop == LX_PR_SYSEXIT) {
		lxrp->lxur_orig_eax = (int32_t)lwpd->br_syscall_num;
	}

	return (0);
}
438 438
/*
 * Apply a 32-bit Linux user_regs_struct to a ucontext32_t stored in the
 * traced process at "ucp".  Segment selectors are sanitized via
 * fix_segreg() and only user-modifiable eflags bits are merged; %cs is not
 * written since we emulate it on the way out.  Returns 0 on success, -1 on
 * read/write failure.
 */
static int
lx_set_user_regs32_uc(klwp_t *lwp, void *ucp, lx_user_regs32_t *lxrp)
{
	proc_t *p = lwptoproc(lwp);
	ucontext32_t uc;

	if (lx_read_uc(p, ucp, &uc, sizeof (uc)) != 0) {
		return (-1);
	}

	/*
	 * Note: we currently ignore "lxur_orig_rax" here since this
	 * path should not be used for system call stops.
	 */
	LX_REG(&uc, EBP) = lxrp->lxur_ebp;
	LX_REG(&uc, EBX) = lxrp->lxur_ebx;
	LX_REG(&uc, EAX) = lxrp->lxur_eax;
	LX_REG(&uc, ECX) = lxrp->lxur_ecx;
	LX_REG(&uc, EDX) = lxrp->lxur_edx;
	LX_REG(&uc, ESI) = lxrp->lxur_esi;
	LX_REG(&uc, EDI) = lxrp->lxur_edi;
	LX_REG(&uc, EIP) = lxrp->lxur_eip;
	LX_REG(&uc, EFL) = PSLMERGE(LX_REG(&uc, EFL), lxrp->lxur_eflags);
	LX_REG(&uc, UESP) = lxrp->lxur_esp;
	LX_REG(&uc, SS) = fix_segreg(lxrp->lxur_xss, IS_NOT_CS,
	    DATAMODEL_ILP32);

	/* %cs is ignored because of our lies */
	LX_REG(&uc, DS) = fix_segreg(lxrp->lxur_xds, IS_NOT_CS,
	    DATAMODEL_ILP32);
	LX_REG(&uc, ES) = fix_segreg(lxrp->lxur_xes, IS_NOT_CS,
	    DATAMODEL_ILP32);
	LX_REG(&uc, FS) = fix_segreg(lxrp->lxur_xfs, IS_NOT_CS,
	    DATAMODEL_ILP32);
	LX_REG(&uc, GS) = fix_segreg(lxrp->lxur_xgs, IS_NOT_CS,
	    DATAMODEL_ILP32);

	if (lx_write_uc(p, ucp, &uc, sizeof (uc)) != 0) {
		return (-1);
	}
	return (0);
}
481 481
/*
 * Apply a 32-bit Linux user_regs_struct to the given branded LWP, writing
 * either to a stored ucontext or to the LWP's saved regs depending on
 * lx_regs_location().  The tracee must be a 32-bit process.  Returns 0 on
 * success, -1 on failure.
 */
static int
lx_set_user_regs32(lx_lwp_data_t *lwpd, lx_user_regs32_t *lxrp)
{
	klwp_t *lwp = lwpd->br_lwp;
	struct regs *rp = lwptoregs(lwp);
	void *ucp;
#ifdef __amd64
	struct pcb *pcb = &lwp->lwp_pcb;
#endif

	VERIFY(lwp_getdatamodel(lwp) == DATAMODEL_ILP32);

	switch (lx_regs_location(lwpd, &ucp, B_TRUE)) {
	case LX_REG_LOC_UNAVAIL:
		return (-1);

	case LX_REG_LOC_UCP:
		return (lx_set_user_regs32_uc(lwp, ucp, lxrp));

	case LX_REG_LOC_LWP:
		/* transformation below */
		break;

	default:
		VERIFY(0);
		break;
	}

#ifdef __amd64
	rp->r_rbx = (int32_t)lxrp->lxur_ebx;
	rp->r_rcx = (int32_t)lxrp->lxur_ecx;
	rp->r_rdx = (int32_t)lxrp->lxur_edx;
	rp->r_rsi = (int32_t)lxrp->lxur_esi;
	rp->r_rdi = (int32_t)lxrp->lxur_edi;
	rp->r_rbp = (int32_t)lxrp->lxur_ebp;
	rp->r_rax = (int32_t)lxrp->lxur_eax;
	lwpd->br_syscall_num = (int)lxrp->lxur_orig_eax;
	rp->r_rip = (int32_t)lxrp->lxur_eip;
	/* Only user-modifiable eflags bits may be changed. */
	rp->r_rfl = (int32_t)PSLMERGE(rp->r_rfl, lxrp->lxur_eflags);
	rp->r_rsp = (int32_t)lxrp->lxur_esp;
	rp->r_ss = (int32_t)fix_segreg(lxrp->lxur_xss, IS_NOT_CS,
	    DATAMODEL_ILP32);

	/*
	 * Stage sanitized segment selectors in the PCB and flag them for
	 * reload on return to userland.
	 */
	kpreempt_disable();
	PCB_SET_UPDATE_SEGS(pcb);
	pcb->pcb_ds = fix_segreg(lxrp->lxur_xds, IS_NOT_CS, DATAMODEL_ILP32);
	pcb->pcb_es = fix_segreg(lxrp->lxur_xes, IS_NOT_CS, DATAMODEL_ILP32);
	pcb->pcb_fs = fix_segreg(lxrp->lxur_xfs, IS_NOT_CS, DATAMODEL_ILP32);
	pcb->pcb_gs = fix_segreg(lxrp->lxur_xgs, IS_NOT_CS, DATAMODEL_ILP32);
	kpreempt_enable();
#else /* __i386 */
	rp->r_ebx = lxrp->lxur_ebx;
	rp->r_ecx = lxrp->lxur_ecx;
	rp->r_edx = lxrp->lxur_edx;
	rp->r_esi = lxrp->lxur_esi;
	rp->r_edi = lxrp->lxur_edi;
	rp->r_ebp = lxrp->lxur_ebp;
	rp->r_eax = lxrp->lxur_eax;
	lwpd->br_syscall_num = (int)lxrp->lxur_orig_eax;
	rp->r_eip = lxrp->lxur_eip;
	rp->r_efl = PSLMERGE(rp->r_efl, lxrp->lxur_eflags);
	rp->r_esp = lxrp->lxur_esp;
	rp->r_ss = fix_segreg(lxrp->lxur_xss, IS_NOT_CS, DATAMODEL_ILP32);

	rp->r_ds = fix_segreg(lxrp->lxur_xds, IS_NOT_CS, DATAMODEL_ILP32);
	rp->r_es = fix_segreg(lxrp->lxur_xes, IS_NOT_CS, DATAMODEL_ILP32);
	rp->r_fs = fix_segreg(lxrp->lxur_xfs, IS_NOT_CS, DATAMODEL_ILP32);
	rp->r_gs = fix_segreg(lxrp->lxur_xgs, IS_NOT_CS, DATAMODEL_ILP32);
#endif /* __amd64 */

	return (0);
}
554 554
555 555 #ifdef __amd64
556 556
/*
 * Fetch the given branded LWP's FPU state in 64-bit Linux user_fpregs
 * format.
 */
static void
lx_getfpregs64(lx_lwp_data_t *lwpd, lx_user_fpregs64_t *lfp)
{
	fpregset_t fp;

	getfpregs(lwpd->br_lwp, &fp);
	/* Drop the extra illumos status/xstatus fields when copying state */
	bcopy(&fp.fp_reg_set.fpchip_state, lfp, sizeof (*lfp));
}
566 566
/*
 * Set the given branded LWP's FPU state from a 64-bit Linux user_fpregs
 * structure.
 */
static void
lx_setfpregs64(lx_lwp_data_t *lwpd, lx_user_fpregs64_t *lfp)
{
	fpregset_t fp;

	/*
	 * Since the Linux fpregs structure does not contain the same
	 * additional status register which illumos contains, we simply
	 * preserve the existing values when setting fp state.
	 */
	getfpregs(lwpd->br_lwp, &fp);

	/* Copy the identically formatted state */
	bcopy(lfp, &fp.fp_reg_set.fpchip_state, sizeof (*lfp));

	setfpregs(lwpd->br_lwp, &fp);
}
584 584
/*
 * Fill a 64-bit Linux user_regs_struct from a ucontext stored in the
 * traced process at "ucp".  Handles both 64-bit and 32-bit (compat)
 * tracees; for a 32-bit tracee the registers that do not exist in 32-bit
 * mode read as zero.  Returns 0 on success, -1 on read failure or an
 * unrecognized data model.
 */
static int
lx_get_user_regs64_uc(klwp_t *lwp, void *ucp, lx_user_regs64_t *lxrp)
{
	proc_t *p = lwptoproc(lwp);

	switch (lwp_getdatamodel(lwp)) {
	case DATAMODEL_LP64: {
		ucontext_t uc;

		if (lx_read_uc(p, ucp, &uc, sizeof (uc)) != 0) {
			return (-1);
		}

		lxrp->lxur_r15 = LX_REG(&uc, REG_R15);
		lxrp->lxur_r14 = LX_REG(&uc, REG_R14);
		lxrp->lxur_r13 = LX_REG(&uc, REG_R13);
		lxrp->lxur_r12 = LX_REG(&uc, REG_R12);
		lxrp->lxur_rbp = LX_REG(&uc, REG_RBP);
		lxrp->lxur_rbx = LX_REG(&uc, REG_RBX);
		lxrp->lxur_r11 = LX_REG(&uc, REG_R11);
		lxrp->lxur_r10 = LX_REG(&uc, REG_R10);
		lxrp->lxur_r9 = LX_REG(&uc, REG_R9);
		lxrp->lxur_r8 = LX_REG(&uc, REG_R8);
		lxrp->lxur_rax = LX_REG(&uc, REG_RAX);
		lxrp->lxur_rcx = LX_REG(&uc, REG_RCX);
		lxrp->lxur_rdx = LX_REG(&uc, REG_RDX);
		lxrp->lxur_rsi = LX_REG(&uc, REG_RSI);
		lxrp->lxur_rdi = LX_REG(&uc, REG_RDI);
		/* Not at a syscall stop; no "original" syscall number. */
		lxrp->lxur_orig_rax = 0;
		lxrp->lxur_rip = LX_REG(&uc, REG_RIP);
		lxrp->lxur_rflags = LX_REG(&uc, REG_RFL);
		lxrp->lxur_rsp = LX_REG(&uc, REG_RSP);
		lxrp->lxur_xss = LX_REG(&uc, REG_SS);
		lxrp->lxur_xfs_base = LX_REG(&uc, REG_FSBASE);
		lxrp->lxur_xgs_base = LX_REG(&uc, REG_GSBASE);

		lxrp->lxur_xds = LX_REG(&uc, REG_DS);
		lxrp->lxur_xes = LX_REG(&uc, REG_ES);
		lxrp->lxur_xfs = LX_REG(&uc, REG_FS);
		lxrp->lxur_xgs = LX_REG(&uc, REG_GS);

		/* emulated %cs, see defines */
		lxrp->lxur_xcs = LX_CS_64BIT;
		return (0);
	}

	case DATAMODEL_ILP32: {
		ucontext32_t uc;

		if (lx_read_uc(p, ucp, &uc, sizeof (uc)) != 0) {
			return (-1);
		}

		/* Registers with no 32-bit equivalent read as zero. */
		lxrp->lxur_r15 = 0;
		lxrp->lxur_r14 = 0;
		lxrp->lxur_r13 = 0;
		lxrp->lxur_r12 = 0;
		lxrp->lxur_r11 = 0;
		lxrp->lxur_r10 = 0;
		lxrp->lxur_r9 = 0;
		lxrp->lxur_r8 = 0;
		lxrp->lxur_rbp = LX_REG(&uc, EBP);
		lxrp->lxur_rbx = LX_REG(&uc, EBX);
		lxrp->lxur_rax = LX_REG(&uc, EAX);
		lxrp->lxur_orig_rax = 0;
		lxrp->lxur_rcx = LX_REG(&uc, ECX);
		lxrp->lxur_rdx = LX_REG(&uc, EDX);
		lxrp->lxur_rsi = LX_REG(&uc, ESI);
		lxrp->lxur_rdi = LX_REG(&uc, EDI);
		lxrp->lxur_rip = LX_REG(&uc, EIP);

		lxrp->lxur_rflags = LX_REG(&uc, EFL);
		lxrp->lxur_rsp = LX_REG(&uc, UESP);
		lxrp->lxur_xss = LX_REG(&uc, SS);
		lxrp->lxur_xfs_base = 0;
		lxrp->lxur_xgs_base = 0;

		lxrp->lxur_xds = LX_REG(&uc, DS);
		lxrp->lxur_xes = LX_REG(&uc, ES);
		lxrp->lxur_xfs = LX_REG(&uc, FS);
		lxrp->lxur_xgs = LX_REG(&uc, GS);

		/* See comment above re: %cs register */
		lxrp->lxur_xcs = LX_CS_32BIT;
		return (0);
	}

	default:
		break;
	}

	return (-1);
}
678 678
/*
 * Produce the 64-bit Linux user_regs_struct view of the given branded LWP,
 * pulling the registers either from a stored ucontext or from the LWP's
 * saved regs depending on lx_regs_location().  Returns 0 on success, -1 on
 * failure.
 */
static int
lx_get_user_regs64(lx_lwp_data_t *lwpd, lx_user_regs64_t *lxrp)
{
	klwp_t *lwp = lwpd->br_lwp;
	struct regs *rp = lwptoregs(lwp);
	struct pcb *pcb = &lwp->lwp_pcb;
	void *ucp;

	switch (lx_regs_location(lwpd, &ucp, B_FALSE)) {
	case LX_REG_LOC_UNAVAIL:
		return (-1);

	case LX_REG_LOC_UCP:
		return (lx_get_user_regs64_uc(lwp, ucp, lxrp));

	case LX_REG_LOC_LWP:
		/* transformation below */
		break;

	default:
		VERIFY(0);
		break;
	}

	lxrp->lxur_r15 = rp->r_r15;
	lxrp->lxur_r14 = rp->r_r14;
	lxrp->lxur_r13 = rp->r_r13;
	lxrp->lxur_r12 = rp->r_r12;
	lxrp->lxur_rbp = rp->r_rbp;
	lxrp->lxur_rbx = rp->r_rbx;
	lxrp->lxur_r11 = rp->r_r11;
	lxrp->lxur_r10 = rp->r_r10;
	lxrp->lxur_r9 = rp->r_r9;
	lxrp->lxur_r8 = rp->r_r8;
	lxrp->lxur_rax = rp->r_rax;
	lxrp->lxur_rcx = rp->r_rcx;
	lxrp->lxur_rdx = rp->r_rdx;
	lxrp->lxur_rsi = rp->r_rsi;
	lxrp->lxur_rdi = rp->r_rdi;
	lxrp->lxur_orig_rax = 0;
	lxrp->lxur_rip = rp->r_rip;

	lxrp->lxur_rflags = rp->r_rfl;
	lxrp->lxur_rsp = rp->r_rsp;
	lxrp->lxur_xss = rp->r_ss;
	lxrp->lxur_xfs_base = pcb->pcb_fsbase;
	lxrp->lxur_xgs_base = pcb->pcb_gsbase;

	/* emulated %cs, see defines */
	switch (lwp_getdatamodel(lwp)) {
	case DATAMODEL_LP64:
		lxrp->lxur_xcs = LX_CS_64BIT;
		break;
	case DATAMODEL_ILP32:
		lxrp->lxur_xcs = LX_CS_32BIT;
		break;
	default:
		VERIFY(0);
		break;
	}

	/*
	 * Segment selectors may be pending in the PCB rather than present in
	 * the saved regs; prefer the PCB copy when an update is outstanding.
	 */
	kpreempt_disable();
	if (PCB_NEED_UPDATE_SEGS(pcb)) {
		lxrp->lxur_xds = pcb->pcb_ds;
		lxrp->lxur_xes = pcb->pcb_es;
		lxrp->lxur_xfs = pcb->pcb_fs;
		lxrp->lxur_xgs = pcb->pcb_gs;
	} else {
		lxrp->lxur_xds = rp->r_ds;
		lxrp->lxur_xes = rp->r_es;
		lxrp->lxur_xfs = rp->r_fs;
		lxrp->lxur_xgs = rp->r_gs;
	}
	kpreempt_enable();

	/*
	 * At a syscall stop, Linux reports the syscall number in orig_rax;
	 * on entry, %rax additionally holds a negative errno since the real
	 * return value does not exist yet.
	 */
	if (lwpd->br_ptrace_whatstop == LX_PR_SYSENTRY) {
		lxrp->lxur_rax = -lx_errno(ENOTSUP, EINVAL);
		lxrp->lxur_orig_rax = lwpd->br_syscall_num;
	} else if (lwpd->br_ptrace_whatstop == LX_PR_SYSEXIT) {
		lxrp->lxur_orig_rax = lwpd->br_syscall_num;
	}

	return (0);
}
763 763
/*
 * Apply a 64-bit Linux user_regs_struct to a ucontext stored in the traced
 * process at "ucp".  Handles both 64-bit and 32-bit (compat) tracees; for
 * the latter, values are narrowed to 32 bits.  Segment selectors are
 * sanitized via fix_segreg() and only user-modifiable eflags bits are
 * merged; %cs is not written since we emulate it on the way out.  Returns
 * 0 on success, -1 on failure or an unrecognized data model.
 */
static int
lx_set_user_regs64_uc(klwp_t *lwp, void *ucp, lx_user_regs64_t *lxrp)
{
	proc_t *p = lwptoproc(lwp);

	switch (lwp_getdatamodel(lwp)) {
	case DATAMODEL_LP64: {
		ucontext_t uc;

		if (lx_read_uc(p, ucp, &uc, sizeof (uc)) != 0) {
			return (-1);
		}

		/*
		 * Note: we currently ignore "lxur_orig_rax" here since this
		 * path should not be used for system call stops.
		 */
		LX_REG(&uc, REG_R15) = lxrp->lxur_r15;
		LX_REG(&uc, REG_R14) = lxrp->lxur_r14;
		LX_REG(&uc, REG_R13) = lxrp->lxur_r13;
		LX_REG(&uc, REG_R12) = lxrp->lxur_r12;
		LX_REG(&uc, REG_RBP) = lxrp->lxur_rbp;
		LX_REG(&uc, REG_RBX) = lxrp->lxur_rbx;
		LX_REG(&uc, REG_R11) = lxrp->lxur_r11;
		LX_REG(&uc, REG_R10) = lxrp->lxur_r10;
		LX_REG(&uc, REG_R9) = lxrp->lxur_r9;
		LX_REG(&uc, REG_R8) = lxrp->lxur_r8;
		LX_REG(&uc, REG_RAX) = lxrp->lxur_rax;
		LX_REG(&uc, REG_RCX) = lxrp->lxur_rcx;
		LX_REG(&uc, REG_RDX) = lxrp->lxur_rdx;
		LX_REG(&uc, REG_RSI) = lxrp->lxur_rsi;
		LX_REG(&uc, REG_RDI) = lxrp->lxur_rdi;
		LX_REG(&uc, REG_RIP) = lxrp->lxur_rip;
		LX_REG(&uc, REG_RFL) = PSLMERGE(LX_REG(&uc, REG_RFL),
		    lxrp->lxur_rflags);
		LX_REG(&uc, REG_RSP) = lxrp->lxur_rsp;
		LX_REG(&uc, REG_SS) = fix_segreg(lxrp->lxur_xss, IS_NOT_CS,
		    DATAMODEL_LP64);
		LX_REG(&uc, REG_FSBASE) = lxrp->lxur_xfs_base;
		LX_REG(&uc, REG_GSBASE) = lxrp->lxur_xgs_base;

		/* %cs is ignored because of our lies */
		LX_REG(&uc, REG_DS) = fix_segreg(lxrp->lxur_xds, IS_NOT_CS,
		    DATAMODEL_LP64);
		LX_REG(&uc, REG_ES) = fix_segreg(lxrp->lxur_xes, IS_NOT_CS,
		    DATAMODEL_LP64);
		LX_REG(&uc, REG_FS) = fix_segreg(lxrp->lxur_xfs, IS_NOT_CS,
		    DATAMODEL_LP64);
		LX_REG(&uc, REG_GS) = fix_segreg(lxrp->lxur_xgs, IS_NOT_CS,
		    DATAMODEL_LP64);

		if (lx_write_uc(p, ucp, &uc, sizeof (uc)) != 0) {
			return (-1);
		}

		return (0);
	}

	case DATAMODEL_ILP32: {
		ucontext32_t uc;

		if (lx_read_uc(p, ucp, &uc, sizeof (uc)) != 0) {
			return (-1);
		}

		/*
		 * Note: we currently ignore "lxur_orig_rax" here since this
		 * path should not be used for system call stops.
		 */
		LX_REG(&uc, EBP) = (int32_t)lxrp->lxur_rbp;
		LX_REG(&uc, EBX) = (int32_t)lxrp->lxur_rbx;
		LX_REG(&uc, EAX) = (int32_t)lxrp->lxur_rax;
		LX_REG(&uc, ECX) = (int32_t)lxrp->lxur_rcx;
		LX_REG(&uc, EDX) = (int32_t)lxrp->lxur_rdx;
		LX_REG(&uc, ESI) = (int32_t)lxrp->lxur_rsi;
		LX_REG(&uc, EDI) = (int32_t)lxrp->lxur_rdi;
		LX_REG(&uc, EIP) = (int32_t)lxrp->lxur_rip;
		LX_REG(&uc, EFL) = (int32_t)PSLMERGE(LX_REG(&uc, EFL),
		    lxrp->lxur_rflags);
		LX_REG(&uc, UESP) = (int32_t)lxrp->lxur_rsp;
		LX_REG(&uc, SS) = (int32_t)fix_segreg(lxrp->lxur_xss,
		    IS_NOT_CS, DATAMODEL_ILP32);

		/* %cs is ignored because of our lies */
		LX_REG(&uc, DS) = (int32_t)fix_segreg(lxrp->lxur_xds,
		    IS_NOT_CS, DATAMODEL_ILP32);
		LX_REG(&uc, ES) = (int32_t)fix_segreg(lxrp->lxur_xes,
		    IS_NOT_CS, DATAMODEL_ILP32);
		LX_REG(&uc, FS) = (int32_t)fix_segreg(lxrp->lxur_xfs,
		    IS_NOT_CS, DATAMODEL_ILP32);
		LX_REG(&uc, GS) = (int32_t)fix_segreg(lxrp->lxur_xgs,
		    IS_NOT_CS, DATAMODEL_ILP32);

		if (lx_write_uc(p, ucp, &uc, sizeof (uc)) != 0) {
			return (-1);
		}
		return (0);
	}

	default:
		break;
	}

	return (-1);
}
869 869
/*
 * Apply a 64-bit Linux user_regs_struct to the given branded LWP, writing
 * either to a stored ucontext or to the LWP's saved regs depending on
 * lx_regs_location().  Returns 0 on success, -1 on failure.
 */
static int
lx_set_user_regs64(lx_lwp_data_t *lwpd, lx_user_regs64_t *lxrp)
{
	klwp_t *lwp = lwpd->br_lwp;
	struct regs *rp = lwptoregs(lwp);
	struct pcb *pcb = &lwp->lwp_pcb;
	void *ucp;

	switch (lx_regs_location(lwpd, &ucp, B_TRUE)) {
	case LX_REG_LOC_UNAVAIL:
		return (-1);

	case LX_REG_LOC_UCP:
		return (lx_set_user_regs64_uc(lwp, ucp, lxrp));

	case LX_REG_LOC_LWP:
		/* transformation below */
		break;

	default:
		VERIFY(0);
		break;
	}

	rp->r_r15 = lxrp->lxur_r15;
	rp->r_r14 = lxrp->lxur_r14;
	rp->r_r13 = lxrp->lxur_r13;
	rp->r_r12 = lxrp->lxur_r12;
	rp->r_rbp = lxrp->lxur_rbp;
	rp->r_rbx = lxrp->lxur_rbx;
	rp->r_r11 = lxrp->lxur_r11;
	rp->r_r10 = lxrp->lxur_r10;
	rp->r_r9 = lxrp->lxur_r9;
	rp->r_r8 = lxrp->lxur_r8;
	rp->r_rax = lxrp->lxur_rax;
	rp->r_rcx = lxrp->lxur_rcx;
	rp->r_rdx = lxrp->lxur_rdx;
	rp->r_rsi = lxrp->lxur_rsi;
	rp->r_rdi = lxrp->lxur_rdi;
	lwpd->br_syscall_num = (int)lxrp->lxur_orig_rax;
	rp->r_rip = lxrp->lxur_rip;
	/* Only user-modifiable eflags bits may be changed. */
	rp->r_rfl = PSLMERGE(rp->r_rfl, lxrp->lxur_rflags);
	rp->r_rsp = lxrp->lxur_rsp;
	rp->r_ss = fix_segreg(lxrp->lxur_xss, IS_NOT_CS, DATAMODEL_LP64);
	pcb->pcb_fsbase = lxrp->lxur_xfs_base;
	pcb->pcb_gsbase = lxrp->lxur_xgs_base;

	/*
	 * Stage sanitized segment selectors in the PCB and flag them for
	 * reload on return to userland.
	 */
	kpreempt_disable();
	PCB_SET_UPDATE_SEGS(pcb);
	pcb->pcb_ds = fix_segreg(lxrp->lxur_xds, IS_NOT_CS, DATAMODEL_LP64);
	pcb->pcb_es = fix_segreg(lxrp->lxur_xes, IS_NOT_CS, DATAMODEL_LP64);
	pcb->pcb_fs = fix_segreg(lxrp->lxur_xfs, IS_NOT_CS, DATAMODEL_LP64);
	pcb->pcb_gs = fix_segreg(lxrp->lxur_xgs, IS_NOT_CS, DATAMODEL_LP64);
	kpreempt_enable();

	return (0);
}
927 927
928 928 #endif /* __amd64 */
929 929
930 930 static int
931 931 lx_peekuser32(lx_lwp_data_t *lwpd, uintptr_t offset, uint32_t *res)
932 932 {
933 933 lx_user32_t lxu;
934 934 boolean_t valid = B_FALSE;
935 935
936 936 bzero(&lxu, sizeof (lxu));
937 937 if (offset < sizeof (lx_user_regs32_t)) {
938 938 if (lx_get_user_regs32(lwpd, &lxu.lxu_regs) == 0) {
939 939 valid = B_TRUE;
940 940 }
941 941 }
942 942 if (valid) {
943 943 uint32_t *data = (uint32_t *)&lxu;
944 944 *res = data[offset / sizeof (uint32_t)];
945 945 return (0);
946 946 }
947 947 return (-1);
948 948 }
949 949
950 950 #ifdef __amd64
951 951 static int
952 952 lx_peekuser64(lx_lwp_data_t *lwpd, uintptr_t offset, uintptr_t *res)
953 953 {
954 954 lx_user64_t lxu;
955 955 boolean_t valid = B_FALSE;
956 956
957 957 bzero(&lxu, sizeof (lxu));
958 958 if (offset < sizeof (lx_user_regs64_t)) {
959 959 if (lx_get_user_regs64(lwpd, &lxu.lxu_regs) == 0) {
960 960 valid = B_TRUE;
961 961 }
962 962 }
963 963 if (valid) {
964 964 uintptr_t *data = (uintptr_t *)&lxu;
965 965 *res = data[offset / sizeof (uintptr_t)];
966 966 return (0);
967 967 }
968 968 return (-1);
969 969 }
970 970 #endif /* __amd64 */
971 971
/*
 * PTRACE_SETREGS-style entry point: copy a Linux user_regs structure in
 * from userland and apply it to the branded LWP.  The structure layout is
 * chosen by the tracer's data model; a 64-bit tracer may operate on either
 * a 32-bit or 64-bit tracee, while a 32-bit tracer may only operate on a
 * 32-bit tracee.  Returns 0, EFAULT (bad user address), or EIO
 * (unsupported model combination or register-write failure).
 */
int
lx_user_regs_copyin(lx_lwp_data_t *lwpd, void *uregsp)
{
	model_t target_model = lwp_getdatamodel(lwpd->br_lwp);

	switch (get_udatamodel()) {
	case DATAMODEL_ILP32:
		if (target_model == DATAMODEL_ILP32) {
			lx_user_regs32_t regs;

			if (copyin(uregsp, &regs, sizeof (regs)) != 0) {
				return (EFAULT);
			}
			if (lx_set_user_regs32(lwpd, &regs) != 0) {
				return (EIO);
			}
			return (0);
		}
		break;

#ifdef __amd64
	case DATAMODEL_LP64:
		if (target_model == DATAMODEL_ILP32 ||
		    target_model == DATAMODEL_LP64) {
			lx_user_regs64_t regs;

			if (copyin(uregsp, &regs, sizeof (regs)) != 0) {
				return (EFAULT);
			}
			if (lx_set_user_regs64(lwpd, &regs) != 0) {
				return (EIO);
			}
			return (0);
		}
		break;
#endif /* __amd64 */

	default:
		break;
	}
	return (EIO);
}
1014 1014
1015 1015 int
1016 1016 lx_user_regs_copyout(lx_lwp_data_t *lwpd, void *uregsp)
1017 1017 {
1018 1018 model_t target_model = lwp_getdatamodel(lwpd->br_lwp);
1019 1019
1020 1020 switch (get_udatamodel()) {
1021 1021 case DATAMODEL_ILP32:
1022 1022 if (target_model == DATAMODEL_ILP32) {
1023 1023 lx_user_regs32_t regs;
1024 1024
1025 1025 if (lx_get_user_regs32(lwpd, ®s) != 0) {
1026 1026 return (EIO);
1027 1027 }
1028 1028 if (copyout(®s, uregsp, sizeof (regs)) != 0) {
1029 1029 return (EFAULT);
1030 1030 }
1031 1031 return (0);
1032 1032 }
1033 1033 break;
1034 1034
1035 1035 #ifdef __amd64
1036 1036 case DATAMODEL_LP64:
1037 1037 if (target_model == DATAMODEL_ILP32 ||
1038 1038 target_model == DATAMODEL_LP64) {
1039 1039 lx_user_regs64_t regs;
1040 1040
1041 1041 if (lx_get_user_regs64(lwpd, ®s) != 0) {
1042 1042 return (EIO);
1043 1043 }
1044 1044 if (copyout(®s, uregsp, sizeof (regs)) != 0) {
1045 1045 return (EFAULT);
1046 1046 }
1047 1047 return (0);
1048 1048 }
1049 1049 break;
1050 1050 #endif /* __amd64 */
1051 1051
1052 1052 default:
1053 1053 break;
1054 1054 }
1055 1055 return (EIO);
1056 1056 }
1057 1057
1058 1058 int
1059 1059 lx_user_fpregs_copyin(lx_lwp_data_t *lwpd, void *uregsp)
1060 1060 {
1061 1061 model_t target_model = lwp_getdatamodel(lwpd->br_lwp);
1062 1062
1063 1063 switch (get_udatamodel()) {
1064 1064 case DATAMODEL_ILP32:
1065 1065 if (target_model == DATAMODEL_ILP32) {
1066 1066 lx_user_fpregs32_t regs;
1067 1067
1068 1068 if (copyin(uregsp, ®s, sizeof (regs)) != 0) {
1069 1069 return (EFAULT);
1070 1070 }
1071 1071 lx_setfpregs32(lwpd, ®s);
1072 1072 return (0);
1073 1073 }
1074 1074 break;
1075 1075
1076 1076 #ifdef __amd64
1077 1077 case DATAMODEL_LP64:
1078 1078 if (target_model == DATAMODEL_ILP32 ||
1079 1079 target_model == DATAMODEL_LP64) {
1080 1080 lx_user_fpregs64_t regs;
1081 1081
1082 1082 if (copyin(uregsp, ®s, sizeof (regs)) != 0) {
1083 1083 return (EFAULT);
1084 1084 }
1085 1085 lx_setfpregs64(lwpd, ®s);
1086 1086 return (0);
1087 1087 }
1088 1088 break;
1089 1089 #endif /* __amd64 */
1090 1090
1091 1091 default:
1092 1092 break;
1093 1093 }
1094 1094 return (EIO);
1095 1095 }
1096 1096
1097 1097 int
1098 1098 lx_user_fpregs_copyout(lx_lwp_data_t *lwpd, void *uregsp)
1099 1099 {
1100 1100 model_t target_model = lwp_getdatamodel(lwpd->br_lwp);
1101 1101
1102 1102 switch (get_udatamodel()) {
1103 1103 case DATAMODEL_ILP32:
1104 1104 if (target_model == DATAMODEL_ILP32) {
1105 1105 lx_user_fpregs32_t regs;
1106 1106
1107 1107 lx_getfpregs32(lwpd, ®s);
1108 1108 if (copyout(®s, uregsp, sizeof (regs)) != 0) {
1109 1109 return (EFAULT);
1110 1110 }
1111 1111 return (0);
1112 1112 }
1113 1113 break;
1114 1114
1115 1115 #ifdef __amd64
1116 1116 case DATAMODEL_LP64:
1117 1117 if (target_model == DATAMODEL_ILP32 ||
1118 1118 target_model == DATAMODEL_LP64) {
1119 1119 lx_user_fpregs64_t regs;
1120 1120
1121 1121 lx_getfpregs64(lwpd, ®s);
1122 1122 if (copyout(®s, uregsp, sizeof (regs)) != 0) {
1123 1123 return (EFAULT);
1124 1124 }
1125 1125 return (0);
1126 1126 }
1127 1127 break;
1128 1128 #endif /* __amd64 */
1129 1129
1130 1130 default:
1131 1131 break;
1132 1132 }
1133 1133 return (EIO);
1134 1134 }
1135 1135
/*
 * Stub for PTRACE_SETFPXREGS: extended (FXSAVE-style) FP register writes
 * are not yet supported; all requests fail with EIO.
 */
/* ARGSUSED */
int
lx_user_fpxregs_copyin(lx_lwp_data_t *lwpd, void *uregsp)
{
	/* Punt on fpxregs for now */
	return (EIO);
}
1143 1143
/*
 * Stub for PTRACE_GETFPXREGS: extended (FXSAVE-style) FP register reads
 * are not yet supported; all requests fail with EIO.
 */
/* ARGSUSED */
int
lx_user_fpxregs_copyout(lx_lwp_data_t *lwpd, void *uregsp)
{
	/* Punt on fpxregs for now */
	return (EIO);
}
1151 1151
1152 1152 int
1153 1153 lx_ptrace_peekuser(lx_lwp_data_t *lwpd, uintptr_t offset, void *uptr)
1154 1154 {
1155 1155 model_t target_model = lwp_getdatamodel(lwpd->br_lwp);
1156 1156
1157 1157 switch (get_udatamodel()) {
1158 1158 case DATAMODEL_ILP32:
1159 1159 if ((offset & (sizeof (uint32_t) - 1)) != 0) {
1160 1160 /* Must be aligned to 32bit boundary */
1161 1161 break;
1162 1162 }
1163 1163 if (target_model == DATAMODEL_ILP32) {
1164 1164 uint32_t res;
1165 1165
1166 1166 if (lx_peekuser32(lwpd, offset, &res) != 0) {
1167 1167 return (EIO);
1168 1168 }
1169 1169 if (copyout(&res, uptr, sizeof (res)) != 0) {
1170 1170 return (EFAULT);
1171 1171 }
1172 1172 return (0);
1173 1173 }
1174 1174 break;
1175 1175
1176 1176 #ifdef __amd64
1177 1177 case DATAMODEL_LP64:
1178 1178 if ((offset & (sizeof (uintptr_t) - 1)) != 0) {
1179 1179 /* Must be aligned to 64bit boundary */
1180 1180 break;
1181 1181 }
1182 1182 if (target_model == DATAMODEL_ILP32 ||
1183 1183 target_model == DATAMODEL_LP64) {
1184 1184 uintptr_t res;
1185 1185
1186 1186 if (lx_peekuser64(lwpd, offset, &res) != 0) {
1187 1187 return (EIO);
1188 1188 }
1189 1189 if (copyout(&res, uptr, sizeof (res)) != 0) {
1190 1190 return (EFAULT);
1191 1191 }
1192 1192 return (0);
1193 1193 }
1194 1194 break;
1195 1195 #endif /* __amd64 */
1196 1196
1197 1197 default:
1198 1198 break;
1199 1199 }
1200 1200 return (EIO);
1201 1201 }
1202 1202
/*
 * Stub for PTRACE_POKEUSER: writing the emulated "user" area is not
 * supported; all requests fail with EIO.
 */
/* ARGSUSED */
int
lx_ptrace_pokeuser(lx_lwp_data_t *lwpd, uintptr_t offset, void *uptr)
{
	return (EIO);
}
1209 1209
1210 1210
1211 1211 /*
1212 1212 * Load registers and repoint the stack and program counter. This function is
1213 1213 * used by the B_JUMP_TO_LINUX brand system call to revector to a Linux
1214 1214 * entrypoint.
1215 1215 */
/*
 * Revector this LWP to a Linux entrypoint by loading the full register
 * state from the user-supplied ucontext (ucp is a userland address passed
 * through to getsetcontext()).  Returns the getsetcontext() result on
 * failure; on success control resumes at the new context.
 */
int
lx_runexe(klwp_t *lwp, void *ucp)
{
	lx_lwp_data_t *lwpd = lwptolxlwp(lwp);

	/*
	 * We should only make it here when transitioning to Linux from
	 * the NATIVE or INIT mode.
	 */
	VERIFY(lwpd->br_stack_mode == LX_STACK_MODE_NATIVE ||
	    lwpd->br_stack_mode == LX_STACK_MODE_INIT);

#if defined(__amd64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
		struct pcb *pcb = &lwp->lwp_pcb;

		/*
		 * Preserve the %fs/%gsbase value for this LWP, as set and used
		 * by native illumos code.  lx_switch_to_native() restores
		 * these when we later return to illumos code.
		 */
		lwpd->br_ntv_fsbase = pcb->pcb_fsbase;
		lwpd->br_ntv_gsbase = pcb->pcb_gsbase;

		return (getsetcontext(SETCONTEXT, ucp));
	} else {
		/* 32-bit process: use the ILP32 ucontext layout. */
		return (getsetcontext32(SETCONTEXT, ucp));
	}
#else
	return (getsetcontext(SETCONTEXT, ucp));
#endif
}
1247 1247
1248 1248 /*
1249 1249 * The usermode emulation code is illumos library code. This routine ensures
1250 1250 * the segment registers are set up correctly for native illumos code. It
1251 1251 * should be called _after_ we have stored the outgoing Linux machine state
1252 1252 * but _before_ we return from the kernel to any illumos native code; e.g. the
1253 1253 * usermode emulation library, or any interposed signal handlers.
1254 1254 *
1255 1255 * See the comment on lwp_segregs_save() for how we handle the usermode
1256 1256 * registers when we come into the kernel and see update_sregs() for how we
1257 1257 * restore.
1258 1258 */
void
lx_switch_to_native(klwp_t *lwp)
{
#if defined(__amd64)
	model_t datamodel = lwp_getdatamodel(lwp);

	switch (datamodel) {
	case DATAMODEL_ILP32: {
		struct pcb *pcb = &lwp->lwp_pcb;

		/*
		 * For 32-bit processes, we ensure that the correct %gs value
		 * is loaded:
		 */
		kpreempt_disable();
		if (PCB_NEED_UPDATE_SEGS(pcb)) {
			/*
			 * If we are already flushing the segment registers,
			 * then ensure we are flushing the native %gs.
			 */
			pcb->pcb_gs = LWPGS_SEL;
		} else {
			struct regs *rp = lwptoregs(lwp);

			/*
			 * If we are not flushing the segment registers yet,
			 * only do so if %gs is not correct already:
			 */
			if (rp->r_gs != LWPGS_SEL) {
				pcb->pcb_gs = LWPGS_SEL;

				/*
				 * Ensure we go out via update_sregs.
				 */
				PCB_SET_UPDATE_SEGS(pcb);
			}
		}
		kpreempt_enable();
		break;
	}

	case DATAMODEL_LP64: {
		lx_lwp_data_t *lwpd = lwptolxlwp(lwp);

		/*
		 * For 64-bit processes we ensure that the correct %fsbase
		 * value is loaded:
		 *
		 * NOTE(review): a zero br_ntv_fsbase/br_ntv_gsbase is
		 * treated as "no native base recorded" and skipped --
		 * presumably set by lx_runexe(); confirm against callers.
		 */
		if (lwpd->br_ntv_fsbase != 0) {
			struct pcb *pcb = &lwp->lwp_pcb;

			/* Keep the pcb/hardware state check atomic. */
			kpreempt_disable();
			if (pcb->pcb_fsbase != lwpd->br_ntv_fsbase) {
				pcb->pcb_fsbase = lwpd->br_ntv_fsbase;

				/*
				 * Ensure we go out via update_sregs.
				 */
				PCB_SET_UPDATE_SEGS(pcb);
			}
			kpreempt_enable();
		}
		/*
		 * ... and the correct %gsbase
		 */
		if (lwpd->br_ntv_gsbase != 0) {
			struct pcb *pcb = &lwp->lwp_pcb;

			kpreempt_disable();
			if (pcb->pcb_gsbase != lwpd->br_ntv_gsbase) {
				pcb->pcb_gsbase = lwpd->br_ntv_gsbase;

				/*
				 * Ensure we go out via update_sregs.
				 */
				PCB_SET_UPDATE_SEGS(pcb);
			}
			kpreempt_enable();
		}
		break;
	}

	default:
		cmn_err(CE_PANIC, "unknown data model: %d", datamodel);
	}
#elif defined(__i386)
	/* On 32-bit kernels a simple selector store suffices. */
	struct regs *rp = lwptoregs(lwp);

	rp->r_gs = LWPGS_SEL;
#else
#error "unknown x86"
#endif
}
1352 1352
1353 1353 #if defined(__amd64)
1354 1354 /*
1355 1355 * Call frame for the 64-bit usermode emulation handler:
1356 1356 * lx_emulate(ucontext_t *ucp, int syscall_num, uintptr_t *args)
1357 1357 *
1358 1358 * old sp: --------------------------------------------------------------
1359 1359 * | - ucontext_t (register state for emulation)
1360 1360 * | - uintptr_t[6] (system call arguments array)
1361 1361 * V --------------------------------------------------------------
1362 1362 * new sp: - bogus return address
1363 1363 *
1364 1364 * Arguments are passed in registers, per the AMD64 ABI: %rdi, %rsi and %rdx.
1365 1365 */
void
lx_emulate_user(klwp_t *lwp, int syscall_num, uintptr_t *args)
{
	lx_lwp_data_t *lwpd = lwptolxlwp(lwp);
	struct regs *rp = lwptoregs(lwp);
	label_t lab;
	uintptr_t uc_addr;	/* user address of the saved ucontext_t */
	uintptr_t args_addr;	/* user address of the 6-slot args array */
	uintptr_t top;
	/*
	 * Variables used after on_fault() returns for a fault
	 * must be volatile.
	 */
	volatile size_t frsz;
	volatile uintptr_t sp;
	volatile proc_t *p = lwptoproc(lwp);
	volatile int watched;

	/*
	 * We should not be able to get here unless we are running Linux
	 * code for a system call we cannot emulate in the kernel.
	 */
	VERIFY(lwpd->br_stack_mode == LX_STACK_MODE_BRAND);

	/*
	 * The AMD64 ABI requires us to align the return address on the stack
	 * so that when the called function pushes %rbp, the stack is 16-byte
	 * aligned.
	 *
	 * This routine, like the amd64 version of sendsig(), depends on
	 * STACK_ALIGN being 16 and STACK_ENTRY_ALIGN being 8.
	 */
#if STACK_ALIGN != 16 || STACK_ENTRY_ALIGN != 8
#error "lx_emulate_user() amd64 did not find the expected stack alignments"
#endif

	/*
	 * We begin at the current native stack pointer, and reserve space for
	 * the ucontext_t we are copying onto the stack, as well as the call
	 * arguments for the usermode emulation handler.
	 *
	 * We 16-byte align the entire frame, and then unalign it again by
	 * adding space for the return address.
	 */
	frsz = SA(sizeof (ucontext_t)) + SA(6 * sizeof (uintptr_t)) +
	    sizeof (uintptr_t);
	VERIFY((frsz & (STACK_ALIGN - 1UL)) == 8);
	VERIFY((frsz & (STACK_ENTRY_ALIGN - 1UL)) == 0);

	if (lwpd->br_ntv_stack == lwpd->br_ntv_stack_current) {
		/*
		 * Nobody else is using the stack right now, so start at the
		 * top.
		 */
		top = lwpd->br_ntv_stack_current;
	} else {
		/*
		 * Drop below the 128-byte reserved region of the stack frame
		 * we are interrupting.
		 */
		top = lwpd->br_ntv_stack_current - STACK_RESERVE;
	}
	top = top & ~(STACK_ALIGN - 1);
	sp = top - frsz;

	uc_addr = top - SA(sizeof (ucontext_t));
	args_addr = uc_addr - SA(6 * sizeof (uintptr_t));

	/* Suspend watchpoints over the frame while we write it. */
	watched = watch_disable_addr((caddr_t)sp, frsz, S_WRITE);

	/*
	 * Save the register state we preserved on the way into this brand
	 * system call and drop it on the native stack.
	 */
	{
		/*
		 * Note: the amd64 ucontext_t is 864 bytes.
		 */
		ucontext_t uc;

		/*
		 * We do not want to save the signal mask for an emulation
		 * context. Some emulated system calls alter the signal mask;
		 * restoring it when the emulation is complete would clobber
		 * those intentional side effects.
		 *
		 * XXX KEBE ASKS PLUMB UP EXTENDED?!?
		 * NOTE(review): the flags argument is 0, so extended (xsave)
		 * FPU state is not captured in this context -- confirm
		 * whether that needs to be plumbed through.
		 */
		savecontext(&uc, NULL, 0);

		/* Everything below until no_fault() may touch userland. */
		if (on_fault(&lab)) {
			goto badstack;
		}

		/*
		 * Mark this as a system call emulation context:
		 */
		uc.uc_brand_data[0] = (void *)((uintptr_t)
		    uc.uc_brand_data[0] | LX_UC_FRAME_IS_SYSCALL);

		copyout_noerr(&uc, (void *)(uintptr_t)uc_addr, sizeof (uc));
	}

	DTRACE_PROBE3(oldcontext__set, klwp_t *, lwp,
	    uintptr_t, lwp->lwp_oldcontext, uintptr_t, uc_addr);
	lwp->lwp_oldcontext = (uintptr_t)uc_addr;

	/*
	 * Copy the system call arguments out to userland:
	 */
	copyout_noerr(args, (void *)(uintptr_t)args_addr,
	    6 * sizeof (uintptr_t));

	/*
	 * Drop the bogus return address on the stack.
	 */
	suword64_noerr((void *)sp, 0);

	no_fault();
	if (watched) {
		watch_enable_addr((caddr_t)sp, frsz, S_WRITE);
	}

	/*
	 * Pass the arguments to lx_emulate() in the appropriate registers.
	 */
	rp->r_rdi = uc_addr;
	rp->r_rsi = syscall_num;
	rp->r_rdx = args_addr;

	/*
	 * In order to be able to restore %edx, we need to JUSTRETURN.
	 */
	lwp->lwp_eosys = JUSTRETURN;
	curthread->t_post_sys = 1;
	aston(curthread);

	/*
	 * Set stack pointer and return address to the usermode emulation
	 * handler:
	 */
	lwpd->br_stack_mode = LX_STACK_MODE_NATIVE;
	lx_lwp_set_native_stack_current(lwpd, sp);

	/*
	 * Divert execution, on our return, to the usermode emulation stack
	 * and handler:
	 */
	rp->r_fp = 0;
	rp->r_sp = sp;
	rp->r_pc = ptolxproc(p)->l_handler;

	/*
	 * Fix up segment registers, etc.
	 */
	lx_switch_to_native(lwp);

	return;

badstack:
	/* A copyout faulted: the native stack is unusable; kill the process. */
	no_fault();
	if (watched) {
		watch_enable_addr((caddr_t)sp, frsz, S_WRITE);
	}

#ifdef DEBUG
	printf("lx_emulate_user: bad native stack cmd=%s, pid=%d, sp=0x%lx\n",
	    PTOU(p)->u_comm, p->p_pid, sp);
#endif

	exit(CLD_KILLED, SIGSEGV);
}
1536 1537
1537 1538 #if defined(_SYSCALL32_IMPL)
1538 1539 /*
1539 1540 * Call frame for the 32-bit usermode emulation handler:
1540 1541 * lx_emulate(ucontext_t *ucp, int syscall_num, uintptr_t *args)
1541 1542 *
1542 1543 * old sp: --------------------------------------------------------------
1543 1544 * | - ucontext_t (register state for emulation)
1544 1545 * | - uintptr_t[6] (system call arguments array)
1545 1546 * | --------------------------------------------------------------
1546 1547 * | - arg2: uintptr_t * (pointer to arguments array above)
1547 1548 * | - arg1: int (system call number)
1548 1549 * V - arg0: ucontext_t * (pointer to context above)
1549 1550 * new sp: - bogus return address
1550 1551 */
1551 1552 struct lx_emu_frame32 {
1552 1553 caddr32_t retaddr; /* 0 */
1553 1554 caddr32_t ucontextp; /* 4 */
1554 1555 int32_t syscall_num; /* 8 */
1555 1556 caddr32_t argsp; /* c */
1556 1557 };
1557 1558
1558 1559 /*
1559 1560 * This function arranges for the lwp to execute the usermode emulation handler
1560 1561 * for this system call. The mechanism is similar to signal handling, and this
1561 1562 * function is modelled on sendsig32().
1562 1563 */
void
lx_emulate_user32(klwp_t *lwp, int syscall_num, uintptr_t *args)
{
	lx_lwp_data_t *lwpd = lwptolxlwp(lwp);
	struct regs *rp = lwptoregs(lwp);
	label_t lab;
	caddr32_t uc_addr;	/* user address of the saved ucontext32_t */
	caddr32_t args_addr;	/* user address of the 6-slot args array */
	caddr32_t top;
	/*
	 * Variables used after on_fault() returns for a fault
	 * must be volatile.
	 */
	volatile size_t frsz;
	volatile caddr32_t sp;
	volatile proc_t *p = lwptoproc(lwp);
	volatile int watched;

	/*
	 * We should not be able to get here unless we are running Linux
	 * code for a system call we cannot emulate in the kernel.
	 */
	VERIFY(lwpd->br_stack_mode == LX_STACK_MODE_BRAND);

	/*
	 * We begin at the current native stack pointer, and reserve space for
	 * the ucontext_t we are copying onto the stack, as well as the call
	 * arguments for the usermode emulation handler.
	 */
	frsz = SA32(sizeof (ucontext32_t)) + SA32(6 * sizeof (uint32_t)) +
	    SA32(sizeof (struct lx_emu_frame32));
	VERIFY((frsz & (STACK_ALIGN32 - 1)) == 0);

	top = (caddr32_t)(lwpd->br_ntv_stack_current & ~(STACK_ALIGN32 - 1));
	sp = top - frsz;

	uc_addr = top - SA32(sizeof (ucontext32_t));
	args_addr = uc_addr - SA32(6 * sizeof (uint32_t));

	/* Suspend watchpoints over the frame while we write it. */
	watched = watch_disable_addr((caddr_t)(uintptr_t)sp, frsz, S_WRITE);

	/*
	 * Save the register state we preserved on the way into this brand
	 * system call and drop it on the native stack.
	 */
	{
		/*
		 * Note: ucontext32_t is 512 bytes.
		 */
		ucontext32_t uc;

		/*
		 * We do not want to save the signal mask for an emulation
		 * context. Some emulated system calls alter the signal mask;
		 * restoring it when the emulation is complete would clobber
		 * those intentional side effects.
		 *
		 * XXX KEBE ASKS PLUMB UP EXTENDED?!?
		 * NOTE(review): the flags argument is 0, so extended (xsave)
		 * FPU state is not captured in this context -- confirm
		 * whether that needs to be plumbed through.
		 */
		savecontext32(&uc, NULL, 0);

		/* Everything below until no_fault() may touch userland. */
		if (on_fault(&lab)) {
			goto badstack;
		}

		/*
		 * Mark this as a system call emulation context:
		 */
		uc.uc_brand_data[0] |= LX_UC_FRAME_IS_SYSCALL;
		copyout_noerr(&uc, (void *)(uintptr_t)uc_addr, sizeof (uc));
	}

	DTRACE_PROBE3(oldcontext__set, klwp_t *, lwp,
	    uintptr_t, lwp->lwp_oldcontext, uintptr_t, uc_addr);
	lwp->lwp_oldcontext = (uintptr_t)uc_addr;

	/*
	 * Copy the system call arguments out to userland, narrowed to the
	 * ILP32 representation:
	 */
	{
		uint32_t args32[6];

		args32[0] = args[0];
		args32[1] = args[1];
		args32[2] = args[2];
		args32[3] = args[3];
		args32[4] = args[4];
		args32[5] = args[5];

		copyout_noerr(&args32, (void *)(uintptr_t)args_addr,
		    sizeof (args32));
	}

	/*
	 * Assemble the call frame on the stack.
	 */
	{
		struct lx_emu_frame32 frm;

		frm.retaddr = 0;
		frm.ucontextp = uc_addr;
		frm.argsp = args_addr;
		frm.syscall_num = syscall_num;

		copyout_noerr(&frm, (void *)(uintptr_t)sp, sizeof (frm));
	}

	no_fault();
	if (watched) {
		watch_enable_addr((caddr_t)(uintptr_t)sp, frsz, S_WRITE);
	}

	/*
	 * Set stack pointer and return address to the usermode emulation
	 * handler:
	 */
	lwpd->br_stack_mode = LX_STACK_MODE_NATIVE;
	lx_lwp_set_native_stack_current(lwpd, sp);

	/*
	 * Divert execution, on our return, to the usermode emulation stack
	 * and handler:
	 */
	rp->r_fp = 0;
	rp->r_sp = sp;
	rp->r_pc = ptolxproc(p)->l_handler;

	/*
	 * Fix up segment registers, etc.
	 */
	lx_switch_to_native(lwp);

	return;

badstack:
	/* A copyout faulted: the native stack is unusable; kill the process. */
	no_fault();
	if (watched) {
		watch_enable_addr((caddr_t)(uintptr_t)sp, frsz, S_WRITE);
	}

#ifdef DEBUG
	printf("lx_emulate_user32: bad native stack cmd=%s, pid=%d, sp=0x%x\n",
	    PTOU(p)->u_comm, p->p_pid, sp);
#endif

	exit(CLD_KILLED, SIGSEGV);
}
1708 1710 #endif /* _SYSCALL32_IMPL */
1709 1711
1710 1712 #else /* !__amd64 (__i386) */
1711 1713
/*
 * 32-bit (i386) kernels cannot run the usermode emulation handler; any
 * attempt kills the process with SIGSYS.
 */
/* ARGSUSED */
void
lx_emulate_user(klwp_t *lwp, int syscall_num, uintptr_t *args)
{
	cmn_err(CE_WARN, "%s: no 32-bit kernel support", __FUNCTION__);
	exit(CLD_KILLED, SIGSYS);
}
1719 1721
1720 1722 #endif /* __amd64 */
|
↓ open down ↓ |
90 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX