--- old/usr/src/uts/common/fs/proc/prcontrol.c
+++ new/usr/src/uts/common/fs/proc/prcontrol.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright 2015, Joyent, Inc.
29 + * Copyright 2023 Oxide Computer Company
29 30 */
30 31
31 32 #include <sys/types.h>
32 33 #include <sys/uio.h>
33 34 #include <sys/param.h>
34 35 #include <sys/cmn_err.h>
35 36 #include <sys/cred.h>
36 37 #include <sys/policy.h>
37 38 #include <sys/debug.h>
38 39 #include <sys/errno.h>
39 40 #include <sys/file.h>
40 41 #include <sys/inline.h>
41 42 #include <sys/kmem.h>
42 43 #include <sys/proc.h>
43 44 #include <sys/brand.h>
44 45 #include <sys/regset.h>
45 46 #include <sys/sysmacros.h>
46 47 #include <sys/systm.h>
47 48 #include <sys/vfs.h>
48 49 #include <sys/vnode.h>
49 50 #include <sys/signal.h>
50 51 #include <sys/auxv.h>
51 52 #include <sys/user.h>
52 53 #include <sys/class.h>
53 54 #include <sys/fault.h>
54 55 #include <sys/syscall.h>
55 56 #include <sys/procfs.h>
56 57 #include <sys/zone.h>
57 58 #include <sys/copyops.h>
58 59 #include <sys/schedctl.h>
59 60 #include <vm/as.h>
60 61 #include <vm/seg.h>
61 62 #include <fs/proc/prdata.h>
62 63 #include <sys/contract/process_impl.h>
64 +#include <sys/stdalign.h>
63 65
64 66 static void pr_settrace(proc_t *, sigset_t *);
65 67 static int pr_setfpregs(prnode_t *, prfpregset_t *);
66 -#if defined(__sparc)
67 68 static int pr_setxregs(prnode_t *, prxregset_t *);
68 -static int pr_setasrs(prnode_t *, asrset_t);
69 -#endif
70 69 static int pr_setvaddr(prnode_t *, caddr_t);
71 70 static int pr_clearsig(prnode_t *);
72 71 static int pr_clearflt(prnode_t *);
73 72 static int pr_watch(prnode_t *, prwatch_t *, int *);
74 73 static int pr_agent(prnode_t *, prgregset_t, int *);
75 74 static int pr_rdwr(proc_t *, enum uio_rw, priovec_t *);
76 75 static int pr_scred(proc_t *, prcred_t *, cred_t *, boolean_t);
77 76 static int pr_spriv(proc_t *, prpriv_t *, cred_t *);
78 77 static int pr_szoneid(proc_t *, zoneid_t, cred_t *);
79 78 static void pauselwps(proc_t *);
80 79 static void unpauselwps(proc_t *);
81 80
81 +/*
82 + * This union represents the size of commands that are generally fixed size in
83 + * /proc. There are some commands that are variable size because the actual data
84 + * is structured. Of things in the latter category, some of these are the same
85 + * across all architectures (e.g. prcred_t, prpriv_t) and some vary and are
86 + * opaque (e.g. the prxregset_t).
87 + */
82 88 typedef union {
83 89 long sig; /* PCKILL, PCUNKILL */
84 90 long nice; /* PCNICE */
85 91 long timeo; /* PCTWSTOP */
86 92 ulong_t flags; /* PCRUN, PCSET, PCUNSET */
87 93 caddr_t vaddr; /* PCSVADDR */
88 94 siginfo_t siginfo; /* PCSSIG */
89 95 sigset_t sigset; /* PCSTRACE, PCSHOLD */
90 96 fltset_t fltset; /* PCSFAULT */
91 97 sysset_t sysset; /* PCSENTRY, PCSEXIT */
92 98 prgregset_t prgregset; /* PCSREG, PCAGENT */
93 99 prfpregset_t prfpregset; /* PCSFPREG */
94 -#if defined(__sparc)
95 - prxregset_t prxregset; /* PCSXREG */
96 - asrset_t asrset; /* PCSASRS */
97 -#endif
98 100 prwatch_t prwatch; /* PCWATCH */
99 101 priovec_t priovec; /* PCREAD, PCWRITE */
100 102 prcred_t prcred; /* PCSCRED */
101 103 prpriv_t prpriv; /* PCSPRIV */
102 104 long przoneid; /* PCSZONE */
103 105 } arg_t;
104 106
105 -static int pr_control(long, arg_t *, prnode_t *, cred_t *);
107 +static boolean_t
108 +prwritectl_pcscredx_sizef(const void *datap, size_t *sizep)
109 +{
110 + const prcred_t *cred = datap;
106 111
107 -static size_t
108 -ctlsize(long cmd, size_t resid, arg_t *argp)
112 + if (cred->pr_ngroups < 0 || cred->pr_ngroups > ngroups_max) {
113 + return (B_FALSE);
114 + }
115 +
116 + if (cred->pr_ngroups == 0) {
117 + *sizep = 0;
118 + } else {
119 + *sizep = (cred->pr_ngroups - 1) * sizeof (gid_t);
120 + }
121 + return (B_TRUE);
122 +}
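The "- 1" above reflects that prcred_t already reserves one element in its pr_groups array, so only the groups past the first add to the payload. As a minimal sketch (the helper name is hypothetical and assumes the headers already included by this file), the full PCSCREDX payload beyond the command word works out to:

static size_t
pcscredx_payload_size(const prcred_t *cred)
{
	/* Base structure, which already holds pr_groups[0]. */
	size_t size = sizeof (prcred_t);

	/* Each supplementary group past the first adds one gid_t. */
	if (cred->pr_ngroups > 0)
		size += (cred->pr_ngroups - 1) * sizeof (gid_t);
	return (size);
}

For example, three supplementary groups require sizeof (prcred_t) plus two additional gid_t slots.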
123 +
124 +static boolean_t
125 +prwritectl_pcspriv_sizef(const void *datap, size_t *sizep)
109 126 {
110 - size_t size = sizeof (long);
111 - size_t rnd;
112 - int ngrp;
127 + const prpriv_t *priv = datap;
128 + *sizep = priv_prgetprivsize(priv) - sizeof (prpriv_t);
129 + return (B_TRUE);
130 +}
113 131
114 - switch (cmd) {
115 - case PCNULL:
116 - case PCSTOP:
117 - case PCDSTOP:
118 - case PCWSTOP:
119 - case PCCSIG:
120 - case PCCFAULT:
121 - break;
122 - case PCSSIG:
123 - size += sizeof (siginfo_t);
124 - break;
125 - case PCTWSTOP:
126 - size += sizeof (long);
127 - break;
128 - case PCKILL:
129 - case PCUNKILL:
130 - case PCNICE:
131 - size += sizeof (long);
132 - break;
133 - case PCRUN:
134 - case PCSET:
135 - case PCUNSET:
136 - size += sizeof (ulong_t);
137 - break;
138 - case PCSVADDR:
139 - size += sizeof (caddr_t);
140 - break;
141 - case PCSTRACE:
142 - case PCSHOLD:
143 - size += sizeof (sigset_t);
144 - break;
145 - case PCSFAULT:
146 - size += sizeof (fltset_t);
147 - break;
148 - case PCSENTRY:
149 - case PCSEXIT:
150 - size += sizeof (sysset_t);
151 - break;
152 - case PCSREG:
153 - case PCAGENT:
154 - size += sizeof (prgregset_t);
155 - break;
156 - case PCSFPREG:
157 - size += sizeof (prfpregset_t);
158 - break;
159 -#if defined(__sparc)
160 - case PCSXREG:
161 - size += sizeof (prxregset_t);
162 - break;
163 - case PCSASRS:
164 - size += sizeof (asrset_t);
165 - break;
132 +/*
133 + * This structure represents a single /proc write command that we support and
134 + * metadata about how to ensure we have sufficient data for it. To determine the
135 + * data that we need to read, this combines information from three different
136 + * sources for a given named command in 'pcs_cmd'. The main goal is to first
137 + * make sure we have the right minimum amount of information so we can read and
138 + * validate the data around variable length structures.
139 + *
140 + * o Most commands have a fixed static size. This is represented in the
141 + * pcs_size member. This also is used to represent the base structure size
142 + * in the case of entries like PCSCREDX.
143 + *
144 + * o Other commands have an unknown minimum size to determine how much data
145 + * there is and they use the pcs_minf() function to determine the right
146 + * value. This is often unknown at compile time because it is, say, a
147 + * machdep or ISA-specific feature (a la PCSXREG) and we'd rather not #ifdef
148 + * this code to death. This may be skipped, as it is for most commands. The value
149 + * it returns is added to the static value.
150 + *
151 + * o The final piece is the pcs_sizef() function pointer which determines the
152 + * total required size for this. It is given a pointer that has at least
153 + * pcs_size and pcs_minf() bytes. This is used to determine the total
154 + * expected size of the structure. Callers must not dereference data beyond
155 + * what they've indicated previously. This should only return extra bytes
156 + * that are required beyond what was already indicated between the two
157 + * functions.
158 + *
159 + * In all cases, the core prwritectl() logic will determine whether there is
160 + * sufficient data at each step along the way for each of these to proceed.
161 + */
162 +typedef struct proc_control_info {
163 + long pcs_cmd;
164 + size_t pcs_size;
165 + boolean_t (*pcs_minf)(size_t *);
166 + boolean_t (*pcs_sizef)(const void *, size_t *);
167 +} proc_control_info_t;
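Taken together, the sketch below (a hypothetical helper; error handling and the staged reads that prwritectl_common() performs are elided) shows how the three sources combine into the total byte count a single command requires before any rounding:

static size_t
prwritectl_required_size(const proc_control_info_t *info, const void *data,
    size_t cmdsize)
{
	/* The command word plus the fixed, compile-time portion. */
	size_t need = cmdsize + info->pcs_size;
	size_t min, extra;

	/* ISA/machdep minimum, e.g. for the extended register set. */
	if (info->pcs_minf != NULL && info->pcs_minf(&min))
		need += min;

	/* Variable tail derived from the data itself, e.g. extra gids. */
	if (info->pcs_sizef != NULL && info->pcs_sizef(data, &extra))
		need += extra;

	return (need);
}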
168 +
169 +static const proc_control_info_t proc_ctl_info[] = {
170 + { PCNULL, 0, NULL, NULL },
171 + { PCSTOP, 0, NULL, NULL },
172 + { PCDSTOP, 0, NULL, NULL },
173 + { PCWSTOP, 0, NULL, NULL },
174 + { PCCSIG, 0, NULL, NULL },
175 + { PCCFAULT, 0, NULL, NULL },
176 + { PCSSIG, sizeof (siginfo_t), NULL, NULL },
177 + { PCTWSTOP, sizeof (long), NULL, NULL },
178 + { PCKILL, sizeof (long), NULL, NULL },
179 + { PCUNKILL, sizeof (long), NULL, NULL },
180 + { PCNICE, sizeof (long), NULL, NULL },
181 + { PCRUN, sizeof (ulong_t), NULL, NULL },
182 + { PCSET, sizeof (ulong_t), NULL, NULL },
183 + { PCUNSET, sizeof (ulong_t), NULL, NULL },
184 + { PCSTRACE, sizeof (sigset_t), NULL, NULL },
185 + { PCSHOLD, sizeof (sigset_t), NULL, NULL },
186 + { PCSFAULT, sizeof (fltset_t), NULL, NULL },
187 + { PCSENTRY, sizeof (sysset_t), NULL, NULL },
188 + { PCSEXIT, sizeof (sysset_t), NULL, NULL },
189 + { PCSREG, sizeof (prgregset_t), NULL, NULL },
190 + { PCAGENT, sizeof (prgregset_t), NULL, NULL },
191 + { PCSFPREG, sizeof (prfpregset_t), NULL, NULL },
192 + { PCSXREG, 0, prwriteminxreg, prwritesizexreg },
193 + { PCWATCH, sizeof (prwatch_t), NULL },
194 + { PCREAD, sizeof (priovec_t), NULL, NULL },
195 + { PCWRITE, sizeof (priovec_t), NULL, NULL },
196 + { PCSCRED, sizeof (prcred_t), NULL, NULL },
197 + { PCSCREDX, sizeof (prcred_t), NULL, prwritectl_pcscredx_sizef },
198 + { PCSPRIV, sizeof (prpriv_t), NULL, prwritectl_pcspriv_sizef },
199 + { PCSZONE, sizeof (long), NULL },
200 +};
201 +
202 +/*
203 + * We need a default buffer that we're going to allocate when we need memory to
204 + * read control operations. This is on average large enough to hold multiple
205 + * control operations. We leave this as a smaller value on debug builds just
206 + * to exercise our reallocation logic.
207 + */
208 +#ifdef DEBUG
209 +#define PROC_CTL_DEFSIZE 32
210 +#else
211 +#define PROC_CTL_DEFSIZE 1024
166 212 #endif
167 - case PCWATCH:
168 - size += sizeof (prwatch_t);
169 - break;
170 - case PCREAD:
171 - case PCWRITE:
172 - size += sizeof (priovec_t);
173 - break;
174 - case PCSCRED:
175 - size += sizeof (prcred_t);
176 - break;
177 - case PCSCREDX:
178 - /*
179 - * We cannot derefence the pr_ngroups fields if it
180 - * we don't have enough data.
181 - */
182 - if (resid < size + sizeof (prcred_t) - sizeof (gid_t))
183 - return (0);
184 - ngrp = argp->prcred.pr_ngroups;
185 - if (ngrp < 0 || ngrp > ngroups_max)
186 - return (0);
187 213
188 - /* The result can be smaller than sizeof (prcred_t) */
189 - size += sizeof (prcred_t) - sizeof (gid_t);
190 - size += ngrp * sizeof (gid_t);
191 - break;
192 - case PCSPRIV:
193 - if (resid >= size + sizeof (prpriv_t))
194 - size += priv_prgetprivsize(&argp->prpriv);
195 - else
196 - return (0);
197 - break;
198 - case PCSZONE:
199 - size += sizeof (long);
200 - break;
201 - default:
214 +/*
215 + * This structure is used to track all of the information that we have around a
216 + * prwritectl call. This is used to reduce function parameters and make state
217 + * clear.
218 + */
219 +typedef struct {
220 + void *prwc_buf;
221 + size_t prwc_buflen;
222 + size_t prwc_curvalid;
223 + uio_t *prwc_uiop;
224 + prnode_t *prwc_pnp;
225 + boolean_t prwc_locked;
226 + boolean_t prwc_need32;
227 + void *prwc_buf32;
228 +} prwritectl_t;
229 +
230 +/*
231 + * Attempt to read in at least 'needed' bytes of data. If we need to read in
232 + * data, then we will try to fill in as much of the buffer as we can.
233 + */
234 +static int
235 +prwritectl_readin(prwritectl_t *prwc, size_t needed)
236 +{
237 + int ret;
238 + size_t toread;
239 + void *start;
240 +
241 + /*
242 + * If we have as much data as we need then we're good to go.
243 + */
244 + if (prwc->prwc_curvalid > needed) {
245 + ASSERT3U(prwc->prwc_buflen, >=, prwc->prwc_curvalid);
246 + ASSERT3U(prwc->prwc_buflen, >=, needed);
202 247 return (0);
203 248 }
204 249
205 - /* Round up to a multiple of long, unless exact amount written */
206 - if (size < resid) {
207 - rnd = size & (sizeof (long) - 1);
250 + /*
251 + * We don't have all of our data. We must make sure of several things:
252 + *
253 + * 1. That there actually is enough data in the uio_t for what we
254 + * need, considering what we've already read.
255 + * 2. If the process is locked at this point, we want to unlock it
256 + * before we deal with any I/O or memory allocation. Otherwise we
257 + * can wreak havoc with p_lock / paging.
258 + * 3. We need to make sure that our buffer is large enough to actually
259 + * fit it all.
260 + * 4. Only at that point can we actually perform the read.
261 + */
262 + if (needed - prwc->prwc_curvalid > prwc->prwc_uiop->uio_resid) {
263 + return (EINVAL);
264 + }
208 265
209 - if (rnd != 0)
210 - size += sizeof (long) - rnd;
266 + if (prwc->prwc_locked) {
267 + prunlock(prwc->prwc_pnp);
268 + prwc->prwc_locked = B_FALSE;
211 269 }
212 270
213 - if (size > resid)
214 - return (0);
215 - return (size);
271 + if (needed > prwc->prwc_buflen) {
272 + size_t new_len = P2ROUNDUP(needed, PROC_CTL_DEFSIZE);
273 + prwc->prwc_buf = kmem_rezalloc(prwc->prwc_buf,
274 + prwc->prwc_buflen, new_len, KM_SLEEP);
275 + if (prwc->prwc_need32) {
276 + prwc->prwc_buf32 = kmem_rezalloc(prwc->prwc_buf32,
277 + prwc->prwc_buflen, new_len, KM_SLEEP);
278 + }
279 + prwc->prwc_buflen = new_len;
280 + }
281 +
282 + toread = MIN(prwc->prwc_buflen - prwc->prwc_curvalid,
283 + prwc->prwc_uiop->uio_resid);
284 + ASSERT3U(toread, >=, needed - prwc->prwc_curvalid);
285 + start = (void *)((uintptr_t)prwc->prwc_buf + prwc->prwc_curvalid);
286 + if ((ret = uiomove(start, toread, UIO_WRITE, prwc->prwc_uiop)) != 0) {
287 + return (ret);
288 + }
289 +
290 + prwc->prwc_curvalid += toread;
291 + return (0);
216 292 }
217 293
294 +static const proc_control_info_t *
295 +prwritectl_cmd_identify(const prwritectl_t *prwc,
296 + const proc_control_info_t *info, size_t ninfo, size_t cmdsize)
297 +{
298 + long cmd;
299 +
300 + ASSERT(cmdsize == sizeof (int32_t) || cmdsize == sizeof (long));
301 + if (cmdsize == 4) {
302 + cmd = (long)*(int32_t *)prwc->prwc_buf;
303 + } else {
304 + cmd = *(long *)prwc->prwc_buf;
305 + }
306 +
307 +
308 + for (size_t i = 0; i < ninfo; i++) {
309 + if (info[i].pcs_cmd == cmd) {
310 + return (&info[i]);
311 + }
312 + }
313 +
314 + return (NULL);
315 +}
316 +
218 317 /*
219 318 * Control operations (lots).
319 + *
320 + * Users can submit one or more commands to us in the uio_t. They are required
321 + * to always be complete messages. The first one that fails will cause all
322 + * subsequent commands to fail. Processing this can be a little tricky as the
323 + * actual data size that may be required is variable: not all structures are
324 + * a fixed size and some vary based on the instruction set (e.g. x86 vs.
325 + * something else).
326 + *
327 + * The way that we handle process locking deserves some consideration. Prior to
328 + * the consolidation of prwritectl and the support for dynamic sizing of data,
329 + * the logic would try to read in a large chunk of data and keep a process
330 + * locked throughout that period and then unlock it before reading more data. As
331 + * such, we mimic that logic and basically lock it before executing the
332 + * first (or any subsequent) command and then only unlock it either when we're
333 + * done entirely or when we need to allocate memory or read from the process.
334 + *
335 + * This function is a common implementation for both the ILP32 and LP64 entry
336 + * points as they are mostly the same except for the sizing and control function
337 + * we call.
220 338 */
221 339 int
222 -prwritectl(vnode_t *vp, uio_t *uiop, cred_t *cr)
340 +prwritectl_common(vnode_t *vp, uio_t *uiop, cred_t *cr,
341 + const proc_control_info_t *proc_info, size_t ninfo, size_t cmdsize,
342 + int (*pr_controlf)(long, void *, prnode_t *, cred_t *))
223 343 {
224 -#define MY_BUFFER_SIZE \
225 - 100 > 1 + sizeof (arg_t) / sizeof (long) ? \
226 - 100 : 1 + sizeof (arg_t) / sizeof (long)
227 - long buf[MY_BUFFER_SIZE];
228 - long *bufp;
229 - size_t resid = 0;
230 - size_t size;
231 - prnode_t *pnp = VTOP(vp);
232 - int error;
233 - int locked = 0;
344 + int ret;
345 + prwritectl_t prwc;
234 346
235 - while (uiop->uio_resid) {
347 + VERIFY(cmdsize == sizeof (int32_t) || cmdsize == sizeof (long));
348 +
349 + bzero(&prwc, sizeof (prwc));
350 + prwc.prwc_pnp = VTOP(vp);
351 + prwc.prwc_uiop = uiop;
352 + prwc.prwc_need32 = cmdsize == sizeof (int32_t);
353 +
354 + /*
355 + * We may have multiple commands to read and want to try to minimize the
356 + * amount of reading that we do. Our callers expect us to have a
357 + * contiguous buffer for a command's actual implementation. However, we
358 + * must have at least a single command word's worth of data, otherwise it's not
359 + * worth continuing.
360 + */
361 + while (uiop->uio_resid > 0 || prwc.prwc_curvalid > 0) {
362 + const proc_control_info_t *proc_cmd;
363 + void *data;
364 +
236 365 /*
237 - * Read several commands in one gulp.
366 + * Check if we have enough data to identify a command. If not,
367 + * we read as much as we can in one gulp.
238 368 */
239 - bufp = buf;
240 - if (resid) { /* move incomplete command to front of buffer */
241 - long *tail;
369 + if ((ret = prwritectl_readin(&prwc, cmdsize)) != 0) {
370 + goto out;
371 + }
242 372
243 - if (resid >= sizeof (buf))
244 - break;
245 - tail = (long *)((char *)buf + sizeof (buf) - resid);
246 - do {
247 - *bufp++ = *tail++;
248 - } while ((resid -= sizeof (long)) != 0);
373 + /*
374 + * Identify the command and figure out how much data we
375 + * should have read in the kernel. Some commands have a variable
376 + * length and we need to make sure the minimum is met before
377 + * asking how much there is in general. Most commands know what
378 + * the minimum length is and so pcs_minf() is not implemented.
379 + * However, things that are ISA-specific require us to ask for
380 + * that first.
381 + *
382 + * We also must be aware that there may not actually be enough
383 + * data present in the uio_t.
384 + */
385 + if ((proc_cmd = prwritectl_cmd_identify(&prwc, proc_info,
386 + ninfo, cmdsize)) == NULL) {
387 + ret = EINVAL;
388 + goto out;
249 389 }
250 - resid = sizeof (buf) - ((char *)bufp - (char *)buf);
251 - if (resid > uiop->uio_resid)
252 - resid = uiop->uio_resid;
253 - if (error = uiomove((caddr_t)bufp, resid, UIO_WRITE, uiop))
254 - return (error);
255 - resid += (char *)bufp - (char *)buf;
256 - bufp = buf;
257 390
258 - do { /* loop over commands in buffer */
259 - long cmd = bufp[0];
260 - arg_t *argp = (arg_t *)&bufp[1];
391 + size_t needed_data = cmdsize + proc_cmd->pcs_size;
392 + if (proc_cmd->pcs_minf != NULL) {
393 + size_t min;
261 394
262 - size = ctlsize(cmd, resid, argp);
263 - if (size == 0) /* incomplete or invalid command */
264 - break;
395 + if (!proc_cmd->pcs_minf(&min)) {
396 + ret = EINVAL;
397 + goto out;
398 + }
399 +
400 + needed_data += min;
401 + }
402 +
403 + if (proc_cmd->pcs_sizef != NULL) {
404 + size_t extra;
405 +
265 406 /*
266 - * Perform the specified control operation.
407 + * Make sure we have the minimum amount of data that
408 + * was asked for between the static size and the minf
409 + * function.
267 410 */
268 - if (!locked) {
269 - if ((error = prlock(pnp, ZNO)) != 0)
270 - return (error);
271 - locked = 1;
411 + if ((ret = prwritectl_readin(&prwc, needed_data)) !=
412 + 0) {
413 + goto out;
272 414 }
273 - if (error = pr_control(cmd, argp, pnp, cr)) {
274 - if (error == -1) /* -1 is timeout */
275 - locked = 0;
276 - else
277 - return (error);
415 +
416 + VERIFY3U(prwc.prwc_curvalid, >, cmdsize);
417 + data = (void *)((uintptr_t)prwc.prwc_buf + cmdsize);
418 + if (!proc_cmd->pcs_sizef(data, &extra)) {
419 + ret = EINVAL;
420 + goto out;
278 421 }
279 - bufp = (long *)((char *)bufp + size);
280 - } while ((resid -= size) != 0);
281 422
282 - if (locked) {
283 - prunlock(pnp);
284 - locked = 0;
423 + needed_data += extra;
285 424 }
425 +
426 + /*
427 + * Now that we know how much data we're supposed to have,
428 + * finally ensure we have the total amount we need.
429 + */
430 + if ((ret = prwritectl_readin(&prwc, needed_data)) != 0) {
431 + goto out;
432 + }
433 +
434 + /*
435 + * /proc has traditionally assumed control writes come in
436 + * multiples of a long. This is 4 bytes for ILP32 and 8 bytes
437 + * for LP64. When calculating the required size for a structure,
438 + * it would always round that up to the next long. However, the
439 + * exact combination of circumstances changes with the
440 + * introduction of the 64-bit kernel. For 64-bit processes we
441 + * round up when the current command we're processing isn't the
442 + * last one.
443 + *
444 + * Because of our tracking structures and caching we need to
445 + * look beyond the uio_t to make this determination. In
446 + * particular, the uio_t can have a zero resid, but we may still
447 + * have additional data to read as indicated by prwc_curvalid
448 + * exceeding the current command size. In the end, we must check
449 + * both of these cases.
450 + */
451 + if ((needed_data % cmdsize) != 0) {
452 + if (cmdsize == sizeof (int32_t) ||
453 + prwc.prwc_curvalid > needed_data ||
454 + prwc.prwc_uiop->uio_resid > 0) {
455 + needed_data = P2ROUNDUP(needed_data,
456 + cmdsize);
457 + if ((ret = prwritectl_readin(&prwc,
458 + needed_data)) != 0) {
459 + goto out;
460 + }
461 + }
462 + }
463 +
464 + if (!prwc.prwc_locked) {
465 + ret = prlock(prwc.prwc_pnp, ZNO);
466 + if (ret != 0) {
467 + goto out;
468 + }
469 + prwc.prwc_locked = B_TRUE;
470 + }
471 +
472 + /*
473 + * Run our actual command. When there is an error, the
474 + * underlying pr_control call will have unlocked the prnode_t
475 + * on our behalf. pr_control can return -1, which is a special
476 + * error indicating a timeout occurred. In such a case the node
477 + * is unlocked; however, we are supposed to continue
478 + * processing commands regardless.
479 + *
480 + * Finally, we must deal with one actual wrinkle. The LP64
481 + * based logic always guarantees that we have data that is
482 + * 8-byte aligned. However, the ILP32 logic is 4-byte aligned
483 + * and the rest of the /proc code assumes it can always
484 + * dereference it. If we're not aligned, we have to bcopy it to
485 + * a temporary buffer.
486 + */
487 + data = (void *)((uintptr_t)prwc.prwc_buf + cmdsize);
488 +#ifdef DEBUG
489 + if (cmdsize == sizeof (long)) {
490 + VERIFY0((uintptr_t)data % alignof (long));
491 + }
492 +#endif
493 + if (prwc.prwc_need32 && ((uintptr_t)data % alignof (long)) !=
494 + 0 && needed_data > cmdsize) {
495 + bcopy(data, prwc.prwc_buf32, needed_data - cmdsize);
496 + data = prwc.prwc_buf32;
497 + }
498 + ret = pr_controlf(proc_cmd->pcs_cmd, data, prwc.prwc_pnp, cr);
499 + if (ret != 0) {
500 + prwc.prwc_locked = B_FALSE;
501 + if (ret > 0) {
502 + goto out;
503 + }
504 + }
505 +
506 + /*
507 + * Finally, now that we have processed this command, we need to
508 + * move on. To make our life simple, we basically shift all the
509 + * data in our buffer over to indicate it's been consumed. While
510 + * a little wasteful, this simplifies buffer management and
511 + * guarantees that command processing uses a semi-sanitized
512 + * state. Visually, this is the following transformation:
513 + *
514 + *      0                  20               prwc.prwc_curvalid
515 + *      +------------------+----------------+
516 + *      | needed_data      | remaining_data |
517 + *      +------------------+----------------+
518 + *
519 + * In the above example we are shifting all the data over by 20,
520 + * so remaining data starts at 0. This leaves us needed_data
521 + * bytes to clean up from what was valid.
522 + */
523 + if (prwc.prwc_buf32 != NULL) {
524 + bzero(prwc.prwc_buf32, needed_data - cmdsize);
525 + }
526 +
527 + if (prwc.prwc_curvalid > needed_data) {
528 + size_t save_size = prwc.prwc_curvalid - needed_data;
529 + void *first_save = (void *)((uintptr_t)prwc.prwc_buf +
530 + needed_data);
531 + memmove(prwc.prwc_buf, first_save, save_size);
532 + void *first_zero = (void *)((uintptr_t)prwc.prwc_buf +
533 + save_size);
534 + bzero(first_zero, needed_data);
535 + } else {
536 + bzero(prwc.prwc_buf, prwc.prwc_curvalid);
537 + }
538 + prwc.prwc_curvalid -= needed_data;
286 539 }
287 - return (resid? EINVAL : 0);
540 +
541 + /*
542 + * We've managed to successfully process everything. We can actually say
543 + * this was successful now.
544 + */
545 + ret = 0;
546 +
547 +out:
548 + if (prwc.prwc_locked) {
549 + prunlock(prwc.prwc_pnp);
550 + prwc.prwc_locked = B_FALSE;
551 + }
552 +
553 + if (prwc.prwc_buf != NULL) {
554 + kmem_free(prwc.prwc_buf, prwc.prwc_buflen);
555 + }
556 +
557 + if (prwc.prwc_buf32 != NULL) {
558 + VERIFY(prwc.prwc_need32);
559 + kmem_free(prwc.prwc_buf32, prwc.prwc_buflen);
560 + }
561 +
562 + return (ret);
288 563 }
289 564
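For context, a userland consumer of this interface (usage as described in proc(5); the function below is purely illustrative) batches complete messages, each a command word followed by its argument, into a single write to the target process's ctl file:

#include <procfs.h>
#include <unistd.h>

void
stop_then_step(int ctlfd)
{
	long msgs[3];

	msgs[0] = PCSTOP;	/* stop the process; no argument */
	msgs[1] = PCRUN;	/* set it running again ... */
	msgs[2] = PRSTEP;	/* ... with the single-step flag */
	(void) write(ctlfd, msgs, sizeof (msgs));
}

Here each message is already a multiple of the command word size, so no additional padding is required.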
290 565 static int
291 -pr_control(long cmd, arg_t *argp, prnode_t *pnp, cred_t *cr)
566 +pr_control(long cmd, void *generic, prnode_t *pnp, cred_t *cr)
292 567 {
293 568 prcommon_t *pcp;
294 569 proc_t *p;
295 570 int unlocked;
296 571 int error = 0;
572 + arg_t *argp = generic;
297 573
298 574 if (cmd == PCNULL)
299 575 return (0);
300 576
301 577 pcp = pnp->pr_common;
302 578 p = pcp->prc_proc;
303 579 ASSERT(p != NULL);
304 580
305 581 /* System processes defy control. */
306 582 if (p->p_flag & SSYS) {
307 583 prunlock(pnp);
308 584 return (EBUSY);
309 585 }
310 586
311 587 switch (cmd) {
312 588
313 589 default:
314 590 error = EINVAL;
315 591 break;
316 592
317 593 case PCSTOP: /* direct process or lwp to stop and wait for stop */
318 594 case PCDSTOP: /* direct process or lwp to stop, don't wait */
319 595 case PCWSTOP: /* wait for process or lwp to stop */
320 596 case PCTWSTOP: /* wait for process or lwp to stop, with timeout */
321 597 {
322 598 time_t timeo;
323 599
324 600 /*
325 601 * Can't apply to a system process.
326 602 */
327 603 if (p->p_as == &kas) {
328 604 error = EBUSY;
329 605 break;
330 606 }
331 607
332 608 if (cmd == PCSTOP || cmd == PCDSTOP)
333 609 pr_stop(pnp);
334 610
335 611 if (cmd == PCDSTOP)
336 612 break;
337 613
338 614 /*
339 615 * If an lwp is waiting for itself or its process,
340 616 * don't wait. The stopped lwp would never see the
341 617 * fact that it is stopped.
342 618 */
343 619 if ((pcp->prc_flags & PRC_LWP)?
344 620 (pcp->prc_thread == curthread) : (p == curproc)) {
345 621 if (cmd == PCWSTOP || cmd == PCTWSTOP)
346 622 error = EBUSY;
347 623 break;
348 624 }
349 625
350 626 timeo = (cmd == PCTWSTOP)? (time_t)argp->timeo : 0;
351 627 if ((error = pr_wait_stop(pnp, timeo)) != 0)
352 628 return (error);
353 629
354 630 break;
355 631 }
356 632
357 633 case PCRUN: /* make lwp or process runnable */
358 634 error = pr_setrun(pnp, argp->flags);
359 635 break;
360 636
361 637 case PCSTRACE: /* set signal trace mask */
362 638 pr_settrace(p, &argp->sigset);
363 639 break;
364 640
365 641 case PCSSIG: /* set current signal */
366 642 error = pr_setsig(pnp, &argp->siginfo);
367 643 if (argp->siginfo.si_signo == SIGKILL && error == 0) {
368 644 prunlock(pnp);
369 645 pr_wait_die(pnp);
370 646 return (-1);
371 647 }
372 648 break;
373 649
374 650 case PCKILL: /* send signal */
375 651 error = pr_kill(pnp, (int)argp->sig, cr);
376 652 if (error == 0 && argp->sig == SIGKILL) {
377 653 prunlock(pnp);
378 654 pr_wait_die(pnp);
379 655 return (-1);
380 656 }
381 657 break;
382 658
383 659 case PCUNKILL: /* delete a pending signal */
384 660 error = pr_unkill(pnp, (int)argp->sig);
385 661 break;
386 662
387 663 case PCNICE: /* set nice priority */
388 664 error = pr_nice(p, (int)argp->nice, cr);
389 665 break;
390 666
391 667 case PCSENTRY: /* set syscall entry bit mask */
392 668 case PCSEXIT: /* set syscall exit bit mask */
393 669 pr_setentryexit(p, &argp->sysset, cmd == PCSENTRY);
394 670 break;
395 671
396 672 case PCSET: /* set process flags */
397 673 error = pr_set(p, argp->flags);
398 674 break;
399 675
400 676 case PCUNSET: /* unset process flags */
401 677 error = pr_unset(p, argp->flags);
402 678 break;
403 679
404 680 case PCSREG: /* set general registers */
405 681 {
406 682 kthread_t *t = pr_thread(pnp);
407 683
408 684 if (!ISTOPPED(t) && !VSTOPPED(t) && !DSTOPPED(t)) {
409 685 thread_unlock(t);
410 686 error = EBUSY;
411 687 } else {
412 688 thread_unlock(t);
413 689 mutex_exit(&p->p_lock);
414 690 prsetprregs(ttolwp(t), argp->prgregset, 0);
415 691 mutex_enter(&p->p_lock);
416 692 }
417 693 break;
418 694 }
419 695
420 696 case PCSFPREG: /* set floating-point registers */
421 697 error = pr_setfpregs(pnp, &argp->prfpregset);
422 698 break;
423 699
424 700 case PCSXREG: /* set extra registers */
425 -#if defined(__sparc)
426 - error = pr_setxregs(pnp, &argp->prxregset);
427 -#else
428 - error = EINVAL;
429 -#endif
701 + error = pr_setxregs(pnp, (prxregset_t *)argp);
430 702 break;
431 703
432 -#if defined(__sparc)
433 - case PCSASRS: /* set ancillary state registers */
434 - error = pr_setasrs(pnp, argp->asrset);
435 - break;
436 -#endif
437 -
438 704 case PCSVADDR: /* set virtual address at which to resume */
439 705 error = pr_setvaddr(pnp, argp->vaddr);
440 706 break;
441 707
442 708 case PCSHOLD: /* set signal-hold mask */
443 709 pr_sethold(pnp, &argp->sigset);
444 710 break;
445 711
446 712 case PCSFAULT: /* set mask of traced faults */
447 713 pr_setfault(p, &argp->fltset);
448 714 break;
449 715
450 716 case PCCSIG: /* clear current signal */
451 717 error = pr_clearsig(pnp);
452 718 break;
453 719
454 720 case PCCFAULT: /* clear current fault */
455 721 error = pr_clearflt(pnp);
456 722 break;
457 723
458 724 case PCWATCH: /* set or clear watched areas */
459 725 error = pr_watch(pnp, &argp->prwatch, &unlocked);
460 726 if (error && unlocked)
461 727 return (error);
462 728 break;
463 729
464 730 case PCAGENT: /* create the /proc agent lwp in the target process */
465 731 error = pr_agent(pnp, argp->prgregset, &unlocked);
466 732 if (error && unlocked)
467 733 return (error);
468 734 break;
469 735
470 736 case PCREAD: /* read from the address space */
471 737 error = pr_rdwr(p, UIO_READ, &argp->priovec);
472 738 break;
473 739
474 740 case PCWRITE: /* write to the address space */
475 741 error = pr_rdwr(p, UIO_WRITE, &argp->priovec);
476 742 break;
477 743
478 744 case PCSCRED: /* set the process credentials */
479 745 case PCSCREDX:
480 746 error = pr_scred(p, &argp->prcred, cr, cmd == PCSCREDX);
481 747 break;
482 748
483 749 case PCSPRIV: /* set the process privileges */
484 750 error = pr_spriv(p, &argp->prpriv, cr);
485 751 break;
486 752 case PCSZONE: /* set the process's zoneid credentials */
487 753 error = pr_szoneid(p, (zoneid_t)argp->przoneid, cr);
488 754 break;
489 755 }
490 756
491 757 if (error)
492 758 prunlock(pnp);
493 759 return (error);
494 760 }
495 761
762 +int
763 +prwritectl(vnode_t *vp, uio_t *uiop, cred_t *cr)
764 +{
765 + return (prwritectl_common(vp, uiop, cr, proc_ctl_info,
766 + ARRAY_SIZE(proc_ctl_info), sizeof (long), pr_control));
767 +}
768 +
496 769 #ifdef _SYSCALL32_IMPL
497 770
498 771 typedef union {
499 772 int32_t sig; /* PCKILL, PCUNKILL */
500 773 int32_t nice; /* PCNICE */
501 774 int32_t timeo; /* PCTWSTOP */
502 775 uint32_t flags; /* PCRUN, PCSET, PCUNSET */
503 776 caddr32_t vaddr; /* PCSVADDR */
504 777 siginfo32_t siginfo; /* PCSSIG */
505 778 sigset_t sigset; /* PCSTRACE, PCSHOLD */
506 779 fltset_t fltset; /* PCSFAULT */
507 780 sysset_t sysset; /* PCSENTRY, PCSEXIT */
508 781 prgregset32_t prgregset; /* PCSREG, PCAGENT */
509 782 prfpregset32_t prfpregset; /* PCSFPREG */
510 -#if defined(__sparc)
511 - prxregset_t prxregset; /* PCSXREG */
512 -#endif
513 783 prwatch32_t prwatch; /* PCWATCH */
514 784 priovec32_t priovec; /* PCREAD, PCWRITE */
515 785 prcred32_t prcred; /* PCSCRED */
516 786 prpriv_t prpriv; /* PCSPRIV */
517 787 int32_t przoneid; /* PCSZONE */
518 788 } arg32_t;
519 789
520 -static int pr_control32(int32_t, arg32_t *, prnode_t *, cred_t *);
521 790 static int pr_setfpregs32(prnode_t *, prfpregset32_t *);
522 791
523 -/*
524 - * Note that while ctlsize32() can use argp, it must do so only in a way
525 - * that assumes 32-bit rather than 64-bit alignment as argp is a pointer
526 - * to an array of 32-bit values and only 32-bit alignment is ensured.
527 - */
528 -static size_t
529 -ctlsize32(int32_t cmd, size_t resid, arg32_t *argp)
792 +static boolean_t
793 +prwritectl_pcscredx32_sizef(const void *datap, size_t *sizep)
530 794 {
531 - size_t size = sizeof (int32_t);
532 - size_t rnd;
533 - int ngrp;
795 + const prcred32_t *cred = datap;
534 796
535 - switch (cmd) {
536 - case PCNULL:
537 - case PCSTOP:
538 - case PCDSTOP:
539 - case PCWSTOP:
540 - case PCCSIG:
541 - case PCCFAULT:
542 - break;
543 - case PCSSIG:
544 - size += sizeof (siginfo32_t);
545 - break;
546 - case PCTWSTOP:
547 - size += sizeof (int32_t);
548 - break;
549 - case PCKILL:
550 - case PCUNKILL:
551 - case PCNICE:
552 - size += sizeof (int32_t);
553 - break;
554 - case PCRUN:
555 - case PCSET:
556 - case PCUNSET:
557 - size += sizeof (uint32_t);
558 - break;
559 - case PCSVADDR:
560 - size += sizeof (caddr32_t);
561 - break;
562 - case PCSTRACE:
563 - case PCSHOLD:
564 - size += sizeof (sigset_t);
565 - break;
566 - case PCSFAULT:
567 - size += sizeof (fltset_t);
568 - break;
569 - case PCSENTRY:
570 - case PCSEXIT:
571 - size += sizeof (sysset_t);
572 - break;
573 - case PCSREG:
574 - case PCAGENT:
575 - size += sizeof (prgregset32_t);
576 - break;
577 - case PCSFPREG:
578 - size += sizeof (prfpregset32_t);
579 - break;
580 -#if defined(__sparc)
581 - case PCSXREG:
582 - size += sizeof (prxregset_t);
583 - break;
584 -#endif
585 - case PCWATCH:
586 - size += sizeof (prwatch32_t);
587 - break;
588 - case PCREAD:
589 - case PCWRITE:
590 - size += sizeof (priovec32_t);
591 - break;
592 - case PCSCRED:
593 - size += sizeof (prcred32_t);
594 - break;
595 - case PCSCREDX:
596 - /*
597 - * We cannot derefence the pr_ngroups fields if it
598 - * we don't have enough data.
599 - */
600 - if (resid < size + sizeof (prcred32_t) - sizeof (gid32_t))
601 - return (0);
602 - ngrp = argp->prcred.pr_ngroups;
603 - if (ngrp < 0 || ngrp > ngroups_max)
604 - return (0);
605 -
606 - /* The result can be smaller than sizeof (prcred32_t) */
607 - size += sizeof (prcred32_t) - sizeof (gid32_t);
608 - size += ngrp * sizeof (gid32_t);
609 - break;
610 - case PCSPRIV:
611 - if (resid >= size + sizeof (prpriv_t))
612 - size += priv_prgetprivsize(&argp->prpriv);
613 - else
614 - return (0);
615 - break;
616 - case PCSZONE:
617 - size += sizeof (int32_t);
618 - break;
619 - default:
620 - return (0);
797 + if (cred->pr_ngroups < 0 || cred->pr_ngroups > ngroups_max) {
798 + return (B_FALSE);
621 799 }
622 800
623 - /* Round up to a multiple of int32_t */
624 - rnd = size & (sizeof (int32_t) - 1);
625 -
626 - if (rnd != 0)
627 - size += sizeof (int32_t) - rnd;
628 -
629 - if (size > resid)
630 - return (0);
631 - return (size);
801 + if (cred->pr_ngroups == 0) {
802 + *sizep = 0;
803 + } else {
804 + *sizep = (cred->pr_ngroups - 1) * sizeof (gid32_t);
805 + }
806 + return (B_TRUE);
632 807 }
633 808
634 809 /*
635 - * Control operations (lots).
810 + * When dealing with ILP32 code, we cannot assume 64-bit aligned data. Any
811 + * functions that operate here must be aware of
812 + * that.
636 813 */
637 -int
638 -prwritectl32(struct vnode *vp, struct uio *uiop, cred_t *cr)
639 -{
640 -#define MY_BUFFER_SIZE32 \
641 - 100 > 1 + sizeof (arg32_t) / sizeof (int32_t) ? \
642 - 100 : 1 + sizeof (arg32_t) / sizeof (int32_t)
643 - int32_t buf[MY_BUFFER_SIZE32];
644 - int32_t *bufp;
645 - arg32_t arg;
646 - size_t resid = 0;
647 - size_t size;
648 - prnode_t *pnp = VTOP(vp);
649 - int error;
650 - int locked = 0;
814 +static const proc_control_info_t proc_ctl_info32[] = {
815 + { PCNULL, 0, NULL, NULL },
816 + { PCSTOP, 0, NULL, NULL },
817 + { PCDSTOP, 0, NULL, NULL },
818 + { PCWSTOP, 0, NULL, NULL },
819 + { PCCSIG, 0, NULL, NULL },
820 + { PCCFAULT, 0, NULL, NULL },
821 + { PCSSIG, sizeof (siginfo32_t), NULL, NULL },
822 + { PCTWSTOP, sizeof (int32_t), NULL, NULL },
823 + { PCKILL, sizeof (int32_t), NULL, NULL },
824 + { PCUNKILL, sizeof (int32_t), NULL, NULL },
825 + { PCNICE, sizeof (int32_t), NULL, NULL },
826 + { PCRUN, sizeof (uint32_t), NULL, NULL },
827 + { PCSET, sizeof (uint32_t), NULL, NULL },
828 + { PCUNSET, sizeof (uint32_t), NULL, NULL },
829 + { PCSVADDR, sizeof (caddr32_t), NULL, NULL },
830 + { PCSTRACE, sizeof (sigset_t), NULL, NULL },
831 + { PCSHOLD, sizeof (sigset_t), NULL, NULL },
832 + { PCSFAULT, sizeof (fltset_t), NULL, NULL },
833 + { PCSENTRY, sizeof (sysset_t), NULL, NULL },
834 + { PCSEXIT, sizeof (sysset_t), NULL, NULL },
835 + { PCSREG, sizeof (prgregset32_t), NULL, NULL },
836 + { PCAGENT, sizeof (prgregset32_t), NULL, NULL },
837 + { PCSFPREG, sizeof (prfpregset32_t), NULL, NULL },
838 + { PCSXREG, 0, prwriteminxreg, prwritesizexreg },
839 + { PCWATCH, sizeof (prwatch32_t), NULL },
840 + { PCREAD, sizeof (priovec32_t), NULL, NULL },
841 + { PCWRITE, sizeof (priovec32_t), NULL, NULL },
842 + { PCSCRED, sizeof (prcred32_t), NULL, NULL },
843 + { PCSCREDX, sizeof (prcred32_t), NULL, prwritectl_pcscredx32_sizef },
844 + { PCSPRIV, sizeof (prpriv_t), NULL, prwritectl_pcspriv_sizef },
845 + { PCSZONE, sizeof (int32_t), NULL },
846 +};
651 847
652 - while (uiop->uio_resid) {
653 - /*
654 - * Read several commands in one gulp.
655 - */
656 - bufp = buf;
657 - if (resid) { /* move incomplete command to front of buffer */
658 - int32_t *tail;
659 -
660 - if (resid >= sizeof (buf))
661 - break;
662 - tail = (int32_t *)((char *)buf + sizeof (buf) - resid);
663 - do {
664 - *bufp++ = *tail++;
665 - } while ((resid -= sizeof (int32_t)) != 0);
666 - }
667 - resid = sizeof (buf) - ((char *)bufp - (char *)buf);
668 - if (resid > uiop->uio_resid)
669 - resid = uiop->uio_resid;
670 - if (error = uiomove((caddr_t)bufp, resid, UIO_WRITE, uiop))
671 - return (error);
672 - resid += (char *)bufp - (char *)buf;
673 - bufp = buf;
674 -
675 - do { /* loop over commands in buffer */
676 - int32_t cmd = bufp[0];
677 - arg32_t *argp = (arg32_t *)&bufp[1];
678 -
679 - size = ctlsize32(cmd, resid, argp);
680 - if (size == 0) /* incomplete or invalid command */
681 - break;
682 - /*
683 - * Perform the specified control operation.
684 - */
685 - if (!locked) {
686 - if ((error = prlock(pnp, ZNO)) != 0)
687 - return (error);
688 - locked = 1;
689 - }
690 -
691 - /*
692 - * Since some members of the arg32_t union contain
693 - * 64-bit values (which must be 64-bit aligned), we
694 - * can't simply pass a pointer to the structure as
695 - * it may be unaligned. Note that we do pass the
696 - * potentially unaligned structure to ctlsize32()
697 - * above, but that uses it a way that makes no
698 - * assumptions about alignment.
699 - */
700 - ASSERT(size - sizeof (cmd) <= sizeof (arg));
701 - bcopy(argp, &arg, size - sizeof (cmd));
702 -
703 - if (error = pr_control32(cmd, &arg, pnp, cr)) {
704 - if (error == -1) /* -1 is timeout */
705 - locked = 0;
706 - else
707 - return (error);
708 - }
709 - bufp = (int32_t *)((char *)bufp + size);
710 - } while ((resid -= size) != 0);
711 -
712 - if (locked) {
713 - prunlock(pnp);
714 - locked = 0;
715 - }
716 - }
717 - return (resid? EINVAL : 0);
718 -}
719 -
720 848 static int
721 -pr_control32(int32_t cmd, arg32_t *argp, prnode_t *pnp, cred_t *cr)
849 +pr_control32(long cmd, void *generic, prnode_t *pnp, cred_t *cr)
722 850 {
723 851 prcommon_t *pcp;
724 852 proc_t *p;
725 853 int unlocked;
726 854 int error = 0;
855 + arg32_t *argp = generic;
727 856
728 857 if (cmd == PCNULL)
729 858 return (0);
730 859
731 860 pcp = pnp->pr_common;
732 861 p = pcp->prc_proc;
733 862 ASSERT(p != NULL);
734 863
735 864 if (p->p_flag & SSYS) {
736 865 prunlock(pnp);
737 866 return (EBUSY);
738 867 }
739 868
740 869 switch (cmd) {
741 870
742 871 default:
743 872 error = EINVAL;
744 873 break;
745 874
746 875 case PCSTOP: /* direct process or lwp to stop and wait for stop */
747 876 case PCDSTOP: /* direct process or lwp to stop, don't wait */
748 877 case PCWSTOP: /* wait for process or lwp to stop */
749 878 case PCTWSTOP: /* wait for process or lwp to stop, with timeout */
750 879 {
751 880 time_t timeo;
752 881
753 882 /*
754 883 * Can't apply to a system process.
755 884 */
756 885 if (p->p_as == &kas) {
757 886 error = EBUSY;
758 887 break;
759 888 }
760 889
761 890 if (cmd == PCSTOP || cmd == PCDSTOP)
762 891 pr_stop(pnp);
763 892
764 893 if (cmd == PCDSTOP)
765 894 break;
766 895
767 896 /*
768 897 * If an lwp is waiting for itself or its process,
769 898 * don't wait. The lwp will never see the fact that
770 899 * itself is stopped.
771 900 */
772 901 if ((pcp->prc_flags & PRC_LWP)?
773 902 (pcp->prc_thread == curthread) : (p == curproc)) {
774 903 if (cmd == PCWSTOP || cmd == PCTWSTOP)
775 904 error = EBUSY;
776 905 break;
777 906 }
778 907
779 908 timeo = (cmd == PCTWSTOP)? (time_t)argp->timeo : 0;
780 909 if ((error = pr_wait_stop(pnp, timeo)) != 0)
781 910 return (error);
782 911
783 912 break;
784 913 }
785 914
786 915 case PCRUN: /* make lwp or process runnable */
787 916 error = pr_setrun(pnp, (ulong_t)argp->flags);
788 917 break;
789 918
790 919 case PCSTRACE: /* set signal trace mask */
791 920 pr_settrace(p, &argp->sigset);
792 921 break;
793 922
794 923 case PCSSIG: /* set current signal */
795 924 if (PROCESS_NOT_32BIT(p))
796 925 error = EOVERFLOW;
797 926 else {
798 927 int sig = (int)argp->siginfo.si_signo;
799 928 siginfo_t siginfo;
800 929
801 930 bzero(&siginfo, sizeof (siginfo));
802 931 siginfo_32tok(&argp->siginfo, (k_siginfo_t *)&siginfo);
803 932 error = pr_setsig(pnp, &siginfo);
804 933 if (sig == SIGKILL && error == 0) {
805 934 prunlock(pnp);
806 935 pr_wait_die(pnp);
807 936 return (-1);
808 937 }
809 938 }
810 939 break;
811 940
812 941 case PCKILL: /* send signal */
813 942 error = pr_kill(pnp, (int)argp->sig, cr);
814 943 if (error == 0 && argp->sig == SIGKILL) {
815 944 prunlock(pnp);
816 945 pr_wait_die(pnp);
817 946 return (-1);
818 947 }
819 948 break;
820 949
821 950 case PCUNKILL: /* delete a pending signal */
822 951 error = pr_unkill(pnp, (int)argp->sig);
823 952 break;
824 953
825 954 case PCNICE: /* set nice priority */
826 955 error = pr_nice(p, (int)argp->nice, cr);
827 956 break;
828 957
829 958 case PCSENTRY: /* set syscall entry bit mask */
830 959 case PCSEXIT: /* set syscall exit bit mask */
831 960 pr_setentryexit(p, &argp->sysset, cmd == PCSENTRY);
832 961 break;
833 962
834 963 case PCSET: /* set process flags */
835 964 error = pr_set(p, (long)argp->flags);
836 965 break;
837 966
838 967 case PCUNSET: /* unset process flags */
839 968 error = pr_unset(p, (long)argp->flags);
840 969 break;
841 970
842 971 case PCSREG: /* set general registers */
843 972 if (PROCESS_NOT_32BIT(p))
844 973 error = EOVERFLOW;
845 974 else {
846 975 kthread_t *t = pr_thread(pnp);
847 976
848 977 if (!ISTOPPED(t) && !VSTOPPED(t) && !DSTOPPED(t)) {
849 978 thread_unlock(t);
850 979 error = EBUSY;
851 980 } else {
852 981 prgregset_t prgregset;
853 982 klwp_t *lwp = ttolwp(t);
854 983
855 984 thread_unlock(t);
856 985 mutex_exit(&p->p_lock);
857 986 prgregset_32ton(lwp, argp->prgregset,
858 987 prgregset);
859 988 prsetprregs(lwp, prgregset, 0);
860 989 mutex_enter(&p->p_lock);
861 990 }
862 991 }
863 992 break;
864 993
865 994 case PCSFPREG: /* set floating-point registers */
866 995 if (PROCESS_NOT_32BIT(p))
867 996 error = EOVERFLOW;
868 997 else
869 998 error = pr_setfpregs32(pnp, &argp->prfpregset);
870 999 break;
871 1000
872 1001 case PCSXREG: /* set extra registers */
873 -#if defined(__sparc)
874 1002 if (PROCESS_NOT_32BIT(p))
875 1003 error = EOVERFLOW;
876 1004 else
877 - error = pr_setxregs(pnp, &argp->prxregset);
878 -#else
879 - error = EINVAL;
880 -#endif
1005 + error = pr_setxregs(pnp, (prxregset_t *)argp);
881 1006 break;
882 1007
883 1008 case PCSVADDR: /* set virtual address at which to resume */
884 1009 if (PROCESS_NOT_32BIT(p))
885 1010 error = EOVERFLOW;
886 1011 else
887 1012 error = pr_setvaddr(pnp,
888 1013 (caddr_t)(uintptr_t)argp->vaddr);
889 1014 break;
890 1015
891 1016 case PCSHOLD: /* set signal-hold mask */
892 1017 pr_sethold(pnp, &argp->sigset);
893 1018 break;
894 1019
895 1020 case PCSFAULT: /* set mask of traced faults */
896 1021 pr_setfault(p, &argp->fltset);
897 1022 break;
898 1023
899 1024 case PCCSIG: /* clear current signal */
900 1025 error = pr_clearsig(pnp);
901 1026 break;
902 1027
903 1028 case PCCFAULT: /* clear current fault */
904 1029 error = pr_clearflt(pnp);
905 1030 break;
906 1031
907 1032 case PCWATCH: /* set or clear watched areas */
908 1033 if (PROCESS_NOT_32BIT(p))
909 1034 error = EOVERFLOW;
910 1035 else {
911 1036 prwatch_t prwatch;
912 1037
913 1038 prwatch.pr_vaddr = argp->prwatch.pr_vaddr;
914 1039 prwatch.pr_size = argp->prwatch.pr_size;
915 1040 prwatch.pr_wflags = argp->prwatch.pr_wflags;
916 1041 prwatch.pr_pad = argp->prwatch.pr_pad;
917 1042 error = pr_watch(pnp, &prwatch, &unlocked);
918 1043 if (error && unlocked)
919 1044 return (error);
920 1045 }
921 1046 break;
922 1047
923 1048 case PCAGENT: /* create the /proc agent lwp in the target process */
924 1049 if (PROCESS_NOT_32BIT(p))
925 1050 error = EOVERFLOW;
926 1051 else {
927 1052 prgregset_t prgregset;
928 1053 kthread_t *t = pr_thread(pnp);
929 1054 klwp_t *lwp = ttolwp(t);
930 1055 thread_unlock(t);
931 1056 mutex_exit(&p->p_lock);
932 1057 prgregset_32ton(lwp, argp->prgregset, prgregset);
933 1058 mutex_enter(&p->p_lock);
934 1059 error = pr_agent(pnp, prgregset, &unlocked);
935 1060 if (error && unlocked)
936 1061 return (error);
937 1062 }
938 1063 break;
939 1064
940 1065 case PCREAD: /* read from the address space */
941 1066 case PCWRITE: /* write to the address space */
942 1067 if (PROCESS_NOT_32BIT(p) || (pnp->pr_flags & PR_OFFMAX))
943 1068 error = EOVERFLOW;
944 1069 else {
945 1070 enum uio_rw rw = (cmd == PCREAD)? UIO_READ : UIO_WRITE;
946 1071 priovec_t priovec;
947 1072
948 1073 priovec.pio_base =
949 1074 (void *)(uintptr_t)argp->priovec.pio_base;
950 1075 priovec.pio_len = (size_t)argp->priovec.pio_len;
951 1076 priovec.pio_offset = (off_t)
952 1077 (uint32_t)argp->priovec.pio_offset;
953 1078 error = pr_rdwr(p, rw, &priovec);
954 1079 }
955 1080 break;
956 1081
957 1082 case PCSCRED: /* set the process credentials */
958 1083 case PCSCREDX:
959 1084 {
960 1085 /*
961 1086 * All the fields in these structures are exactly the
962 1087 * same and so the structures are compatible. In case
963 1088 * this ever changes, we catch this with the ASSERT
964 1089 * below.
965 1090 */
966 1091 prcred_t *prcred = (prcred_t *)&argp->prcred;
967 1092
968 1093 #ifndef __lint
969 1094 ASSERT(sizeof (prcred_t) == sizeof (prcred32_t));
970 1095 #endif
971 1096
972 1097 error = pr_scred(p, prcred, cr, cmd == PCSCREDX);
973 1098 break;
974 1099 }
975 1100
976 1101 case PCSPRIV: /* set the process privileges */
977 1102 error = pr_spriv(p, &argp->prpriv, cr);
978 1103 break;
979 1104
980 1105 case PCSZONE: /* set the process's zoneid */
981 1106 error = pr_szoneid(p, (zoneid_t)argp->przoneid, cr);
982 1107 break;
983 1108 }
984 1109
985 1110 if (error)
986 1111 prunlock(pnp);
987 1112 return (error);
988 1113 }
989 1114
1115 +int
1116 +prwritectl32(struct vnode *vp, struct uio *uiop, cred_t *cr)
1117 +{
1118 + return (prwritectl_common(vp, uiop, cr, proc_ctl_info32,
1119 + ARRAY_SIZE(proc_ctl_info32), sizeof (int32_t), pr_control32));
1120 +}
990 1121 #endif /* _SYSCALL32_IMPL */
991 1122
992 1123 /*
993 1124 * Return the specific or chosen thread/lwp for a control operation.
994 1125 * Returns with the thread locked via thread_lock(t).
995 1126 */
996 1127 kthread_t *
997 1128 pr_thread(prnode_t *pnp)
998 1129 {
999 1130 prcommon_t *pcp = pnp->pr_common;
1000 1131 kthread_t *t;
1001 1132
1002 1133 if (pcp->prc_flags & PRC_LWP) {
1003 1134 t = pcp->prc_thread;
1004 1135 ASSERT(t != NULL);
1005 1136 thread_lock(t);
1006 1137 } else {
1007 1138 proc_t *p = pcp->prc_proc;
1008 1139 t = prchoose(p); /* returns locked thread */
1009 1140 ASSERT(t != NULL);
1010 1141 }
1011 1142
1012 1143 return (t);
1013 1144 }
1014 1145
1015 1146 /*
1016 1147 * Direct the process or lwp to stop.
1017 1148 */
1018 1149 void
1019 1150 pr_stop(prnode_t *pnp)
1020 1151 {
1021 1152 prcommon_t *pcp = pnp->pr_common;
1022 1153 proc_t *p = pcp->prc_proc;
1023 1154 kthread_t *t;
1024 1155 vnode_t *vp;
1025 1156
1026 1157 /*
1027 1158 * If already stopped, do nothing; otherwise flag
1028 1159 * it to be stopped the next time it tries to run.
1029 1160 * If sleeping at interruptible priority, set it
1030 1161 * running so it will stop within cv_wait_sig().
1031 1162 *
1032 1163 * Take care to cooperate with jobcontrol: if an lwp
1033 1164 * is stopped due to the default action of a jobcontrol
1034 1165 * stop signal, flag it to be stopped the next time it
1035 1166 * starts due to a SIGCONT signal.
1036 1167 */
1037 1168 if (pcp->prc_flags & PRC_LWP)
1038 1169 t = pcp->prc_thread;
1039 1170 else
1040 1171 t = p->p_tlist;
1041 1172 ASSERT(t != NULL);
1042 1173
1043 1174 do {
1044 1175 int notify;
1045 1176
1046 1177 notify = 0;
1047 1178 thread_lock(t);
1048 1179 if (!ISTOPPED(t)) {
1049 1180 t->t_proc_flag |= TP_PRSTOP;
1050 1181 t->t_sig_check = 1; /* do ISSIG */
1051 1182 }
1052 1183
1053 1184 /* Move the thread from wait queue to run queue */
1054 1185 if (ISWAITING(t))
1055 1186 setrun_locked(t);
1056 1187
1057 1188 if (ISWAKEABLE(t)) {
1058 1189 if (t->t_wchan0 == NULL)
1059 1190 setrun_locked(t);
1060 1191 else if (!VSTOPPED(t)) {
1061 1192 /*
1062 1193 * Mark it virtually stopped.
1063 1194 */
1064 1195 t->t_proc_flag |= TP_PRVSTOP;
1065 1196 notify = 1;
1066 1197 }
1067 1198 }
1068 1199 /*
1069 1200 * force the thread into the kernel
1070 1201 * if it is not already there.
1071 1202 */
1072 1203 prpokethread(t);
1073 1204 thread_unlock(t);
1074 1205 if (notify &&
1075 1206 (vp = p->p_lwpdir[t->t_dslot].ld_entry->le_trace) != NULL)
1076 1207 prnotify(vp);
1077 1208 if (pcp->prc_flags & PRC_LWP)
1078 1209 break;
1079 1210 } while ((t = t->t_forw) != p->p_tlist);
1080 1211
1081 1212 /*
1082 1213 * We do this just in case the thread we asked
1083 1214 * to stop is in holdlwps() (called from cfork()).
1084 1215 */
1085 1216 cv_broadcast(&p->p_holdlwps);
1086 1217 }
1087 1218
1088 1219 /*
1089 1220 * Sleep until the lwp stops, but cooperate with
1090 1221 * jobcontrol: Don't wake up if the lwp is stopped
1091 1222 * due to the default action of a jobcontrol stop signal.
1092 1223 * If this is the process file descriptor, sleep
1093 1224 * until all of the process's lwps stop.
1094 1225 */
1095 1226 int
1096 1227 pr_wait_stop(prnode_t *pnp, time_t timeo)
1097 1228 {
1098 1229 prcommon_t *pcp = pnp->pr_common;
1099 1230 proc_t *p = pcp->prc_proc;
1100 1231 timestruc_t rqtime;
1101 1232 timestruc_t *rqtp = NULL;
1102 1233 int timecheck = 0;
1103 1234 kthread_t *t;
1104 1235 int error;
1105 1236
1106 1237 if (timeo > 0) { /* millisecond timeout */
1107 1238 /*
1108 1239 * Determine the precise future time of the requested timeout.
1109 1240 */
1110 1241 timestruc_t now;
1111 1242
1112 1243 timecheck = timechanged;
1113 1244 gethrestime(&now);
1114 1245 rqtp = &rqtime;
1115 1246 rqtp->tv_sec = timeo / MILLISEC;
1116 1247 rqtp->tv_nsec = (timeo % MILLISEC) * MICROSEC;
1117 1248 timespecadd(rqtp, &now);
1118 1249 }
1119 1250
1120 1251 if (pcp->prc_flags & PRC_LWP) { /* lwp file descriptor */
1121 1252 t = pcp->prc_thread;
1122 1253 ASSERT(t != NULL);
1123 1254 thread_lock(t);
1124 1255 while (!ISTOPPED(t) && !VSTOPPED(t)) {
1125 1256 thread_unlock(t);
1126 1257 mutex_enter(&pcp->prc_mutex);
1127 1258 prunlock(pnp);
1128 1259 error = pr_wait(pcp, rqtp, timecheck);
1129 1260 if (error) /* -1 is timeout */
1130 1261 return (error);
1131 1262 if ((error = prlock(pnp, ZNO)) != 0)
1132 1263 return (error);
1133 1264 ASSERT(p == pcp->prc_proc);
1134 1265 ASSERT(t == pcp->prc_thread);
1135 1266 thread_lock(t);
1136 1267 }
1137 1268 thread_unlock(t);
1138 1269 } else { /* process file descriptor */
1139 1270 t = prchoose(p); /* returns locked thread */
1140 1271 ASSERT(t != NULL);
1141 1272 ASSERT(MUTEX_HELD(&p->p_lock));
1142 1273 while ((!ISTOPPED(t) && !VSTOPPED(t) && !SUSPENDED(t)) ||
1143 1274 (p->p_flag & SEXITLWPS)) {
1144 1275 thread_unlock(t);
1145 1276 mutex_enter(&pcp->prc_mutex);
1146 1277 prunlock(pnp);
1147 1278 error = pr_wait(pcp, rqtp, timecheck);
1148 1279 if (error) /* -1 is timeout */
1149 1280 return (error);
1150 1281 if ((error = prlock(pnp, ZNO)) != 0)
1151 1282 return (error);
1152 1283 ASSERT(p == pcp->prc_proc);
1153 1284 t = prchoose(p); /* returns locked t */
1154 1285 ASSERT(t != NULL);
1155 1286 }
1156 1287 thread_unlock(t);
1157 1288 }
1158 1289
1159 1290 ASSERT(!(pcp->prc_flags & PRC_DESTROY) && p->p_stat != SZOMB &&
1160 1291 t != NULL && t->t_state != TS_ZOMB);
1161 1292
1162 1293 return (0);
1163 1294 }
1164 1295
1165 1296 int
1166 1297 pr_setrun(prnode_t *pnp, ulong_t flags)
1167 1298 {
1168 1299 prcommon_t *pcp = pnp->pr_common;
1169 1300 proc_t *p = pcp->prc_proc;
1170 1301 kthread_t *t;
1171 1302 klwp_t *lwp;
1172 1303
1173 1304 /*
1174 1305 * Cannot set an lwp running if it is not stopped.
1175 1306 * Also, no lwp other than the /proc agent lwp can
1176 1307 * be set running so long as the /proc agent lwp exists.
1177 1308 */
1178 1309 t = pr_thread(pnp); /* returns locked thread */
1179 1310 if ((!ISTOPPED(t) && !VSTOPPED(t) &&
1180 1311 !(t->t_proc_flag & TP_PRSTOP)) ||
1181 1312 (p->p_agenttp != NULL &&
1182 1313 (t != p->p_agenttp || !(pcp->prc_flags & PRC_LWP)))) {
1183 1314 thread_unlock(t);
1184 1315 return (EBUSY);
1185 1316 }
1186 1317 thread_unlock(t);
1187 1318 if (flags & ~(PRCSIG|PRCFAULT|PRSTEP|PRSTOP|PRSABORT))
1188 1319 return (EINVAL);
1189 1320 lwp = ttolwp(t);
1190 1321 if ((flags & PRCSIG) && lwp->lwp_cursig != SIGKILL) {
1191 1322 /*
1192 1323 * Discard current siginfo_t, if any.
1193 1324 */
1194 1325 lwp->lwp_cursig = 0;
1195 1326 lwp->lwp_extsig = 0;
1196 1327 if (lwp->lwp_curinfo) {
1197 1328 siginfofree(lwp->lwp_curinfo);
1198 1329 lwp->lwp_curinfo = NULL;
1199 1330 }
1200 1331 }
1201 1332 if (flags & PRCFAULT)
1202 1333 lwp->lwp_curflt = 0;
1203 1334 /*
1204 1335 * We can't hold p->p_lock when we touch the lwp's registers.
1205 1336 * It may be swapped out and we will get a page fault.
1206 1337 */
1207 1338 if (flags & PRSTEP) {
1208 1339 mutex_exit(&p->p_lock);
1209 1340 prstep(lwp, 0);
1210 1341 mutex_enter(&p->p_lock);
1211 1342 }
1212 1343 if (flags & PRSTOP) {
1213 1344 t->t_proc_flag |= TP_PRSTOP;
1214 1345 t->t_sig_check = 1; /* do ISSIG */
1215 1346 }
1216 1347 if (flags & PRSABORT)
1217 1348 lwp->lwp_sysabort = 1;
1218 1349 thread_lock(t);
1219 1350 if ((pcp->prc_flags & PRC_LWP) || (flags & (PRSTEP|PRSTOP))) {
1220 1351 /*
1221 1352 * Here, we are dealing with a single lwp.
1222 1353 */
1223 1354 if (ISTOPPED(t)) {
1224 1355 t->t_schedflag |= TS_PSTART;
1225 1356 t->t_dtrace_stop = 0;
1226 1357 setrun_locked(t);
1227 1358 } else if (flags & PRSABORT) {
1228 1359 t->t_proc_flag &=
1229 1360 ~(TP_PRSTOP|TP_PRVSTOP|TP_STOPPING);
1230 1361 setrun_locked(t);
1231 1362 } else if (!(flags & PRSTOP)) {
1232 1363 t->t_proc_flag &=
1233 1364 ~(TP_PRSTOP|TP_PRVSTOP|TP_STOPPING);
1234 1365 }
1235 1366 thread_unlock(t);
1236 1367 } else {
1237 1368 /*
1238 1369 * Here, we are dealing with the whole process.
1239 1370 */
1240 1371 if (ISTOPPED(t)) {
1241 1372 /*
1242 1373 * The representative lwp is stopped on an event
1243 1374 * of interest. We demote it to PR_REQUESTED and
1244 1375 * choose another representative lwp. If the new
1245 1376 * representative lwp is not stopped on an event of
1246 1377 * interest (other than PR_REQUESTED), we set the
1247 1378 * whole process running, else we leave the process
1248 1379 * stopped showing the next event of interest.
1249 1380 */
1250 1381 kthread_t *tx = NULL;
1251 1382
1252 1383 if (!(flags & PRSABORT) &&
1253 1384 t->t_whystop == PR_SYSENTRY &&
1254 1385 t->t_whatstop == SYS_lwp_exit)
1255 1386 tx = t; /* remember the exiting lwp */
1256 1387 t->t_whystop = PR_REQUESTED;
1257 1388 t->t_whatstop = 0;
1258 1389 thread_unlock(t);
1259 1390 t = prchoose(p); /* returns locked t */
1260 1391 ASSERT(ISTOPPED(t) || VSTOPPED(t));
1261 1392 if (VSTOPPED(t) ||
1262 1393 t->t_whystop == PR_REQUESTED) {
1263 1394 thread_unlock(t);
1264 1395 allsetrun(p);
1265 1396 } else {
1266 1397 thread_unlock(t);
1267 1398 /*
1268 1399 * As a special case, if the old representative
1269 1400 * lwp was stopped on entry to _lwp_exit()
1270 1401 * (and we are not aborting the system call),
1271 1402 * we set the old representative lwp running.
1272 1403 * We do this so that the next process stop
1273 1404 * will find the exiting lwp gone.
1274 1405 */
1275 1406 if (tx != NULL) {
1276 1407 thread_lock(tx);
1277 1408 tx->t_schedflag |= TS_PSTART;
1278 1409 				tx->t_dtrace_stop = 0;
1279 1410 setrun_locked(tx);
1280 1411 thread_unlock(tx);
1281 1412 }
1282 1413 }
1283 1414 } else {
1284 1415 /*
1285 1416 * No event of interest; set all of the lwps running.
1286 1417 */
1287 1418 if (flags & PRSABORT) {
1288 1419 t->t_proc_flag &=
1289 1420 ~(TP_PRSTOP|TP_PRVSTOP|TP_STOPPING);
1290 1421 setrun_locked(t);
1291 1422 }
1292 1423 thread_unlock(t);
1293 1424 allsetrun(p);
1294 1425 }
1295 1426 }
1296 1427 return (0);
1297 1428 }
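
/*
 * Illustrative userland sketch (not part of this file) of driving
 * pr_setrun() through the proc(5) control interface: PCRUN with
 * PRCSIG|PRSTEP discards any pending signal and single-steps the
 * stopped lwp. The function name and pid argument are hypothetical.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <procfs.h>
#include <stdio.h>
#include <unistd.h>

static int
example_pcrun(pid_t pid)
{
	char path[64];
	long cmd[2];
	int fd, rv = 0;

	(void) snprintf(path, sizeof (path), "/proc/%d/ctl", (int)pid);
	if ((fd = open(path, O_WRONLY)) == -1)
		return (-1);
	cmd[0] = PCRUN;
	cmd[1] = PRCSIG | PRSTEP;	/* clear cursig, single-step */
	if (write(fd, cmd, sizeof (cmd)) != sizeof (cmd))
		rv = -1;
	(void) close(fd);
	return (rv);
}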
1298 1429
1299 1430 /*
1300 1431 * Wait until process/lwp stops or until timer expires.
1301 1432 * Return EINTR for an interruption, -1 for timeout, else 0.
1302 1433 */
1303 1434 int
1304 1435 pr_wait(prcommon_t *pcp, /* prcommon referring to process/lwp */
1305 - timestruc_t *ts, /* absolute time of timeout, if any */
1306 - int timecheck)
1436 + timestruc_t *ts, /* absolute time of timeout, if any */
1437 + int timecheck)
1307 1438 {
1308 1439 int rval;
1309 1440
1310 1441 ASSERT(MUTEX_HELD(&pcp->prc_mutex));
1311 1442 rval = cv_waituntil_sig(&pcp->prc_wait, &pcp->prc_mutex, ts, timecheck);
1312 1443 mutex_exit(&pcp->prc_mutex);
1313 1444 switch (rval) {
1314 1445 case 0:
1315 1446 return (EINTR);
1316 1447 case -1:
1317 1448 return (-1);
1318 1449 default:
1319 1450 return (0);
1320 1451 }
1321 1452 }
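
/*
 * Illustrative userland sketch (not part of this file) of the waiting
 * side that pr_wait() serves: PCWSTOP blocks until the tracee stops,
 * and PCTWSTOP is the same wait with a millisecond timeout operand
 * (zero means no timeout). The fd and function names are hypothetical.
 */
#include <procfs.h>
#include <unistd.h>

static int
example_pctwstop(int ctlfd, long msec)
{
	long cmd[2];

	cmd[0] = PCTWSTOP;
	cmd[1] = msec;
	return (write(ctlfd, cmd, sizeof (cmd)) == sizeof (cmd) ? 0 : -1);
}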
1322 1453
1323 1454 /*
1324 1455 * Make all threads in the process runnable.
1325 1456 */
1326 1457 void
1327 1458 allsetrun(proc_t *p)
1328 1459 {
1329 1460 kthread_t *t;
1330 1461
1331 1462 ASSERT(MUTEX_HELD(&p->p_lock));
1332 1463
1333 1464 if ((t = p->p_tlist) != NULL) {
1334 1465 do {
1335 1466 thread_lock(t);
1336 1467 ASSERT(!(t->t_proc_flag & TP_LWPEXIT));
1337 1468 t->t_proc_flag &= ~(TP_PRSTOP|TP_PRVSTOP|TP_STOPPING);
1338 1469 if (ISTOPPED(t)) {
1339 1470 t->t_schedflag |= TS_PSTART;
1340 1471 t->t_dtrace_stop = 0;
1341 1472 setrun_locked(t);
1342 1473 }
1343 1474 thread_unlock(t);
1344 1475 } while ((t = t->t_forw) != p->p_tlist);
1345 1476 }
1346 1477 }
1347 1478
1348 1479 /*
1349 1480 * Wait for the process to die.
1350 1481 * We do this after sending SIGKILL because we know it will
1351 1482 * die soon and we want subsequent operations to return ENOENT.
1352 1483 */
1353 1484 void
1354 1485 pr_wait_die(prnode_t *pnp)
1355 1486 {
1356 1487 proc_t *p;
1357 1488
1358 1489 mutex_enter(&pidlock);
1359 1490 while ((p = pnp->pr_common->prc_proc) != NULL && p->p_stat != SZOMB) {
1360 1491 if (!cv_wait_sig(&p->p_srwchan_cv, &pidlock))
1361 1492 break;
1362 1493 }
1363 1494 mutex_exit(&pidlock);
1364 1495 }
1365 1496
1366 1497 static void
1367 1498 pr_settrace(proc_t *p, sigset_t *sp)
1368 1499 {
1369 1500 prdelset(sp, SIGKILL);
1370 1501 prassignset(&p->p_sigmask, sp);
1371 1502 if (!sigisempty(&p->p_sigmask))
1372 1503 p->p_proc_flag |= P_PR_TRACE;
1373 1504 else if (prisempty(&p->p_fltmask)) {
1374 1505 user_t *up = PTOU(p);
1375 1506 if (up->u_systrap == 0)
1376 1507 p->p_proc_flag &= ~P_PR_TRACE;
1377 1508 }
1378 1509 }
1379 1510
1380 1511 int
1381 1512 pr_setsig(prnode_t *pnp, siginfo_t *sip)
1382 1513 {
1383 1514 int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG;
1384 1515 int sig = sip->si_signo;
1385 1516 prcommon_t *pcp = pnp->pr_common;
1386 1517 proc_t *p = pcp->prc_proc;
1387 1518 kthread_t *t;
1388 1519 klwp_t *lwp;
1389 1520 int error = 0;
1390 1521
1391 1522 t = pr_thread(pnp); /* returns locked thread */
1392 1523 thread_unlock(t);
1393 1524 lwp = ttolwp(t);
1394 1525 if (sig < 0 || sig >= nsig)
1395 1526 /* Zero allowed here */
1396 1527 error = EINVAL;
1397 1528 else if (lwp->lwp_cursig == SIGKILL)
1398 1529 /* "can't happen", but just in case */
1399 1530 error = EBUSY;
1400 1531 else if ((lwp->lwp_cursig = (uchar_t)sig) == 0) {
1401 1532 lwp->lwp_extsig = 0;
1402 1533 /*
1403 1534 * Discard current siginfo_t, if any.
1404 1535 */
1405 1536 if (lwp->lwp_curinfo) {
1406 1537 siginfofree(lwp->lwp_curinfo);
1407 1538 lwp->lwp_curinfo = NULL;
1408 1539 }
1409 1540 } else {
1410 1541 kthread_t *tx;
1411 1542 sigqueue_t *sqp;
1412 1543
1413 1544 /* drop p_lock to do kmem_alloc(KM_SLEEP) */
1414 1545 mutex_exit(&p->p_lock);
1415 1546 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
1416 1547 mutex_enter(&p->p_lock);
1417 1548
1418 1549 if (lwp->lwp_curinfo == NULL)
1419 1550 lwp->lwp_curinfo = sqp;
1420 1551 else
1421 1552 kmem_free(sqp, sizeof (sigqueue_t));
1422 1553 /*
1423 1554 * Copy contents of info to current siginfo_t.
1424 1555 */
1425 1556 bcopy(sip, &lwp->lwp_curinfo->sq_info,
1426 1557 sizeof (lwp->lwp_curinfo->sq_info));
1427 1558 /*
1428 1559 * Prevent contents published by si_zoneid-unaware /proc
1429 1560 * consumers from being incorrectly filtered. Because
1430 1561 * an uninitialized si_zoneid is the same as
1431 1562 * GLOBAL_ZONEID, this means that you can't pr_setsig a
1432 1563 * process in a non-global zone with a siginfo which
1433 1564 * appears to come from the global zone.
1434 1565 */
1435 1566 if (SI_FROMUSER(sip) && sip->si_zoneid == 0)
1436 1567 lwp->lwp_curinfo->sq_info.si_zoneid =
1437 1568 p->p_zone->zone_id;
1438 1569 /*
1439 1570 * Side-effects for SIGKILL and jobcontrol signals.
1440 1571 */
1441 1572 if (sig == SIGKILL) {
1442 1573 p->p_flag |= SKILLED;
1443 1574 p->p_flag &= ~SEXTKILLED;
1444 1575 } else if (sig == SIGCONT) {
1445 1576 p->p_flag |= SSCONT;
1446 1577 sigdelq(p, NULL, SIGSTOP);
1447 1578 sigdelq(p, NULL, SIGTSTP);
1448 1579 sigdelq(p, NULL, SIGTTOU);
1449 1580 sigdelq(p, NULL, SIGTTIN);
1450 1581 sigdiffset(&p->p_sig, &stopdefault);
1451 1582 sigdiffset(&p->p_extsig, &stopdefault);
1452 1583 if ((tx = p->p_tlist) != NULL) {
1453 1584 do {
1454 1585 sigdelq(p, tx, SIGSTOP);
1455 1586 sigdelq(p, tx, SIGTSTP);
1456 1587 sigdelq(p, tx, SIGTTOU);
1457 1588 sigdelq(p, tx, SIGTTIN);
1458 1589 sigdiffset(&tx->t_sig, &stopdefault);
1459 1590 sigdiffset(&tx->t_extsig, &stopdefault);
1460 1591 } while ((tx = tx->t_forw) != p->p_tlist);
1461 1592 }
1462 1593 } else if (sigismember(&stopdefault, sig)) {
1463 1594 if (PTOU(p)->u_signal[sig-1] == SIG_DFL &&
1464 1595 (sig == SIGSTOP || !p->p_pgidp->pid_pgorphaned))
1465 1596 p->p_flag &= ~SSCONT;
1466 1597 sigdelq(p, NULL, SIGCONT);
1467 1598 sigdelset(&p->p_sig, SIGCONT);
1468 1599 sigdelset(&p->p_extsig, SIGCONT);
1469 1600 if ((tx = p->p_tlist) != NULL) {
1470 1601 do {
1471 1602 sigdelq(p, tx, SIGCONT);
1472 1603 sigdelset(&tx->t_sig, SIGCONT);
1473 1604 sigdelset(&tx->t_extsig, SIGCONT);
1474 1605 } while ((tx = tx->t_forw) != p->p_tlist);
1475 1606 }
1476 1607 }
1477 1608 thread_lock(t);
1478 1609 if (ISWAKEABLE(t) || ISWAITING(t)) {
1479 1610 /* Set signaled sleeping/waiting lwp running */
1480 1611 setrun_locked(t);
1481 1612 } else if (t->t_state == TS_STOPPED && sig == SIGKILL) {
1482 1613 /* If SIGKILL, set stopped lwp running */
1483 1614 p->p_stopsig = 0;
1484 1615 t->t_schedflag |= TS_XSTART | TS_PSTART | TS_BSTART;
1485 1616 t->t_dtrace_stop = 0;
1486 1617 setrun_locked(t);
1487 1618 }
1488 1619 t->t_sig_check = 1; /* so ISSIG will be done */
1489 1620 thread_unlock(t);
1490 1621 /*
1491 1622 * More jobcontrol side-effects.
1492 1623 */
1493 1624 if (sig == SIGCONT && (tx = p->p_tlist) != NULL) {
1494 1625 p->p_stopsig = 0;
1495 1626 do {
1496 1627 thread_lock(tx);
1497 1628 if (tx->t_state == TS_STOPPED &&
1498 1629 tx->t_whystop == PR_JOBCONTROL) {
1499 1630 tx->t_schedflag |= TS_XSTART;
1500 1631 setrun_locked(tx);
1501 1632 }
1502 1633 thread_unlock(tx);
1503 1634 } while ((tx = tx->t_forw) != p->p_tlist);
1504 1635 }
1505 1636 }
1506 1637 return (error);
1507 1638 }
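
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side of pr_setsig(): PCSSIG sets the lwp's current signal and siginfo;
 * a zeroed siginfo (si_signo == 0) clears it instead, which is what
 * PCCSIG does. The long-array idiom keeps the operand packed directly
 * after the control code. Names are hypothetical.
 */
#include <procfs.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int
example_pcssig(int lwpctlfd, int sig)
{
	long ctl[1 + sizeof (siginfo_t) / sizeof (long)];
	siginfo_t info;

	(void) memset(&info, 0, sizeof (info));
	info.si_signo = sig;
	info.si_code = SI_USER;
	ctl[0] = PCSSIG;
	(void) memcpy(&ctl[1], &info, sizeof (info));
	return (write(lwpctlfd, ctl, sizeof (ctl)) == sizeof (ctl) ? 0 : -1);
}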
1508 1639
1509 1640 int
1510 1641 pr_kill(prnode_t *pnp, int sig, cred_t *cr)
1511 1642 {
1512 1643 int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG;
1513 1644 prcommon_t *pcp = pnp->pr_common;
1514 1645 proc_t *p = pcp->prc_proc;
1515 1646 k_siginfo_t info;
1516 1647
1517 1648 if (sig <= 0 || sig >= nsig)
1518 1649 return (EINVAL);
1519 1650
1520 1651 bzero(&info, sizeof (info));
1521 1652 info.si_signo = sig;
1522 1653 info.si_code = SI_USER;
1523 1654 info.si_pid = curproc->p_pid;
1524 1655 info.si_ctid = PRCTID(curproc);
1525 1656 info.si_zoneid = getzoneid();
1526 1657 info.si_uid = crgetruid(cr);
1527 1658 sigaddq(p, (pcp->prc_flags & PRC_LWP)?
1528 1659 pcp->prc_thread : NULL, &info, KM_NOSLEEP);
1529 1660
1530 1661 return (0);
1531 1662 }
1532 1663
1533 1664 int
1534 1665 pr_unkill(prnode_t *pnp, int sig)
1535 1666 {
1536 1667 int nsig = PROC_IS_BRANDED(curproc)? BROP(curproc)->b_nsig : NSIG;
1537 1668 prcommon_t *pcp = pnp->pr_common;
1538 1669 proc_t *p = pcp->prc_proc;
1539 1670 sigqueue_t *infop = NULL;
1540 1671
1541 1672 if (sig <= 0 || sig >= nsig || sig == SIGKILL)
1542 1673 return (EINVAL);
1543 1674
1544 1675 if (pcp->prc_flags & PRC_LWP)
1545 1676 sigdeq(p, pcp->prc_thread, sig, &infop);
1546 1677 else
1547 1678 sigdeq(p, NULL, sig, &infop);
1548 1679
1549 1680 if (infop)
1550 1681 siginfofree(infop);
1551 1682
1552 1683 return (0);
1553 1684 }
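
/*
 * Illustrative userland sketch (not part of this file) of pr_kill() and
 * pr_unkill() from the consumer side: PCKILL queues a signal for the
 * process or lwp, and PCUNKILL deletes a queued signal (SIGKILL cannot
 * be deleted). Names are hypothetical.
 */
#include <procfs.h>
#include <unistd.h>

static int
example_pckill(int ctlfd, int sig, int undo)
{
	long cmd[2];

	cmd[0] = undo ? PCUNKILL : PCKILL;
	cmd[1] = sig;
	return (write(ctlfd, cmd, sizeof (cmd)) == sizeof (cmd) ? 0 : -1);
}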
1554 1685
1555 1686 int
1556 1687 pr_nice(proc_t *p, int nice, cred_t *cr)
1557 1688 {
1558 1689 kthread_t *t;
1559 1690 int err;
1560 1691 int error = 0;
1561 1692
1562 1693 t = p->p_tlist;
1563 1694 do {
1564 1695 ASSERT(!(t->t_proc_flag & TP_LWPEXIT));
1565 1696 err = CL_DONICE(t, cr, nice, (int *)NULL);
1566 1697 schedctl_set_cidpri(t);
1567 1698 if (error == 0)
1568 1699 error = err;
1569 1700 } while ((t = t->t_forw) != p->p_tlist);
1570 1701
1571 1702 return (error);
1572 1703 }
1573 1704
1574 1705 void
1575 1706 pr_setentryexit(proc_t *p, sysset_t *sysset, int entry)
1576 1707 {
1577 1708 user_t *up = PTOU(p);
1578 1709
1579 1710 if (entry) {
1580 1711 prassignset(&up->u_entrymask, sysset);
1581 1712 } else {
1582 1713 prassignset(&up->u_exitmask, sysset);
1583 1714 }
1584 1715 if (!prisempty(&up->u_entrymask) ||
1585 1716 !prisempty(&up->u_exitmask)) {
1586 1717 up->u_systrap = 1;
1587 1718 p->p_proc_flag |= P_PR_TRACE;
1588 1719 set_proc_sys(p); /* set pre and post-sys flags */
1589 1720 } else {
1590 1721 up->u_systrap = 0;
1591 1722 if (sigisempty(&p->p_sigmask) &&
1592 1723 prisempty(&p->p_fltmask))
1593 1724 p->p_proc_flag &= ~P_PR_TRACE;
1594 1725 }
1595 1726 }
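
/*
 * Illustrative userland sketch (not part of this file) of
 * pr_setentryexit() from the consumer side: build a sysset_t with the
 * set macros from <sys/procfs.h> and install it with PCSENTRY (PCSEXIT
 * is symmetric). The traced syscall chosen here (SYS_execve) and the
 * names are illustrative only.
 */
#include <sys/syscall.h>
#include <procfs.h>
#include <string.h>
#include <unistd.h>

static int
example_pcsentry(int ctlfd)
{
	long ctl[1 + sizeof (sysset_t) / sizeof (long)];
	sysset_t set;

	premptyset(&set);
	praddset(&set, SYS_execve);
	ctl[0] = PCSENTRY;
	(void) memcpy(&ctl[1], &set, sizeof (set));
	return (write(ctlfd, ctl, sizeof (ctl)) == sizeof (ctl) ? 0 : -1);
}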
1596 1727
1597 1728 #define ALLFLAGS \
1598 1729 (PR_FORK|PR_RLC|PR_KLC|PR_ASYNC|PR_BPTADJ|PR_MSACCT|PR_MSFORK|PR_PTRACE)
1599 1730
1600 1731 int
1601 1732 pr_set(proc_t *p, long flags)
1602 1733 {
1603 1734 if ((p->p_flag & SSYS) || p->p_as == &kas)
1604 1735 return (EBUSY);
1605 1736
1606 1737 if (flags & ~ALLFLAGS)
1607 1738 return (EINVAL);
1608 1739
1609 1740 if (flags & PR_FORK)
1610 1741 p->p_proc_flag |= P_PR_FORK;
1611 1742 if (flags & PR_RLC)
1612 1743 p->p_proc_flag |= P_PR_RUNLCL;
1613 1744 if (flags & PR_KLC)
1614 1745 p->p_proc_flag |= P_PR_KILLCL;
1615 1746 if (flags & PR_ASYNC)
1616 1747 p->p_proc_flag |= P_PR_ASYNC;
1617 1748 if (flags & PR_BPTADJ)
1618 1749 p->p_proc_flag |= P_PR_BPTADJ;
1619 1750 if (flags & PR_MSACCT)
1620 1751 if ((p->p_flag & SMSACCT) == 0)
1621 1752 estimate_msacct(p->p_tlist, gethrtime());
1622 1753 if (flags & PR_MSFORK)
1623 1754 p->p_flag |= SMSFORK;
1624 1755 if (flags & PR_PTRACE) {
1625 1756 p->p_proc_flag |= P_PR_PTRACE;
1626 1757 /* ptraced process must die if parent dead */
1627 1758 if (p->p_ppid == 1)
1628 1759 sigtoproc(p, NULL, SIGKILL);
1629 1760 }
1630 1761
1631 1762 return (0);
1632 1763 }
1633 1764
1634 1765 int
1635 1766 pr_unset(proc_t *p, long flags)
1636 1767 {
1637 1768 if ((p->p_flag & SSYS) || p->p_as == &kas)
1638 1769 return (EBUSY);
1639 1770
1640 1771 if (flags & ~ALLFLAGS)
1641 1772 return (EINVAL);
1642 1773
1643 1774 if (flags & PR_FORK)
1644 1775 p->p_proc_flag &= ~P_PR_FORK;
1645 1776 if (flags & PR_RLC)
1646 1777 p->p_proc_flag &= ~P_PR_RUNLCL;
1647 1778 if (flags & PR_KLC)
1648 1779 p->p_proc_flag &= ~P_PR_KILLCL;
1649 1780 if (flags & PR_ASYNC)
1650 1781 p->p_proc_flag &= ~P_PR_ASYNC;
1651 1782 if (flags & PR_BPTADJ)
1652 1783 p->p_proc_flag &= ~P_PR_BPTADJ;
1653 1784 if (flags & PR_MSACCT)
1654 1785 disable_msacct(p);
1655 1786 if (flags & PR_MSFORK)
1656 1787 p->p_flag &= ~SMSFORK;
1657 1788 if (flags & PR_PTRACE)
1658 1789 p->p_proc_flag &= ~P_PR_PTRACE;
1659 1790
1660 1791 return (0);
1661 1792 }
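
/*
 * Illustrative userland sketch (not part of this file) of pr_set() and
 * pr_unset() from the consumer side: PCSET turns the given mode flags
 * on and PCUNSET turns them off. PR_RLC (run-on-last-close) is a common
 * choice so a tracee is not left stopped if the tracer dies. Names are
 * hypothetical.
 */
#include <procfs.h>
#include <unistd.h>

static int
example_pcset(int ctlfd, int unset)
{
	long cmd[2];

	cmd[0] = unset ? PCUNSET : PCSET;
	cmd[1] = PR_RLC;
	return (write(ctlfd, cmd, sizeof (cmd)) == sizeof (cmd) ? 0 : -1);
}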
1662 1793
1663 1794 static int
1664 1795 pr_setfpregs(prnode_t *pnp, prfpregset_t *prfpregset)
1665 1796 {
1666 1797 proc_t *p = pnp->pr_common->prc_proc;
1667 1798 kthread_t *t = pr_thread(pnp); /* returns locked thread */
1668 1799
1669 1800 if (!ISTOPPED(t) && !VSTOPPED(t) && !DSTOPPED(t)) {
1670 1801 thread_unlock(t);
1671 1802 return (EBUSY);
1672 1803 }
1673 1804 if (!prhasfp()) {
1674 1805 thread_unlock(t);
1675 1806 return (EINVAL); /* No FP support */
1676 1807 }
1677 1808
1678 1809 /* drop p_lock while touching the lwp's stack */
1679 1810 thread_unlock(t);
1680 1811 mutex_exit(&p->p_lock);
1681 1812 prsetprfpregs(ttolwp(t), prfpregset);
1682 1813 mutex_enter(&p->p_lock);
1683 1814
1684 1815 return (0);
1685 1816 }
1686 1817
1687 1818 #ifdef _SYSCALL32_IMPL
1688 1819 static int
1689 1820 pr_setfpregs32(prnode_t *pnp, prfpregset32_t *prfpregset)
1690 1821 {
1691 1822 proc_t *p = pnp->pr_common->prc_proc;
1692 1823 kthread_t *t = pr_thread(pnp); /* returns locked thread */
1693 1824
1694 1825 if (!ISTOPPED(t) && !VSTOPPED(t) && !DSTOPPED(t)) {
1695 1826 thread_unlock(t);
1696 1827 return (EBUSY);
1697 1828 }
1698 1829 if (!prhasfp()) {
1699 1830 thread_unlock(t);
1700 1831 return (EINVAL); /* No FP support */
1701 1832 }
1702 1833
1703 1834 /* drop p_lock while touching the lwp's stack */
1704 1835 thread_unlock(t);
1705 1836 mutex_exit(&p->p_lock);
1706 1837 prsetprfpregs32(ttolwp(t), prfpregset);
1707 1838 mutex_enter(&p->p_lock);
1708 1839
1709 1840 return (0);
1710 1841 }
1711 1842 #endif /* _SYSCALL32_IMPL */
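
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side of pr_setfpregs(): the floating point registers are typically
 * taken from the stopped lwp's lwpstatus (pr_fpreg), edited, and written
 * back with PCSFPREG on the lwpctl file. The long-array idiom avoids any
 * padding between the control code and the operand. Names are
 * hypothetical.
 */
#include <procfs.h>
#include <string.h>
#include <unistd.h>

static int
example_pcsfpreg(int lwpctlfd, const prfpregset_t *fp)
{
	long ctl[1 + sizeof (prfpregset_t) / sizeof (long)];

	ctl[0] = PCSFPREG;
	(void) memcpy(&ctl[1], fp, sizeof (prfpregset_t));
	return (write(lwpctlfd, ctl, sizeof (ctl)) == sizeof (ctl) ? 0 : -1);
}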
1712 1843
1713 -#if defined(__sparc)
1714 1844 /* ARGSUSED */
1715 1845 static int
1716 1846 pr_setxregs(prnode_t *pnp, prxregset_t *prxregset)
1717 1847 {
1848 + int error;
1718 1849 proc_t *p = pnp->pr_common->prc_proc;
1719 1850 kthread_t *t = pr_thread(pnp); /* returns locked thread */
1720 1851
1721 1852 if (!ISTOPPED(t) && !VSTOPPED(t) && !DSTOPPED(t)) {
1722 1853 thread_unlock(t);
1723 1854 return (EBUSY);
1724 1855 }
1725 1856 thread_unlock(t);
1726 1857
1727 1858 if (!prhasx(p))
1728 1859 return (EINVAL); /* No extra register support */
1729 1860
1730 1861 /* drop p_lock while touching the lwp's stack */
1731 1862 mutex_exit(&p->p_lock);
1732 - prsetprxregs(ttolwp(t), (caddr_t)prxregset);
1863 + error = prsetprxregs(ttolwp(t), prxregset);
1733 1864 mutex_enter(&p->p_lock);
1734 1865
1735 - return (0);
1866 + return (error);
1736 1867 }
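
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side of pr_setxregs(): with this change the extended register set may
 * be variable-sized, so the usual flow is to read the lwp's xregs file,
 * modify the buffer, and write it back as PCSXREG followed by the same
 * number of bytes. The buffer and length arguments are hypothetical.
 */
#include <procfs.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int
example_pcsxreg(int lwpctlfd, const void *xregs, size_t len)
{
	size_t sz = sizeof (long) + len;
	long *ctl;
	int rv = 0;

	if ((ctl = malloc(sz)) == NULL)
		return (-1);
	ctl[0] = PCSXREG;
	(void) memcpy(&ctl[1], xregs, len);
	if (write(lwpctlfd, ctl, sz) != (ssize_t)sz)
		rv = -1;
	free(ctl);
	return (rv);
}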
1737 1868
1738 1869 static int
1739 -pr_setasrs(prnode_t *pnp, asrset_t asrset)
1740 -{
1741 - proc_t *p = pnp->pr_common->prc_proc;
1742 - kthread_t *t = pr_thread(pnp); /* returns locked thread */
1743 -
1744 - if (!ISTOPPED(t) && !VSTOPPED(t) && !DSTOPPED(t)) {
1745 - thread_unlock(t);
1746 - return (EBUSY);
1747 - }
1748 - thread_unlock(t);
1749 -
1750 - /* drop p_lock while touching the lwp's stack */
1751 - mutex_exit(&p->p_lock);
1752 - prsetasregs(ttolwp(t), asrset);
1753 - mutex_enter(&p->p_lock);
1754 -
1755 - return (0);
1756 -}
1757 -#endif
1758 -
1759 -static int
1760 1870 pr_setvaddr(prnode_t *pnp, caddr_t vaddr)
1761 1871 {
1762 1872 proc_t *p = pnp->pr_common->prc_proc;
1763 1873 kthread_t *t = pr_thread(pnp); /* returns locked thread */
1764 1874
1765 1875 if (!ISTOPPED(t) && !VSTOPPED(t) && !DSTOPPED(t)) {
1766 1876 thread_unlock(t);
1767 1877 return (EBUSY);
1768 1878 }
1769 1879
1770 1880 /* drop p_lock while touching the lwp's stack */
1771 1881 thread_unlock(t);
1772 1882 mutex_exit(&p->p_lock);
1773 1883 prsvaddr(ttolwp(t), vaddr);
1774 1884 mutex_enter(&p->p_lock);
1775 1885
1776 1886 return (0);
1777 1887 }
1778 1888
1779 1889 void
1780 1890 pr_sethold(prnode_t *pnp, sigset_t *sp)
1781 1891 {
1782 1892 proc_t *p = pnp->pr_common->prc_proc;
1783 1893 kthread_t *t = pr_thread(pnp); /* returns locked thread */
1784 1894
1785 1895 schedctl_finish_sigblock(t);
1786 1896 sigutok(sp, &t->t_hold);
1787 1897 if (ISWAKEABLE(t) &&
1788 1898 (fsig(&p->p_sig, t) || fsig(&t->t_sig, t)))
1789 1899 setrun_locked(t);
1790 1900 t->t_sig_check = 1; /* so thread will see new holdmask */
1791 1901 thread_unlock(t);
1792 1902 }
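
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side of pr_sethold(): install a signal hold mask for the lwp with
 * PCSHOLD. SIGKILL and SIGSTOP can never be held, so the kernel strips
 * them when it converts the set. Names are hypothetical.
 */
#include <procfs.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int
example_pcshold(int lwpctlfd)
{
	long ctl[1 + sizeof (sigset_t) / sizeof (long)];
	sigset_t hold;

	(void) sigfillset(&hold);	/* hold everything that can be held */
	ctl[0] = PCSHOLD;
	(void) memcpy(&ctl[1], &hold, sizeof (hold));
	return (write(lwpctlfd, ctl, sizeof (ctl)) == sizeof (ctl) ? 0 : -1);
}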
1793 1903
1794 1904 void
1795 1905 pr_setfault(proc_t *p, fltset_t *fltp)
1796 1906 {
1797 1907 prassignset(&p->p_fltmask, fltp);
1798 1908 if (!prisempty(&p->p_fltmask))
1799 1909 p->p_proc_flag |= P_PR_TRACE;
1800 1910 else if (sigisempty(&p->p_sigmask)) {
1801 1911 user_t *up = PTOU(p);
1802 1912 if (up->u_systrap == 0)
1803 1913 p->p_proc_flag &= ~P_PR_TRACE;
1804 1914 }
1805 1915 }
1806 1916
1807 1917 static int
1808 1918 pr_clearsig(prnode_t *pnp)
1809 1919 {
1810 1920 kthread_t *t = pr_thread(pnp); /* returns locked thread */
1811 1921 klwp_t *lwp = ttolwp(t);
1812 1922
1813 1923 thread_unlock(t);
1814 1924 if (lwp->lwp_cursig == SIGKILL)
1815 1925 return (EBUSY);
1816 1926
1817 1927 /*
1818 1928 * Discard current siginfo_t, if any.
1819 1929 */
1820 1930 lwp->lwp_cursig = 0;
1821 1931 lwp->lwp_extsig = 0;
1822 1932 if (lwp->lwp_curinfo) {
1823 1933 siginfofree(lwp->lwp_curinfo);
1824 1934 lwp->lwp_curinfo = NULL;
1825 1935 }
1826 1936
1827 1937 return (0);
1828 1938 }
1829 1939
1830 1940 static int
1831 1941 pr_clearflt(prnode_t *pnp)
1832 1942 {
1833 1943 kthread_t *t = pr_thread(pnp); /* returns locked thread */
1834 1944
1835 1945 thread_unlock(t);
1836 1946 ttolwp(t)->lwp_curflt = 0;
1837 1947
1838 1948 return (0);
1839 1949 }
1840 1950
1841 1951 static int
1842 1952 pr_watch(prnode_t *pnp, prwatch_t *pwp, int *unlocked)
1843 1953 {
1844 1954 proc_t *p = pnp->pr_common->prc_proc;
1845 1955 struct as *as = p->p_as;
1846 1956 uintptr_t vaddr = pwp->pr_vaddr;
1847 1957 size_t size = pwp->pr_size;
1848 1958 int wflags = pwp->pr_wflags;
1849 1959 ulong_t newpage = 0;
1850 1960 struct watched_area *pwa;
1851 1961 int error;
1852 1962
1853 1963 *unlocked = 0;
1854 1964
1855 1965 /*
1856 1966 * Can't apply to a system process.
1857 1967 */
1858 1968 if ((p->p_flag & SSYS) || p->p_as == &kas)
1859 1969 return (EBUSY);
1860 1970
1861 1971 /*
1862 1972 * Verify that the address range does not wrap
1863 1973 * and that only the proper flags were specified.
1864 1974 */
1865 1975 if ((wflags & ~WA_TRAPAFTER) == 0)
1866 1976 size = 0;
1867 1977 if (vaddr + size < vaddr ||
1868 1978 (wflags & ~(WA_READ|WA_WRITE|WA_EXEC|WA_TRAPAFTER)) != 0 ||
1869 1979 ((wflags & ~WA_TRAPAFTER) != 0 && size == 0))
1870 1980 return (EINVAL);
1871 1981
1872 1982 /*
1873 1983 * Don't let the address range go above as->a_userlimit.
1874 1984 * There is no error here, just a limitation.
1875 1985 */
1876 1986 if (vaddr >= (uintptr_t)as->a_userlimit)
1877 1987 return (0);
1878 1988 if (vaddr + size > (uintptr_t)as->a_userlimit)
1879 1989 size = (uintptr_t)as->a_userlimit - vaddr;
1880 1990
1881 1991 /*
1882 1992 * Compute maximum number of pages this will add.
1883 1993 */
1884 1994 if ((wflags & ~WA_TRAPAFTER) != 0) {
1885 1995 ulong_t pagespan = (vaddr + size) - (vaddr & PAGEMASK);
1886 1996 newpage = btopr(pagespan);
1887 1997 if (newpage > 2 * prnwatch)
1888 1998 return (E2BIG);
1889 1999 }
1890 2000
1891 2001 /*
1892 2002 * Force the process to be fully stopped.
1893 2003 */
1894 2004 if (p == curproc) {
1895 2005 prunlock(pnp);
1896 2006 while (holdwatch() != 0)
1897 2007 continue;
1898 2008 if ((error = prlock(pnp, ZNO)) != 0) {
1899 2009 continuelwps(p);
1900 2010 *unlocked = 1;
1901 2011 return (error);
1902 2012 }
1903 2013 } else {
1904 2014 pauselwps(p);
1905 2015 while (pr_allstopped(p, 0) > 0) {
1906 2016 /*
1907 2017 * This cv/mutex pair is persistent even
1908 2018 * if the process disappears after we
1909 2019 * unmark it and drop p->p_lock.
1910 2020 */
1911 2021 kcondvar_t *cv = &pr_pid_cv[p->p_slot];
1912 2022 kmutex_t *mp = &p->p_lock;
1913 2023
1914 2024 prunmark(p);
1915 2025 (void) cv_wait(cv, mp);
1916 2026 mutex_exit(mp);
1917 2027 if ((error = prlock(pnp, ZNO)) != 0) {
1918 2028 /*
1919 2029 * Unpause the process if it exists.
1920 2030 */
1921 2031 p = pr_p_lock(pnp);
1922 2032 mutex_exit(&pr_pidlock);
1923 2033 if (p != NULL) {
1924 2034 unpauselwps(p);
1925 2035 prunlock(pnp);
1926 2036 }
1927 2037 *unlocked = 1;
1928 2038 return (error);
1929 2039 }
1930 2040 }
1931 2041 }
1932 2042
1933 2043 /*
1934 2044 * Drop p->p_lock in order to perform the rest of this.
1935 2045 * The process is still locked with the P_PR_LOCK flag.
1936 2046 */
1937 2047 mutex_exit(&p->p_lock);
1938 2048
1939 2049 pwa = kmem_alloc(sizeof (struct watched_area), KM_SLEEP);
1940 2050 pwa->wa_vaddr = (caddr_t)vaddr;
1941 2051 pwa->wa_eaddr = (caddr_t)vaddr + size;
1942 2052 pwa->wa_flags = (ulong_t)wflags;
1943 2053
1944 2054 error = ((pwa->wa_flags & ~WA_TRAPAFTER) == 0)?
1945 2055 clear_watched_area(p, pwa) : set_watched_area(p, pwa);
1946 2056
1947 2057 if (p == curproc) {
1948 2058 setallwatch();
1949 2059 mutex_enter(&p->p_lock);
1950 2060 continuelwps(p);
1951 2061 } else {
1952 2062 mutex_enter(&p->p_lock);
1953 2063 unpauselwps(p);
1954 2064 }
1955 2065
1956 2066 return (error);
1957 2067 }
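
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side of pr_watch(): set a write watchpoint over [vaddr, vaddr + size)
 * with PCWATCH. Writing the same range with pr_wflags == 0 clears it
 * again. Names are hypothetical.
 */
#include <sys/types.h>
#include <procfs.h>
#include <string.h>
#include <unistd.h>

static int
example_pcwatch(int ctlfd, uintptr_t vaddr, size_t size)
{
	long ctl[1 + sizeof (prwatch_t) / sizeof (long)];
	prwatch_t wa;

	(void) memset(&wa, 0, sizeof (wa));
	wa.pr_vaddr = vaddr;
	wa.pr_size = size;
	wa.pr_wflags = WA_WRITE | WA_TRAPAFTER;
	ctl[0] = PCWATCH;
	(void) memcpy(&ctl[1], &wa, sizeof (wa));
	return (write(ctlfd, ctl, sizeof (ctl)) == sizeof (ctl) ? 0 : -1);
}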
1958 2068
1959 2069 /* jobcontrol stopped, but with a /proc directed stop in effect */
1960 2070 #define JDSTOPPED(t) \
1961 2071 ((t)->t_state == TS_STOPPED && \
1962 2072 (t)->t_whystop == PR_JOBCONTROL && \
1963 2073 ((t)->t_proc_flag & TP_PRSTOP))
1964 2074
1965 2075 /*
1966 2076 * pr_agent() creates the agent lwp. If the process is exiting while
1967 2077 * we are creating an agent lwp, then exitlwps() waits until the
1968 2078 * agent has been created using prbarrier().
1969 2079 */
1970 2080 static int
1971 2081 pr_agent(prnode_t *pnp, prgregset_t prgregset, int *unlocked)
1972 2082 {
1973 2083 proc_t *p = pnp->pr_common->prc_proc;
1974 2084 prcommon_t *pcp;
1975 2085 kthread_t *t;
1976 2086 kthread_t *ct;
1977 2087 klwp_t *clwp;
1978 2088 k_sigset_t smask;
1979 2089 int cid;
1980 2090 void *bufp = NULL;
1981 2091 int error;
1982 2092
1983 2093 *unlocked = 0;
1984 2094
1985 2095 /*
1986 2096 * Cannot create the /proc agent lwp if :-
1987 2097 * - the process is not fully stopped or directed to stop.
1988 2098 * - there is an agent lwp already.
1989 2099 * - the process has been killed.
1990 2100 * - the process is exiting.
1991 2101 * - it's a vfork(2) parent.
1992 2102 */
1993 2103 t = prchoose(p); /* returns locked thread */
1994 2104 ASSERT(t != NULL);
1995 2105
1996 2106 if ((!ISTOPPED(t) && !VSTOPPED(t) && !SUSPENDED(t) && !JDSTOPPED(t)) ||
1997 2107 p->p_agenttp != NULL ||
1998 2108 (p->p_flag & (SKILLED | SEXITING | SVFWAIT))) {
1999 2109 thread_unlock(t);
2000 2110 return (EBUSY);
2001 2111 }
2002 2112
2003 2113 thread_unlock(t);
2004 2114 mutex_exit(&p->p_lock);
2005 2115
2006 2116 sigfillset(&smask);
2007 2117 sigdiffset(&smask, &cantmask);
2008 2118 clwp = lwp_create(lwp_rtt, NULL, 0, p, TS_STOPPED,
2009 2119 t->t_pri, &smask, NOCLASS, 0);
2010 2120 if (clwp == NULL) {
2011 2121 mutex_enter(&p->p_lock);
2012 2122 return (ENOMEM);
2013 2123 }
2014 2124 prsetprregs(clwp, prgregset, 1);
2015 2125
2016 2126 /*
2017 2127 * Because abandoning the agent inside the target process leads to
2018 2128 * a state that is essentially undebuggable, we record the psinfo of
2019 2129 * the process creating the agent and hang that off of the lwp.
2020 2130 */
2021 2131 clwp->lwp_spymaster = kmem_zalloc(sizeof (psinfo_t), KM_SLEEP);
2022 2132 mutex_enter(&curproc->p_lock);
2023 2133 prgetpsinfo(curproc, clwp->lwp_spymaster);
2024 2134 mutex_exit(&curproc->p_lock);
2025 2135
2026 2136 /*
2027 2137 * We overload pr_time in the spymaster to denote the time at which the
2028 2138 * agent was created.
2029 2139 */
2030 2140 gethrestime(&clwp->lwp_spymaster->pr_time);
2031 2141
2032 2142 retry:
2033 2143 cid = t->t_cid;
2034 2144 (void) CL_ALLOC(&bufp, cid, KM_SLEEP);
2035 2145 mutex_enter(&p->p_lock);
2036 2146 if (cid != t->t_cid) {
2037 2147 /*
2038 2148 * Someone just changed this thread's scheduling class,
2039 2149 * so try pre-allocating the buffer again. Hopefully we
2040 2150 * don't hit this often.
2041 2151 */
2042 2152 mutex_exit(&p->p_lock);
2043 2153 CL_FREE(cid, bufp);
2044 2154 goto retry;
2045 2155 }
2046 2156
2047 2157 clwp->lwp_ap = clwp->lwp_arg;
2048 2158 clwp->lwp_eosys = NORMALRETURN;
2049 2159 ct = lwptot(clwp);
2050 2160 ct->t_clfuncs = t->t_clfuncs;
2051 2161 CL_FORK(t, ct, bufp);
2052 2162 ct->t_cid = t->t_cid;
2053 2163 ct->t_proc_flag |= TP_PRSTOP;
2054 2164 /*
2055 2165 * Setting t_sysnum to zero causes post_syscall()
2056 2166 * to bypass all syscall checks and go directly to
2057 2167 * if (issig()) psig();
2058 2168 * so that the agent lwp will stop in issig_forreal()
2059 2169 * showing PR_REQUESTED.
2060 2170 */
2061 2171 ct->t_sysnum = 0;
2062 2172 ct->t_post_sys = 1;
2063 2173 ct->t_sig_check = 1;
2064 2174 p->p_agenttp = ct;
2065 2175 ct->t_proc_flag &= ~TP_HOLDLWP;
2066 2176
2067 2177 pcp = pnp->pr_pcommon;
2068 2178 mutex_enter(&pcp->prc_mutex);
2069 2179
2070 2180 lwp_create_done(ct);
2071 2181
2072 2182 /*
2073 2183 * Don't return until the agent is stopped on PR_REQUESTED.
2074 2184 */
2075 2185
2076 2186 for (;;) {
2077 2187 prunlock(pnp);
2078 2188 *unlocked = 1;
2079 2189
2080 2190 /*
2081 2191 * Wait for the agent to stop and notify us.
2082 2192 * If we've been interrupted, return that information.
2083 2193 */
2084 2194 error = pr_wait(pcp, NULL, 0);
2085 2195 if (error == EINTR) {
2086 2196 error = 0;
2087 2197 break;
2088 2198 }
2089 2199
2090 2200 /*
2091 2201 * Confirm that the agent LWP has stopped.
2092 2202 */
2093 2203
2094 2204 if ((error = prlock(pnp, ZNO)) != 0)
2095 2205 break;
2096 2206 *unlocked = 0;
2097 2207
2098 2208 /*
2099 2209 * Since we dropped the lock on the process, the agent
2100 2210 * may have disappeared or changed. Grab the current
2101 2211 		 * agent and fail if it has disappeared.
2102 2212 */
2103 2213 if ((ct = p->p_agenttp) == NULL) {
2104 2214 error = ENOENT;
2105 2215 break;
2106 2216 }
2107 2217
2108 2218 mutex_enter(&pcp->prc_mutex);
2109 2219 thread_lock(ct);
2110 2220
2111 2221 if (ISTOPPED(ct)) {
2112 2222 thread_unlock(ct);
2113 2223 mutex_exit(&pcp->prc_mutex);
2114 2224 break;
2115 2225 }
2116 2226
2117 2227 thread_unlock(ct);
2118 2228 }
2119 2229
2120 2230 return (error ? error : -1);
2121 2231 }
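
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side of pr_agent(): create the agent lwp with PCAGENT. The register
 * set is usually copied from a stopped lwp's lwpstatus (pr_reg) and then
 * pointed at the code the agent should execute; the control write does
 * not return until the agent has stopped showing PR_REQUESTED (or the
 * write is interrupted). Names are hypothetical.
 */
#include <procfs.h>
#include <string.h>
#include <unistd.h>

static int
example_pcagent(int ctlfd, const prgregset_t regs)
{
	long ctl[1 + sizeof (prgregset_t) / sizeof (long)];

	ctl[0] = PCAGENT;
	(void) memcpy(&ctl[1], regs, sizeof (prgregset_t));
	return (write(ctlfd, ctl, sizeof (ctl)) == sizeof (ctl) ? 0 : -1);
}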
2122 2232
2123 2233 static int
2124 2234 pr_rdwr(proc_t *p, enum uio_rw rw, priovec_t *pio)
2125 2235 {
2126 2236 caddr_t base = (caddr_t)pio->pio_base;
2127 2237 size_t cnt = pio->pio_len;
2128 2238 uintptr_t offset = (uintptr_t)pio->pio_offset;
2129 2239 struct uio auio;
2130 2240 struct iovec aiov;
2131 2241 int error = 0;
2132 2242
2133 2243 if ((p->p_flag & SSYS) || p->p_as == &kas)
2134 2244 error = EIO;
2135 2245 else if ((base + cnt) < base || (offset + cnt) < offset)
2136 2246 error = EINVAL;
2137 2247 else if (cnt != 0) {
2138 2248 aiov.iov_base = base;
2139 2249 aiov.iov_len = cnt;
2140 2250
2141 2251 auio.uio_loffset = offset;
2142 2252 auio.uio_iov = &aiov;
2143 2253 auio.uio_iovcnt = 1;
2144 2254 auio.uio_resid = cnt;
2145 2255 auio.uio_segflg = UIO_USERSPACE;
2146 2256 auio.uio_llimit = (longlong_t)MAXOFFSET_T;
2147 2257 auio.uio_fmode = FREAD|FWRITE;
2148 2258 auio.uio_extflg = UIO_COPY_DEFAULT;
2149 2259
2150 2260 mutex_exit(&p->p_lock);
2151 2261 error = prusrio(p, rw, &auio, 0);
2152 2262 mutex_enter(&p->p_lock);
2153 2263
2154 2264 /*
2155 2265 * We have no way to return the i/o count,
2156 2266 * like read() or write() would do, so we
2157 2267 * return an error if the i/o was truncated.
2158 2268 */
2159 2269 if (auio.uio_resid != 0 && error == 0)
2160 2270 error = EIO;
2161 2271 }
2162 2272
2163 2273 return (error);
2164 2274 }
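
/*
 * Illustrative userland sketch (not part of this file) of the consumer
 * side of pr_rdwr(): read len bytes of the tracee's address space at
 * vaddr with PCREAD and a priovec_t (PCWRITE is symmetric). As the
 * comment above notes, a truncated transfer is reported as an error
 * rather than a short count. Names are hypothetical.
 */
#include <sys/types.h>
#include <procfs.h>
#include <string.h>
#include <unistd.h>

static int
example_pcread(int ctlfd, void *buf, size_t len, uintptr_t vaddr)
{
	long ctl[1 + sizeof (priovec_t) / sizeof (long)];
	priovec_t pio;

	pio.pio_base = buf;
	pio.pio_len = len;
	pio.pio_offset = (off_t)vaddr;
	ctl[0] = PCREAD;
	(void) memcpy(&ctl[1], &pio, sizeof (pio));
	return (write(ctlfd, ctl, sizeof (ctl)) == sizeof (ctl) ? 0 : -1);
}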
2165 2275
2166 2276 static int
2167 2277 pr_scred(proc_t *p, prcred_t *prcred, cred_t *cr, boolean_t dogrps)
2168 2278 {
2169 2279 kthread_t *t;
2170 2280 cred_t *oldcred;
2171 2281 cred_t *newcred;
2172 2282 uid_t oldruid;
2173 2283 int error;
2174 2284 zone_t *zone = crgetzone(cr);
2175 2285
2176 2286 if (!VALID_UID(prcred->pr_euid, zone) ||
2177 2287 !VALID_UID(prcred->pr_ruid, zone) ||
2178 2288 !VALID_UID(prcred->pr_suid, zone) ||
2179 2289 !VALID_GID(prcred->pr_egid, zone) ||
2180 2290 !VALID_GID(prcred->pr_rgid, zone) ||
2181 2291 !VALID_GID(prcred->pr_sgid, zone))
2182 2292 return (EINVAL);
2183 2293
2184 2294 if (dogrps) {
2185 2295 int ngrp = prcred->pr_ngroups;
2186 2296 int i;
2187 2297
2188 2298 if (ngrp < 0 || ngrp > ngroups_max)
2189 2299 return (EINVAL);
2190 2300
2191 2301 for (i = 0; i < ngrp; i++) {
2192 2302 if (!VALID_GID(prcred->pr_groups[i], zone))
2193 2303 return (EINVAL);
2194 2304 }
2195 2305 }
2196 2306
2197 2307 error = secpolicy_allow_setid(cr, prcred->pr_euid, B_FALSE);
2198 2308
2199 2309 if (error == 0 && prcred->pr_ruid != prcred->pr_euid)
2200 2310 error = secpolicy_allow_setid(cr, prcred->pr_ruid, B_FALSE);
2201 2311
2202 2312 if (error == 0 && prcred->pr_suid != prcred->pr_euid &&
2203 2313 prcred->pr_suid != prcred->pr_ruid)
2204 2314 error = secpolicy_allow_setid(cr, prcred->pr_suid, B_FALSE);
2205 2315
2206 2316 if (error)
2207 2317 return (error);
2208 2318
2209 2319 mutex_exit(&p->p_lock);
2210 2320
2211 2321 /* hold old cred so it doesn't disappear while we dup it */
2212 2322 mutex_enter(&p->p_crlock);
2213 2323 crhold(oldcred = p->p_cred);
2214 2324 mutex_exit(&p->p_crlock);
2215 2325 newcred = crdup(oldcred);
2216 2326 oldruid = crgetruid(oldcred);
2217 2327 crfree(oldcred);
2218 2328
2219 2329 /* Error checking done above */
2220 2330 (void) crsetresuid(newcred, prcred->pr_ruid, prcred->pr_euid,
2221 2331 prcred->pr_suid);
2222 2332 (void) crsetresgid(newcred, prcred->pr_rgid, prcred->pr_egid,
2223 2333 prcred->pr_sgid);
2224 2334
2225 2335 if (dogrps) {
2226 2336 (void) crsetgroups(newcred, prcred->pr_ngroups,
2227 2337 prcred->pr_groups);
2228 2338
2229 2339 }
2230 2340
2231 2341 mutex_enter(&p->p_crlock);
2232 2342 oldcred = p->p_cred;
2233 2343 p->p_cred = newcred;
2234 2344 mutex_exit(&p->p_crlock);
2235 2345 crfree(oldcred);
2236 2346
2237 2347 /*
2238 2348 * Keep count of processes per uid consistent.
2239 2349 */
2240 2350 if (oldruid != prcred->pr_ruid) {
2241 2351 zoneid_t zoneid = crgetzoneid(newcred);
2242 2352
2243 2353 mutex_enter(&pidlock);
2244 2354 upcount_dec(oldruid, zoneid);
2245 2355 upcount_inc(prcred->pr_ruid, zoneid);
2246 2356 mutex_exit(&pidlock);
2247 2357 }
2248 2358
2249 2359 /*
2250 2360 * Broadcast the cred change to the threads.
2251 2361 */
2252 2362 mutex_enter(&p->p_lock);
2253 2363 t = p->p_tlist;
2254 2364 do {
2255 2365 t->t_pre_sys = 1; /* so syscall will get new cred */
2256 2366 } while ((t = t->t_forw) != p->p_tlist);
2257 2367
2258 2368 return (0);
2259 2369 }
2260 2370
2261 2371 /*
2262 2372 * Change process credentials to specified zone. Used to temporarily
2263 2373 * set a process to run in the global zone; only transitions between
2264 2374 * the process's actual zone and the global zone are allowed.
2265 2375 */
2266 2376 static int
2267 2377 pr_szoneid(proc_t *p, zoneid_t zoneid, cred_t *cr)
2268 2378 {
2269 2379 kthread_t *t;
2270 2380 cred_t *oldcred;
2271 2381 cred_t *newcred;
2272 2382 zone_t *zptr;
2273 2383 zoneid_t oldzoneid;
2274 2384
2275 2385 if (secpolicy_zone_config(cr) != 0)
2276 2386 return (EPERM);
2277 2387 if (zoneid != GLOBAL_ZONEID && zoneid != p->p_zone->zone_id)
2278 2388 return (EINVAL);
2279 2389 /*
2280 2390 * We cannot hold p_lock when we call zone_find_by_id since that can
2281 2391 * lead to a deadlock. zone_find_by_id() takes zonehash_lock.
2282 2392 * zone_enter() can hold the zonehash_lock and needs p_lock when it
2283 2393 * calls task_join.
2284 2394 */
2285 2395 mutex_exit(&p->p_lock);
2286 2396 if ((zptr = zone_find_by_id(zoneid)) == NULL) {
2287 2397 mutex_enter(&p->p_lock);
2288 2398 return (EINVAL);
2289 2399 }
2290 2400 mutex_enter(&p->p_crlock);
2291 2401 oldcred = p->p_cred;
2292 2402 crhold(oldcred);
2293 2403 mutex_exit(&p->p_crlock);
2294 2404 newcred = crdup(oldcred);
2295 2405 oldzoneid = crgetzoneid(oldcred);
2296 2406 crfree(oldcred);
2297 2407
2298 2408 crsetzone(newcred, zptr);
2299 2409 zone_rele(zptr);
2300 2410
2301 2411 mutex_enter(&p->p_crlock);
2302 2412 oldcred = p->p_cred;
2303 2413 p->p_cred = newcred;
2304 2414 mutex_exit(&p->p_crlock);
2305 2415 crfree(oldcred);
2306 2416
2307 2417 /*
2308 2418 * The target process is changing zones (according to its cred), so
2309 2419 * update the per-zone upcounts, which are based on process creds.
2310 2420 */
2311 2421 if (oldzoneid != zoneid) {
2312 2422 uid_t ruid = crgetruid(newcred);
2313 2423
2314 2424 mutex_enter(&pidlock);
2315 2425 upcount_dec(ruid, oldzoneid);
2316 2426 upcount_inc(ruid, zoneid);
2317 2427 mutex_exit(&pidlock);
2318 2428 }
2319 2429 /*
2320 2430 * Broadcast the cred change to the threads.
2321 2431 */
2322 2432 mutex_enter(&p->p_lock);
2323 2433 t = p->p_tlist;
2324 2434 do {
2325 2435 t->t_pre_sys = 1; /* so syscall will get new cred */
2326 2436 } while ((t = t->t_forw) != p->p_tlist);
2327 2437
2328 2438 return (0);
2329 2439 }
2330 2440
2331 2441 static int
2332 2442 pr_spriv(proc_t *p, prpriv_t *prpriv, cred_t *cr)
2333 2443 {
2334 2444 kthread_t *t;
2335 2445 int err;
2336 2446
2337 2447 ASSERT(MUTEX_HELD(&p->p_lock));
2338 2448
2339 2449 if ((err = priv_pr_spriv(p, prpriv, cr)) == 0) {
2340 2450 /*
2341 2451 * Broadcast the cred change to the threads.
2342 2452 */
2343 2453 t = p->p_tlist;
2344 2454 do {
2345 2455 t->t_pre_sys = 1; /* so syscall will get new cred */
2346 2456 } while ((t = t->t_forw) != p->p_tlist);
2347 2457 }
2348 2458
2349 2459 return (err);
2350 2460 }
2351 2461
2352 2462 /*
2353 2463  * Return -1 if the process is the parent of a vfork(2) whose child has yet to
2354 2464 * terminate or perform an exec(2).
2355 2465 *
2356 2466 * Returns 0 if the process is fully stopped except for the current thread (if
2357 2467 * we are operating on our own process), 1 otherwise.
2358 2468 *
2359 2469 * If the watchstop flag is set, then we ignore threads with TP_WATCHSTOP set.
2360 2470 * See holdwatch() for details.
2361 2471 */
2362 2472 int
2363 2473 pr_allstopped(proc_t *p, int watchstop)
2364 2474 {
2365 2475 kthread_t *t;
2366 2476 int rv = 0;
2367 2477
2368 2478 ASSERT(MUTEX_HELD(&p->p_lock));
2369 2479
2370 2480 if (p->p_flag & SVFWAIT) /* waiting for vfork'd child to exec */
2371 2481 return (-1);
2372 2482
2373 2483 if ((t = p->p_tlist) != NULL) {
2374 2484 do {
2375 2485 if (t == curthread || VSTOPPED(t) ||
2376 2486 (watchstop && (t->t_proc_flag & TP_WATCHSTOP)))
2377 2487 continue;
2378 2488 thread_lock(t);
2379 2489 switch (t->t_state) {
2380 2490 case TS_ZOMB:
2381 2491 case TS_STOPPED:
2382 2492 break;
2383 2493 case TS_SLEEP:
2384 2494 if (!(t->t_flag & T_WAKEABLE) ||
2385 2495 t->t_wchan0 == NULL)
2386 2496 rv = 1;
2387 2497 break;
2388 2498 default:
2389 2499 rv = 1;
2390 2500 break;
2391 2501 }
2392 2502 thread_unlock(t);
2393 2503 } while (rv == 0 && (t = t->t_forw) != p->p_tlist);
2394 2504 }
2395 2505
2396 2506 return (rv);
2397 2507 }
2398 2508
2399 2509 /*
2400 2510 * Cause all lwps in the process to pause (for watchpoint operations).
2401 2511 */
2402 2512 static void
2403 2513 pauselwps(proc_t *p)
2404 2514 {
2405 2515 kthread_t *t;
2406 2516
2407 2517 ASSERT(MUTEX_HELD(&p->p_lock));
2408 2518 ASSERT(p != curproc);
2409 2519
2410 2520 if ((t = p->p_tlist) != NULL) {
2411 2521 do {
2412 2522 thread_lock(t);
2413 2523 t->t_proc_flag |= TP_PAUSE;
2414 2524 aston(t);
2415 2525 if ((ISWAKEABLE(t) && (t->t_wchan0 == NULL)) ||
2416 2526 ISWAITING(t)) {
2417 2527 setrun_locked(t);
2418 2528 }
2419 2529 prpokethread(t);
2420 2530 thread_unlock(t);
2421 2531 } while ((t = t->t_forw) != p->p_tlist);
2422 2532 }
2423 2533 }
2424 2534
2425 2535 /*
2426 2536 * undo the effects of pauselwps()
2427 2537 */
2428 2538 static void
2429 2539 unpauselwps(proc_t *p)
2430 2540 {
2431 2541 kthread_t *t;
2432 2542
2433 2543 ASSERT(MUTEX_HELD(&p->p_lock));
2434 2544 ASSERT(p != curproc);
2435 2545
2436 2546 if ((t = p->p_tlist) != NULL) {
2437 2547 do {
2438 2548 thread_lock(t);
2439 2549 t->t_proc_flag &= ~TP_PAUSE;
2440 2550 if (t->t_state == TS_STOPPED) {
2441 2551 t->t_schedflag |= TS_UNPAUSE;
2442 2552 t->t_dtrace_stop = 0;
2443 2553 setrun_locked(t);
2444 2554 }
2445 2555 thread_unlock(t);
2446 2556 } while ((t = t->t_forw) != p->p_tlist);
2447 2557 }
2448 2558 }
2449 2559
2450 2560 /*
2451 2561 * Cancel all watched areas. Called from prclose().
2452 2562 */
2453 2563 proc_t *
2454 2564 pr_cancel_watch(prnode_t *pnp)
2455 2565 {
2456 2566 proc_t *p = pnp->pr_pcommon->prc_proc;
2457 2567 struct as *as;
2458 2568 kthread_t *t;
2459 2569
2460 2570 ASSERT(MUTEX_HELD(&p->p_lock) && (p->p_proc_flag & P_PR_LOCK));
2461 2571
2462 2572 if (!pr_watch_active(p))
2463 2573 return (p);
2464 2574
2465 2575 /*
2466 2576 * Pause the process before dealing with the watchpoints.
2467 2577 */
2468 2578 if (p == curproc) {
2469 2579 prunlock(pnp);
2470 2580 while (holdwatch() != 0)
2471 2581 continue;
2472 2582 p = pr_p_lock(pnp);
2473 2583 mutex_exit(&pr_pidlock);
2474 2584 ASSERT(p == curproc);
2475 2585 } else {
2476 2586 pauselwps(p);
2477 2587 while (p != NULL && pr_allstopped(p, 0) > 0) {
2478 2588 /*
2479 2589 * This cv/mutex pair is persistent even
2480 2590 * if the process disappears after we
2481 2591 * unmark it and drop p->p_lock.
2482 2592 */
2483 2593 kcondvar_t *cv = &pr_pid_cv[p->p_slot];
2484 2594 kmutex_t *mp = &p->p_lock;
2485 2595
2486 2596 prunmark(p);
2487 2597 (void) cv_wait(cv, mp);
2488 2598 mutex_exit(mp);
2489 2599 p = pr_p_lock(pnp); /* NULL if process disappeared */
2490 2600 mutex_exit(&pr_pidlock);
2491 2601 }
2492 2602 }
2493 2603
2494 2604 if (p == NULL) /* the process disappeared */
2495 2605 return (NULL);
2496 2606
2497 2607 ASSERT(p == pnp->pr_pcommon->prc_proc);
2498 2608 ASSERT(MUTEX_HELD(&p->p_lock) && (p->p_proc_flag & P_PR_LOCK));
2499 2609
2500 2610 if (pr_watch_active(p)) {
2501 2611 pr_free_watchpoints(p);
2502 2612 if ((t = p->p_tlist) != NULL) {
2503 2613 do {
2504 2614 watch_disable(t);
2505 2615
2506 2616 } while ((t = t->t_forw) != p->p_tlist);
2507 2617 }
2508 2618 }
2509 2619
2510 2620 if ((as = p->p_as) != NULL) {
2511 2621 avl_tree_t *tree;
2512 2622 struct watched_page *pwp;
2513 2623
2514 2624 /*
2515 2625 * If this is the parent of a vfork, the watched page
2516 2626 * list has been moved temporarily to p->p_wpage.
2517 2627 */
2518 2628 if (avl_numnodes(&p->p_wpage) != 0)
2519 2629 tree = &p->p_wpage;
2520 2630 else
2521 2631 tree = &as->a_wpage;
2522 2632
2523 2633 mutex_exit(&p->p_lock);
2524 2634 AS_LOCK_ENTER(as, RW_WRITER);
2525 2635
2526 2636 for (pwp = avl_first(tree); pwp != NULL;
2527 2637 pwp = AVL_NEXT(tree, pwp)) {
2528 2638 pwp->wp_read = 0;
2529 2639 pwp->wp_write = 0;
2530 2640 pwp->wp_exec = 0;
2531 2641 if ((pwp->wp_flags & WP_SETPROT) == 0) {
2532 2642 pwp->wp_flags |= WP_SETPROT;
2533 2643 pwp->wp_prot = pwp->wp_oprot;
2534 2644 pwp->wp_list = p->p_wprot;
2535 2645 p->p_wprot = pwp;
2536 2646 }
2537 2647 }
2538 2648
2539 2649 AS_LOCK_EXIT(as);
2540 2650 mutex_enter(&p->p_lock);
2541 2651 }
2542 2652
2543 2653 /*
2544 2654 * Unpause the process now.
2545 2655 */
2546 2656 if (p == curproc)
2547 2657 continuelwps(p);
2548 2658 else
2549 2659 unpauselwps(p);
2550 2660
2551 2661 return (p);
2552 2662 }
783 lines elided