NEX-18463 Parallel dump produces corrupted dump file
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-17845 Remove support for BZIP2 from dump
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-17857 Parallel dump threshold (dump_ncpu_low) is not applied
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-17501 Enable parallel crash dump
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-17762 After a live dump (savecore -L) a subsequent panic will be saved in wrong directory
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Alexander Eremin <alexander.eremin@nexenta.com>
NEX-17182 Parallel dump hangs (fix typo)
Reviewed by: Jean McCormack <jean.mccormack@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-17182 Parallel dump hangs
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-9338 improve the layout of the crash directory (use sys/uuid.h)
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-9338 improve the layout of the crash directory
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Steve Peng <steve.peng@nexenta.com>
NEX-5164 backport illumos 6514 AS_* lock macros simplification
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6514 AS_* lock macros simplification
Reviewed by: Piotr Jasiukajtis <estibi@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
--- old/usr/src/uts/common/os/dumpsubr.c
+++ new/usr/src/uts/common/os/dumpsubr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2016 Joyent, Inc.
25 + * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
25 26 */
26 27
27 28 #include <sys/types.h>
28 29 #include <sys/param.h>
29 30 #include <sys/systm.h>
30 31 #include <sys/vm.h>
31 32 #include <sys/proc.h>
32 33 #include <sys/file.h>
33 34 #include <sys/conf.h>
34 35 #include <sys/kmem.h>
35 36 #include <sys/mem.h>
36 37 #include <sys/mman.h>
37 38 #include <sys/vnode.h>
38 39 #include <sys/errno.h>
39 40 #include <sys/memlist.h>
40 41 #include <sys/dumphdr.h>
41 42 #include <sys/dumpadm.h>
42 43 #include <sys/ksyms.h>
43 44 #include <sys/compress.h>
44 45 #include <sys/stream.h>
45 46 #include <sys/strsun.h>
46 47 #include <sys/cmn_err.h>
47 48 #include <sys/bitmap.h>
48 49 #include <sys/modctl.h>
49 50 #include <sys/utsname.h>
50 51 #include <sys/systeminfo.h>
51 52 #include <sys/vmem.h>
52 53 #include <sys/log.h>
53 54 #include <sys/var.h>
54 55 #include <sys/debug.h>
55 56 #include <sys/sunddi.h>
56 57 #include <fs/fs_subr.h>
57 58 #include <sys/fs/snode.h>
58 59 #include <sys/ontrap.h>
59 60 #include <sys/panic.h>
60 61 #include <sys/dkio.h>
61 62 #include <sys/vtoc.h>
62 63 #include <sys/errorq.h>
63 64 #include <sys/fm/util.h>
64 65 #include <sys/fs/zfs.h>
65 66
66 67 #include <vm/hat.h>
67 68 #include <vm/as.h>
68 69 #include <vm/page.h>
69 70 #include <vm/pvn.h>
70 71 #include <vm/seg.h>
71 72 #include <vm/seg_kmem.h>
72 73 #include <sys/clock_impl.h>
73 74 #include <sys/hold_page.h>
75 +#include <sys/cpu.h>
74 76
75 -#include <bzip2/bzlib.h>
77 +#include <sys/uuid.h>
76 78
77 79 /*
78 - * Crash dump time is dominated by disk write time. To reduce this,
79 - * the stronger compression method bzip2 is applied to reduce the dump
80 - * size and hence reduce I/O time. However, bzip2 is much more
81 - * computationally expensive than the existing lzjb algorithm, so to
82 - * avoid increasing compression time, CPUs that are otherwise idle
83 - * during panic are employed to parallelize the compression task.
84 - * Many helper CPUs are needed to prevent bzip2 from being a
85 - * bottleneck, and on systems with too few CPUs, the lzjb algorithm is
86 - * parallelized instead. Lastly, I/O and compression are performed by
87 - * different CPUs, and are hence overlapped in time, unlike the older
88 - * serial code.
89 - *
90 - * Another important consideration is the speed of the dump
91 - * device. Faster disks need less CPUs in order to benefit from
92 - * parallel lzjb versus parallel bzip2. Therefore, the CPU count
93 - * threshold for switching from parallel lzjb to paralled bzip2 is
94 - * elevated for faster disks. The dump device speed is adduced from
95 - * the setting for dumpbuf.iosize, see dump_update_clevel.
80 + * Parallel Dump:
81 + * CPUs that are otherwise idle during panic are employed to parallelize
82 + * the compression task. I/O and compression are performed by different
83 + * CPUs, and are hence overlapped in time, unlike the older serial code.
96 84 */
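A schematic timeline of the overlap described above (illustrative only,
not part of the source): in the serial case one CPU alternates between
compressing and writing, while in the parallel case the master keeps the
dump device busy while helpers compress.

	serial:    [map][compress][write][map][compress][write] ...
	parallel:  master:  [map][map][write][map][write][write] ...
	           helpers:     [compress][compress][compress]  ...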
97 85
98 86 /*
99 87 * exported vars
100 88 */
101 89 kmutex_t dump_lock; /* lock for dump configuration */
102 90 dumphdr_t *dumphdr; /* dump header */
103 91 int dump_conflags = DUMP_KERNEL; /* dump configuration flags */
104 92 vnode_t *dumpvp; /* dump device vnode pointer */
105 93 u_offset_t dumpvp_size; /* size of dump device, in bytes */
106 94 char *dumppath; /* pathname of dump device */
107 95 int dump_timeout = 120; /* timeout for dumping pages */
108 96 int dump_timeleft; /* portion of dump_timeout remaining */
109 97 int dump_ioerr; /* dump i/o error */
110 98 int dump_check_used; /* enable check for used pages */
111 -char *dump_stack_scratch; /* scratch area for saving stack summary */
99 +char *dump_stack_scratch; /* scratch area for saving stack summary */
112 100
113 101 /*
114 - * Tunables for dump compression and parallelism. These can be set via
115 - * /etc/system.
102 + * Tunables for dump compression and parallelism.
103 + * These can be set via /etc/system.
116 104 *
117 - * dump_ncpu_low number of helpers for parallel lzjb
118 - * This is also the minimum configuration.
105 + * dump_ncpu_low:
106 + * This is the minimum configuration for parallel lzjb.
107 + * A special value of 0 means that parallel dump will not be used.
119 108 *
120 - * dump_bzip2_level bzip2 compression level: 1-9
121 - * Higher numbers give greater compression, but take more memory
122 - * and time. Memory used per helper is ~(dump_bzip2_level * 1MB).
123 - *
124 - * dump_plat_mincpu the cross-over limit for using bzip2 (per platform):
125 - * if dump_plat_mincpu == 0, then always do single threaded dump
126 - * if ncpu >= dump_plat_mincpu then try to use bzip2
127 - *
128 - * dump_metrics_on if set, metrics are collected in the kernel, passed
129 - * to savecore via the dump file, and recorded by savecore in
130 - * METRICS.txt.
109 + * dump_metrics_on:
110 + * If set, metrics are collected in the kernel, passed to savecore
111 + * via the dump file, and recorded by savecore in METRICS.txt.
131 112 */
132 113 uint_t dump_ncpu_low = 4; /* minimum config for parallel lzjb */
133 -uint_t dump_bzip2_level = 1; /* bzip2 level (1-9) */
134 114
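For reference, these tunables use the standard /etc/system "set" syntax;
a couple of illustrative settings (values are examples only):

	* Force a single-threaded (serial) crash dump:
	set dump_ncpu_low = 0

	* Have savecore record compression metrics in METRICS.txt:
	set dump_metrics_on = 1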
135 -/* Use dump_plat_mincpu_default unless this variable is set by /etc/system */
136 -#define MINCPU_NOT_SET ((uint_t)-1)
137 -uint_t dump_plat_mincpu = MINCPU_NOT_SET;
138 -
139 115 /* tunables for pre-reserved heap */
140 116 uint_t dump_kmem_permap = 1024;
141 117 uint_t dump_kmem_pages = 8;
142 118
143 119 /* Define multiple buffers per helper to avoid stalling */
144 120 #define NCBUF_PER_HELPER 2
145 121 #define NCMAP_PER_HELPER 4
146 122
147 123 /* minimum number of helpers configured */
148 -#define MINHELPERS (dump_ncpu_low)
124 +#define MINHELPERS (MAX(dump_ncpu_low, 1))
149 125 #define MINCBUFS (MINHELPERS * NCBUF_PER_HELPER)
150 126
151 127 /*
152 128 * Define constant parameters.
153 129 *
154 130 * CBUF_SIZE size of an output buffer
155 131 *
156 132 * CBUF_MAPSIZE size of virtual range for mapping pages
157 133 *
158 134 * CBUF_MAPNP size of virtual range in pages
159 135 *
160 136 */
161 137 #define DUMP_1KB ((size_t)1 << 10)
162 138 #define DUMP_1MB ((size_t)1 << 20)
163 139 #define CBUF_SIZE ((size_t)1 << 17)
164 140 #define CBUF_MAPSHIFT (22)
165 141 #define CBUF_MAPSIZE ((size_t)1 << CBUF_MAPSHIFT)
166 142 #define CBUF_MAPNP ((size_t)1 << (CBUF_MAPSHIFT - PAGESHIFT))
167 143
168 144 /*
169 145 * Compression metrics are accumulated nano-second subtotals. The
170 146 * results are normalized by the number of pages dumped. A report is
171 147 * generated when dumpsys() completes and is saved in the dump image
172 148 * after the trailing dump header.
173 149 *
174 150 * Metrics are always collected. Set the variable dump_metrics_on to
175 151 * cause metrics to be saved in the crash file, where savecore will
176 152 * save it in the file METRICS.txt.
177 153 */
178 154 #define PERPAGES \
179 155 PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \
180 156 PERPAGE(copy) PERPAGE(compress) \
181 157 PERPAGE(write) \
182 158 PERPAGE(inwait) PERPAGE(outwait)
183 159
184 160 typedef struct perpage {
185 161 #define PERPAGE(x) hrtime_t x;
186 162 PERPAGES
187 163 #undef PERPAGE
188 164 } perpage_t;
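The PERPAGE/PERPAGES pair is an X-macro: PERPAGES is expanded once with
PERPAGE() defined as a field declaration, so the struct above is
equivalent to:

	typedef struct perpage {
		hrtime_t bitmap;
		hrtime_t map;
		hrtime_t unmap;
		hrtime_t copy;
		hrtime_t compress;
		hrtime_t write;
		hrtime_t inwait;
		hrtime_t outwait;
	} perpage_t;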
189 165
190 166 /*
191 167 * This macro controls the code generation for collecting dump
192 168 * performance information. By default, the code is generated, but
193 169 * automatic saving of the information is disabled. If dump_metrics_on
194 170 * is set to 1, the timing information is passed to savecore via the
195 171 * crash file, where it is appended to the file dump-dir/METRICS.txt.
196 172 */
197 173 #define COLLECT_METRICS
198 174
199 175 #ifdef COLLECT_METRICS
200 176 uint_t dump_metrics_on = 0; /* set to 1 to enable recording metrics */
201 177
202 178 #define HRSTART(v, m) v##ts.m = gethrtime()
203 179 #define HRSTOP(v, m) v.m += gethrtime() - v##ts.m
204 180 #define HRBEGIN(v, m, s) v##ts.m = gethrtime(); v.size += s
205 181 #define HREND(v, m) v.m += gethrtime() - v##ts.m
206 182 #define HRNORM(v, m, n) v.m /= (n)
207 183
208 184 #else
209 185 #define HRSTART(v, m)
210 186 #define HRSTOP(v, m)
211 187 #define HRBEGIN(v, m, s)
212 188 #define HREND(v, m)
213 189 #define HRNORM(v, m, n)
214 190 #endif /* COLLECT_METRICS */
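Note the token paste in these macros: v##ts appends "ts" to the last
token of v, so each metrics struct must be paired with a sibling whose
name ends in "ts" (the perpage/perpagets pairs declared below). A
typical timing site looks like:

	HRSTART(hp->perpage, compress);	/* hp->perpagets.compress = gethrtime() */
	/* ... compress one block ... */
	HRSTOP(hp->perpage, compress);	/* accumulates elapsed ns in hp->perpage.compress */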
215 191
216 192 /*
217 193 * Buffers for copying and compressing memory pages.
218 194 *
219 195 * cbuf_t buffer controllers: used for both input and output.
220 196 *
221 197 * The buffer state indicates how it is being used:
222 198 *
223 199 * CBUF_FREEMAP: CBUF_MAPSIZE virtual address range is available for
224 200 * mapping input pages.
225 201 *
226 202 * CBUF_INREADY: input pages are mapped and ready for compression by a
227 203 * helper.
228 204 *
229 205 * CBUF_USEDMAP: mapping has been consumed by a helper. Needs unmap.
230 206 *
231 207 * CBUF_FREEBUF: CBUF_SIZE output buffer, which is available.
232 208 *
233 209 * CBUF_WRITE: CBUF_SIZE block of compressed pages from a helper,
234 210 * ready to write out.
235 211 *
236 212 * CBUF_ERRMSG: CBUF_SIZE block of error messages from a helper
237 213 * (reports UE errors.)
238 214 */
239 215
240 216 typedef enum cbufstate {
241 217 CBUF_FREEMAP,
242 218 CBUF_INREADY,
243 219 CBUF_USEDMAP,
244 220 CBUF_FREEBUF,
245 221 CBUF_WRITE,
246 222 CBUF_ERRMSG
247 223 } cbufstate_t;
248 224
249 225 typedef struct cbuf cbuf_t;
250 226
251 227 struct cbuf {
252 228 cbuf_t *next; /* next in list */
253 229 cbufstate_t state; /* processing state */
254 230 size_t used; /* amount used */
255 231 size_t size; /* mem size */
256 232 char *buf; /* kmem or vmem */
257 233 pgcnt_t pagenum; /* index to pfn map */
258 234 pgcnt_t bitnum; /* first set bitnum */
259 235 pfn_t pfn; /* first pfn in mapped range */
260 236 int off; /* byte offset to first pfn */
261 237 };
262 238
263 -static char dump_osimage_uuid[36 + 1];
239 +static char dump_osimage_uuid[UUID_PRINTABLE_STRING_LENGTH];
264 240
265 241 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
266 242 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
267 243 ((ch) >= 'A' && (ch) <= 'F'))
268 244
269 245 /*
270 246 * cqueue_t queues: a uni-directional channel for communication
271 247 * from the master to helper tasks or vice-versa using put and
272 248 * get primitives. Both mappings and data buffers are passed via
273 249 * queues. Producers close a queue when done. The number of
274 250 * active producers is reference counted so the consumer can
275 251 * detect end of data. Concurrent access is mediated by atomic
276 252 * operations for panic dump, or mutex/cv for live dump.
277 253 *
278 254 * There are four queues, used as follows:
279 255 *
280 256 * Queue Dataflow NewState
281 257 * --------------------------------------------------
282 258 * mainq master -> master FREEMAP
283 259 * master has initialized or unmapped an input buffer
284 260 * --------------------------------------------------
285 261 * helperq master -> helper INREADY
286 262 * master has mapped input for use by helper
287 263 * --------------------------------------------------
288 264 * mainq master <- helper USEDMAP
289 265 * helper is done with input
290 266 * --------------------------------------------------
291 267 * freebufq master -> helper FREEBUF
292 268 * master has initialized or written an output buffer
293 269 * --------------------------------------------------
294 270 * mainq master <- helper WRITE
295 271 * block of compressed pages from a helper
296 272 * --------------------------------------------------
297 273 * mainq master <- helper ERRMSG
298 274 * error messages from a helper (memory error case)
299 275 * --------------------------------------------------
300 276 * writerq master <- master WRITE
301 277 * non-blocking queue of blocks to write
302 278 * --------------------------------------------------
303 279 */
304 280 typedef struct cqueue {
305 281 cbuf_t *volatile first; /* first in list */
306 282 cbuf_t *last; /* last in list */
307 283 hrtime_t ts; /* timestamp */
308 284 hrtime_t empty; /* total time empty */
309 285 kmutex_t mutex; /* live state lock */
310 286 kcondvar_t cv; /* live wait var */
311 287 lock_t spinlock; /* panic mode spin lock */
312 288 volatile uint_t open; /* producer ref count */
313 289 } cqueue_t;
314 290
315 291 /*
316 292 * Convenience macros for using the cqueue functions
317 293 * Note that the caller must have defined "dumpsync_t *ds"
318 294 */
319 295 #define CQ_IS_EMPTY(q) \
320 296 (ds->q.first == NULL)
321 297
322 298 #define CQ_OPEN(q) \
323 299 atomic_inc_uint(&ds->q.open)
324 300
325 301 #define CQ_CLOSE(q) \
326 302 dumpsys_close_cq(&ds->q, ds->live)
327 303
328 304 #define CQ_PUT(q, cp, st) \
329 305 dumpsys_put_cq(&ds->q, cp, st, ds->live)
330 306
331 307 #define CQ_GET(q) \
332 308 dumpsys_get_cq(&ds->q, ds->live)
333 309
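A minimal sketch of a consumer loop built from these macros (illustrative
only; it assumes CQ_GET() returns NULL once every producer has closed the
queue, per the reference-counting description above, and omits the output
buffer, error, and metrics handling of the real helper loop):

	dumpsync_t *ds = hp->ds;	/* the CQ_* macros require "ds" */
	cbuf_t *cp;

	while ((cp = CQ_GET(helperq)) != NULL) {
		/* compress the pages mapped at cp->buf ... */
		CQ_PUT(mainq, cp, CBUF_USEDMAP);	/* mapping consumed; needs unmap */
	}
	CQ_CLOSE(mainq);	/* drop this producer's reference */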
334 310 /*
335 311 * Dynamic state when dumpsys() is running.
336 312 */
337 313 typedef struct dumpsync {
338 314 pgcnt_t npages; /* subtotal of pages dumped */
339 315 pgcnt_t pages_mapped; /* subtotal of pages mapped */
340 316 pgcnt_t pages_used; /* subtotal of pages used per map */
341 317 size_t nwrite; /* subtotal of bytes written */
342 318 uint_t live; /* running live dump */
343 319 uint_t neednl; /* will need to print a newline */
344 320 uint_t percent; /* dump progress */
345 321 uint_t percent_done; /* dump progress reported */
346 322 int sec_done; /* dump progress last report time */
347 323 cqueue_t freebufq; /* free kmem bufs for writing */
348 324 cqueue_t mainq; /* input for main task */
349 325 cqueue_t helperq; /* input for helpers */
350 326 cqueue_t writerq; /* input for writer */
351 327 hrtime_t start; /* start time */
352 328 hrtime_t elapsed; /* elapsed time when completed */
353 329 hrtime_t iotime; /* time spent writing nwrite bytes */
354 330 hrtime_t iowait; /* time spent waiting for output */
355 331 hrtime_t iowaitts; /* iowait timestamp */
356 332 perpage_t perpage; /* metrics */
357 333 perpage_t perpagets;
358 334 int dumpcpu; /* master cpu */
359 335 } dumpsync_t;
360 336
361 337 static dumpsync_t dumpsync; /* synchronization vars */
362 338
363 339 /*
364 340 * helper_t helpers: contains the context for a stream. CPUs run in
365 341 * parallel at dump time; each CPU creates a single stream of
366 342 * compression data. Stream data is divided into CBUF_SIZE blocks.
367 343 * The blocks are written in order within a stream. But, blocks from
368 344 * multiple streams can be interleaved. Each stream is identified by a
369 345 * unique tag.
370 346 */
371 347 typedef struct helper {
372 348 int helper; /* bound helper id */
373 349 int tag; /* compression stream tag */
374 350 perpage_t perpage; /* per page metrics */
375 351 perpage_t perpagets; /* per page metrics (timestamps) */
376 352 taskqid_t taskqid; /* live dump task ptr */
377 353 int in, out; /* buffer offsets */
378 354 cbuf_t *cpin, *cpout, *cperr; /* cbuf objects in process */
379 355 dumpsync_t *ds; /* pointer to sync vars */
380 356 size_t used; /* counts input consumed */
381 357 char *page; /* buffer for page copy */
382 358 char *lzbuf; /* lzjb output */
383 - bz_stream bzstream; /* bzip2 state */
384 359 } helper_t;
385 360
386 361 #define MAINHELPER (-1) /* helper is also the main task */
387 362 #define FREEHELPER (-2) /* unbound helper */
388 363 #define DONEHELPER (-3) /* helper finished */
389 364
390 365 /*
391 366 * configuration vars for dumpsys
392 367 */
393 368 typedef struct dumpcfg {
394 - int threshold; /* ncpu threshold for bzip2 */
395 369 int nhelper; /* number of helpers */
396 370 int nhelper_used; /* actual number of helpers used */
397 371 int ncmap; /* number VA pages for compression */
398 372 int ncbuf; /* number of bufs for compression */
399 373 int ncbuf_used; /* number of bufs in use */
400 374 uint_t clevel; /* dump compression level */
401 375 helper_t *helper; /* array of helpers */
402 376 cbuf_t *cmap; /* array of input (map) buffers */
403 377 cbuf_t *cbuf; /* array of output buffers */
404 378 ulong_t *helpermap; /* set of dumpsys helper CPU ids */
405 379 ulong_t *bitmap; /* bitmap for marking pages to dump */
406 380 ulong_t *rbitmap; /* bitmap for used CBUF_MAPSIZE ranges */
407 381 pgcnt_t bitmapsize; /* size of bitmap */
408 382 pgcnt_t rbitmapsize; /* size of bitmap for ranges */
409 383 pgcnt_t found4m; /* number ranges allocated by dump */
410 384 pgcnt_t foundsm; /* number small pages allocated by dump */
411 385 pid_t *pids; /* list of process IDs at dump time */
412 386 size_t maxsize; /* memory size needed at dump time */
413 387 size_t maxvmsize; /* size of reserved VM */
414 388 char *maxvm; /* reserved VM for spare pages */
415 389 lock_t helper_lock; /* protect helper state */
416 390 char helpers_wanted; /* flag to enable parallelism */
417 391 } dumpcfg_t;
418 392
419 393 static dumpcfg_t dumpcfg; /* config vars */
420 394
421 395 /*
422 396 * The dump I/O buffer.
423 397 *
424 398 * There is one I/O buffer used by dumpvp_write and dumpvp_flush. It is
425 399 * sized according to the optimum device transfer speed.
426 400 */
427 401 typedef struct dumpbuf {
428 402 vnode_t *cdev_vp; /* VCHR open of the dump device */
429 403 len_t vp_limit; /* maximum write offset */
430 404 offset_t vp_off; /* current dump device offset */
431 405 char *cur; /* dump write pointer */
432 406 char *start; /* dump buffer address */
433 407 char *end; /* dump buffer end */
434 408 size_t size; /* size of dumpbuf in bytes */
435 409 size_t iosize; /* best transfer size for device */
436 410 } dumpbuf_t;
437 411
438 412 dumpbuf_t dumpbuf; /* I/O buffer */
439 413
440 414 /*
415 + * DUMP_HELPER_MAX_WAIT
416 + * For parallel dump, defines the maximum time the main task will wait
417 + * for at least one helper to register in dumpcfg.helpermap before
418 + * assuming there are no helpers and falling back to serial mode.
419 + */
420 +#define DUMP_HELPER_MAX_WAIT 1000 /* millisec */
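A minimal sketch of the fallback this constant bounds, assuming helpers
set their CPU's bit in dumpcfg.helpermap as they start (illustrative
only; the actual code differs in detail):

	int ms;
	boolean_t seen = B_FALSE;

	for (ms = 0; !seen && ms < DUMP_HELPER_MAX_WAIT; ms++) {
		size_t i;

		for (i = 0; i < BT_BITOUL(NCPU); i++)
			if (dumpcfg.helpermap[i] != 0)
				seen = B_TRUE;
		if (!seen)
			drv_usecwait(1000);	/* wait 1 ms; spinning is OK in panic context */
	}
	if (!seen)
		dumpcfg.clevel = DUMP_CLEVEL_SERIAL;	/* no helpers; fall back to serial */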
421 +
422 +/*
441 423 * The dump I/O buffer must be at least one page, at most xfer_size
442 424 * bytes, and should scale with physmem in between. The transfer size
443 425 * passed in will either represent a global default (maxphys) or the
444 426 * best size for the device. The size of the dumpbuf I/O buffer is
445 427 * limited by dumpbuf_limit (8MB by default) because the dump
446 428 * performance saturates beyond a certain size. The default is to
447 429 * select 1/4096 of the memory.
448 430 */
449 431 static int dumpbuf_fraction = 12; /* memory size scale factor */
450 432 static size_t dumpbuf_limit = 8 * DUMP_1MB; /* max I/O buf size */
451 433
452 434 static size_t
453 435 dumpbuf_iosize(size_t xfer_size)
454 436 {
455 437 size_t iosize = ptob(physmem >> dumpbuf_fraction);
456 438
457 439 if (iosize < PAGESIZE)
458 440 iosize = PAGESIZE;
459 441 else if (iosize > xfer_size)
460 442 iosize = xfer_size;
461 443 if (iosize > dumpbuf_limit)
462 444 iosize = dumpbuf_limit;
463 445 return (iosize & PAGEMASK);
464 446 }
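For example, with 4 KB pages and dumpbuf_fraction = 12, an 8 GB system
(physmem = 2^21 pages) yields ptob(2^21 >> 12) = 512 pages = 2 MB, which
falls between PAGESIZE and dumpbuf_limit; a 64 GB system would compute
16 MB, to be clamped to xfer_size if that is smaller and in any case to
the 8 MB dumpbuf_limit.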
465 447
466 448 /*
467 449 * resize the I/O buffer
468 450 */
469 451 static void
470 452 dumpbuf_resize(void)
471 453 {
472 454 char *old_buf = dumpbuf.start;
473 455 size_t old_size = dumpbuf.size;
474 456 char *new_buf;
475 457 size_t new_size;
476 458
477 459 ASSERT(MUTEX_HELD(&dump_lock));
478 460
479 461 new_size = dumpbuf_iosize(MAX(dumpbuf.iosize, maxphys));
480 462 if (new_size <= old_size)
481 463 return; /* no need to reallocate buffer */
482 464
483 465 new_buf = kmem_alloc(new_size, KM_SLEEP);
484 466 dumpbuf.size = new_size;
485 467 dumpbuf.start = new_buf;
486 468 dumpbuf.end = new_buf + new_size;
487 469 kmem_free(old_buf, old_size);
488 470 }
489 471
490 472 /*
491 473 * dump_update_clevel is called when dumpadm configures the dump device.
492 - * Calculate number of helpers and buffers.
493 - * Allocate the minimum configuration for now.
474 + * Determine the compression level / type:
475 + * - DUMP_CLEVEL_SERIAL is single-threaded lzjb
476 + * - DUMP_CLEVEL_LZJB is parallel lzjb
477 + * Calculate number of helpers and buffers.
478 + * Allocate the minimum configuration for now.
494 479 *
495 480 * When the dump file is configured we reserve a minimum amount of
496 481 * memory for use at crash time. But we reserve VA for all the memory
497 482 * we really want in order to do the fastest dump possible. The VA is
498 483 * backed by pages not being dumped, according to the bitmap. If
499 484 * there is insufficient spare memory, however, we fall back to the
500 485 * minimum.
501 486 *
502 487 * Live dump (savecore -L) always uses the minimum config.
503 488 *
504 - * clevel 0 is single threaded lzjb
505 - * clevel 1 is parallel lzjb
506 - * clevel 2 is parallel bzip2
507 - *
508 - * The ncpu threshold is selected with dump_plat_mincpu.
509 - * On OPL, set_platform_defaults() overrides the sun4u setting.
510 - * The actual values are defined via DUMP_PLAT_*_MINCPU macros.
511 - *
512 - * Architecture Threshold Algorithm
513 - * sun4u < 51 parallel lzjb
514 - * sun4u >= 51 parallel bzip2(*)
515 - * sun4u OPL < 8 parallel lzjb
516 - * sun4u OPL >= 8 parallel bzip2(*)
517 - * sun4v < 128 parallel lzjb
518 - * sun4v >= 128 parallel bzip2(*)
519 - * x86 < 11 parallel lzjb
520 - * x86 >= 11 parallel bzip2(*)
521 - * 32-bit N/A single-threaded lzjb
522 - *
523 - * (*) bzip2 is only chosen if there is sufficient available
524 - * memory for buffers at dump time. See dumpsys_get_maxmem().
525 - *
526 - * Faster dump devices have larger I/O buffers. The threshold value is
527 - * increased according to the size of the dump I/O buffer, because
528 - * parallel lzjb performs better with faster disks. For buffers >= 1MB
529 - * the threshold is 3X; for buffers >= 256K threshold is 2X.
530 - *
531 489 * For parallel dumps, the number of helpers is ncpu-1. The CPU
532 490 * running panic runs the main task. For single-threaded dumps, the
533 491 * panic CPU does lzjb compression (it is tagged as MAINHELPER.)
534 492 *
535 493 * Need multiple buffers per helper so that they do not block waiting
536 494 * for the main task.
537 495 * parallel single-threaded
538 496 * Number of output buffers: nhelper*2 1
539 497 * Number of mapping buffers: nhelper*4 1
540 498 *
541 499 */
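For example, on a 9-CPU system with parallel dump enabled, the function
below computes nhelper = ncpus - 1 = 8, ncbuf = NCBUF_PER_HELPER * 8 = 16
output buffers, and ncmap = NCMAP_PER_HELPER * 8 = 32 mapping buffers.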
542 500 static void
543 501 dump_update_clevel()
544 502 {
545 503 int tag;
546 - size_t bz2size;
547 504 helper_t *hp, *hpend;
548 505 cbuf_t *cp, *cpend;
549 506 dumpcfg_t *old = &dumpcfg;
550 507 dumpcfg_t newcfg = *old;
551 508 dumpcfg_t *new = &newcfg;
552 509
553 510 ASSERT(MUTEX_HELD(&dump_lock));
554 511
555 512 /*
556 513 * Free the previously allocated bufs and VM.
557 514 */
558 515 if (old->helper != NULL) {
559 516
560 517 /* helpers */
561 518 hpend = &old->helper[old->nhelper];
562 519 for (hp = old->helper; hp != hpend; hp++) {
563 520 if (hp->lzbuf != NULL)
564 521 kmem_free(hp->lzbuf, PAGESIZE);
565 522 if (hp->page != NULL)
566 523 kmem_free(hp->page, PAGESIZE);
567 524 }
568 525 kmem_free(old->helper, old->nhelper * sizeof (helper_t));
569 526
570 527 /* VM space for mapping pages */
571 528 cpend = &old->cmap[old->ncmap];
572 529 for (cp = old->cmap; cp != cpend; cp++)
573 530 vmem_xfree(heap_arena, cp->buf, CBUF_MAPSIZE);
574 531 kmem_free(old->cmap, old->ncmap * sizeof (cbuf_t));
575 532
576 533 /* output bufs */
577 534 cpend = &old->cbuf[old->ncbuf];
578 535 for (cp = old->cbuf; cp != cpend; cp++)
579 536 if (cp->buf != NULL)
580 537 kmem_free(cp->buf, cp->size);
581 538 kmem_free(old->cbuf, old->ncbuf * sizeof (cbuf_t));
582 539
583 540 /* reserved VM for dumpsys_get_maxmem */
584 541 if (old->maxvmsize > 0)
585 542 vmem_xfree(heap_arena, old->maxvm, old->maxvmsize);
586 543 }
587 544
588 545 /*
589 546 * Allocate memory and VM.
590 547 * One CPU runs dumpsys, the rest are helpers.
591 548 */
592 549 new->nhelper = ncpus - 1;
593 550 if (new->nhelper < 1)
594 551 new->nhelper = 1;
595 552
596 553 if (new->nhelper > DUMP_MAX_NHELPER)
597 554 new->nhelper = DUMP_MAX_NHELPER;
598 555
599 - /* use platform default, unless /etc/system overrides */
600 - if (dump_plat_mincpu == MINCPU_NOT_SET)
601 - dump_plat_mincpu = dump_plat_mincpu_default;
602 -
603 - /* increase threshold for faster disks */
604 - new->threshold = dump_plat_mincpu;
605 - if (dumpbuf.iosize >= DUMP_1MB)
606 - new->threshold *= 3;
607 - else if (dumpbuf.iosize >= (256 * DUMP_1KB))
608 - new->threshold *= 2;
609 -
610 - /* figure compression level based upon the computed threshold. */
611 - if (dump_plat_mincpu == 0 || new->nhelper < 2) {
612 - new->clevel = 0;
556 + /* Serial dump if dump_ncpu_low is 0 or exceeds ncpus, or too few helpers */
557 + if (dump_ncpu_low == 0 || dump_ncpu_low > ncpus || new->nhelper < 2) {
558 + new->clevel = DUMP_CLEVEL_SERIAL;
613 559 new->nhelper = 1;
614 - } else if ((new->nhelper + 1) >= new->threshold) {
615 - new->clevel = DUMP_CLEVEL_BZIP2;
616 - } else {
617 - new->clevel = DUMP_CLEVEL_LZJB;
618 - }
619 -
620 - if (new->clevel == 0) {
621 560 new->ncbuf = 1;
622 561 new->ncmap = 1;
623 562 } else {
563 + new->clevel = DUMP_CLEVEL_LZJB;
624 564 new->ncbuf = NCBUF_PER_HELPER * new->nhelper;
625 565 new->ncmap = NCMAP_PER_HELPER * new->nhelper;
626 566 }
627 567
628 568 /*
629 569 * Allocate new data structures and buffers for MINHELPERS,
630 570 * and also figure the max desired size.
631 571 */
632 - bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
633 572 new->maxsize = 0;
634 573 new->maxvmsize = 0;
635 574 new->maxvm = NULL;
636 575 tag = 1;
637 576 new->helper = kmem_zalloc(new->nhelper * sizeof (helper_t), KM_SLEEP);
638 577 hpend = &new->helper[new->nhelper];
639 578 for (hp = new->helper; hp != hpend; hp++) {
640 579 hp->tag = tag++;
641 580 if (hp < &new->helper[MINHELPERS]) {
642 581 hp->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
643 582 hp->page = kmem_alloc(PAGESIZE, KM_SLEEP);
644 - } else if (new->clevel < DUMP_CLEVEL_BZIP2) {
583 + } else {
645 584 new->maxsize += 2 * PAGESIZE;
646 - } else {
647 - new->maxsize += PAGESIZE;
648 585 }
649 - if (new->clevel >= DUMP_CLEVEL_BZIP2)
650 - new->maxsize += bz2size;
651 586 }
652 587
653 588 new->cbuf = kmem_zalloc(new->ncbuf * sizeof (cbuf_t), KM_SLEEP);
654 589 cpend = &new->cbuf[new->ncbuf];
655 590 for (cp = new->cbuf; cp != cpend; cp++) {
656 591 cp->state = CBUF_FREEBUF;
657 592 cp->size = CBUF_SIZE;
658 593 if (cp < &new->cbuf[MINCBUFS])
659 594 cp->buf = kmem_alloc(cp->size, KM_SLEEP);
660 595 else
661 596 new->maxsize += cp->size;
662 597 }
663 598
664 599 new->cmap = kmem_zalloc(new->ncmap * sizeof (cbuf_t), KM_SLEEP);
665 600 cpend = &new->cmap[new->ncmap];
666 601 for (cp = new->cmap; cp != cpend; cp++) {
667 602 cp->state = CBUF_FREEMAP;
668 603 cp->size = CBUF_MAPSIZE;
669 604 cp->buf = vmem_xalloc(heap_arena, CBUF_MAPSIZE, CBUF_MAPSIZE,
670 605 0, 0, NULL, NULL, VM_SLEEP);
671 606 }
672 607
673 608 /* reserve VA to be backed with spare pages at crash time */
674 609 if (new->maxsize > 0) {
675 610 new->maxsize = P2ROUNDUP(new->maxsize, PAGESIZE);
676 611 new->maxvmsize = P2ROUNDUP(new->maxsize, CBUF_MAPSIZE);
677 612 new->maxvm = vmem_xalloc(heap_arena, new->maxvmsize,
678 613 CBUF_MAPSIZE, 0, 0, NULL, NULL, VM_SLEEP);
679 614 }
680 615
681 616 /*
682 617 * Reserve memory for kmem allocation calls made during crash
683 618 * dump. The hat layer allocates memory for each mapping
684 619 * created, and the I/O path allocates buffers and data structs.
685 620 * Add a few pages for safety.
686 621 */
687 622 kmem_dump_init((new->ncmap * dump_kmem_permap) +
688 623 (dump_kmem_pages * PAGESIZE));
689 624
690 625 /* set new config pointers */
691 626 *old = *new;
692 627 }
693 628
694 629 /*
695 630 * Define a struct memlist walker to optimize bitnum to pfn
696 631 * lookup. The walker maintains the state of the list traversal.
697 632 */
698 633 typedef struct dumpmlw {
699 634 struct memlist *mp; /* current memlist */
700 635 pgcnt_t basenum; /* bitnum base offset */
701 636 pgcnt_t mppages; /* current memlist size */
702 637 pgcnt_t mpleft; /* size to end of current memlist */
703 638 pfn_t mpaddr; /* first pfn in memlist */
704 639 } dumpmlw_t;
705 640
706 641 /* initialize the walker */
707 642 static inline void
708 643 dump_init_memlist_walker(dumpmlw_t *pw)
709 644 {
710 645 pw->mp = phys_install;
711 646 pw->basenum = 0;
712 647 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
713 648 pw->mpleft = pw->mppages;
714 649 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
715 650 }
716 651
717 652 /*
718 653 * Lookup pfn given bitnum. The memlist can be quite long on some
719 654 * systems (e.g.: one per board). To optimize sequential lookups, the
720 655 * caller initializes and presents a memlist walker.
721 656 */
722 657 static pfn_t
723 658 dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw)
724 659 {
725 660 bitnum -= pw->basenum;
726 661 while (pw->mp != NULL) {
727 662 if (bitnum < pw->mppages) {
728 663 pw->mpleft = pw->mppages - bitnum;
729 664 return (pw->mpaddr + bitnum);
730 665 }
731 666 bitnum -= pw->mppages;
732 667 pw->basenum += pw->mppages;
733 668 pw->mp = pw->mp->ml_next;
734 669 if (pw->mp != NULL) {
735 670 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
736 671 pw->mpleft = pw->mppages;
737 672 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
738 673 }
739 674 }
740 675 return (PFN_INVALID);
741 676 }
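Typical sequential use of the walker (the same pattern appears in
dumpsys_get_maxmem() below):

	dumpmlw_t mlw;
	pgcnt_t bitnum;
	pfn_t pfn;

	dump_init_memlist_walker(&mlw);
	for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
		pfn = dump_bitnum_to_pfn(bitnum, &mlw);
		ASSERT(pfn != PFN_INVALID);
		/* ... inspect the page at pfn ... */
	}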
742 677
743 678 static pgcnt_t
744 679 dump_pfn_to_bitnum(pfn_t pfn)
745 680 {
746 681 struct memlist *mp;
747 682 pgcnt_t bitnum = 0;
748 683
749 684 for (mp = phys_install; mp != NULL; mp = mp->ml_next) {
750 685 if (pfn >= (mp->ml_address >> PAGESHIFT) &&
751 686 pfn < ((mp->ml_address + mp->ml_size) >> PAGESHIFT))
752 687 return (bitnum + pfn - (mp->ml_address >> PAGESHIFT));
753 688 bitnum += mp->ml_size >> PAGESHIFT;
754 689 }
755 690 return ((pgcnt_t)-1);
756 691 }
757 692
758 693 /*
759 694 * Set/test bitmap for a CBUF_MAPSIZE range which includes pfn. The
760 695 * mapping of pfn to range index is imperfect because pfn and bitnum
761 696 * do not have the same phase. To make sure a CBUF_MAPSIZE range is
762 697 * covered, call this for both ends:
763 698 * dump_set_used(base)
764 699 * dump_set_used(base+CBUF_MAPNP-1)
765 700 *
766 701 * This is used during a panic dump to mark pages allocated by
767 702 * dumpsys_get_maxmem(). The macro IS_DUMP_PAGE(pp) is used by
768 703 * page_get_mnode_freelist() to make sure pages used by dump are never
769 704 * allocated.
770 705 */
771 706 #define CBUF_MAPP2R(pfn) ((pfn) >> (CBUF_MAPSHIFT - PAGESHIFT))
772 707
773 708 static void
774 709 dump_set_used(pfn_t pfn)
775 710 {
776 711
777 712 pgcnt_t bitnum, rbitnum;
778 713
779 714 bitnum = dump_pfn_to_bitnum(pfn);
780 715 ASSERT(bitnum != (pgcnt_t)-1);
781 716
782 717 rbitnum = CBUF_MAPP2R(bitnum);
783 718 ASSERT(rbitnum < dumpcfg.rbitmapsize);
784 719
785 720 BT_SET(dumpcfg.rbitmap, rbitnum);
786 721 }
787 722
788 723 int
789 724 dump_test_used(pfn_t pfn)
790 725 {
791 726 pgcnt_t bitnum, rbitnum;
792 727
793 728 bitnum = dump_pfn_to_bitnum(pfn);
794 729 ASSERT(bitnum != (pgcnt_t)-1);
795 730
796 731 rbitnum = CBUF_MAPP2R(bitnum);
797 732 ASSERT(rbitnum < dumpcfg.rbitmapsize);
798 733
799 734 return (BT_TEST(dumpcfg.rbitmap, rbitnum));
800 735 }
801 736
802 737 /*
803 - * dumpbzalloc and dumpbzfree are callbacks from the bzip2 library.
804 - * dumpsys_get_maxmem() uses them for BZ2_bzCompressInit().
805 - */
806 -static void *
807 -dumpbzalloc(void *opaque, int items, int size)
808 -{
809 - size_t *sz;
810 - char *ret;
811 -
812 - ASSERT(opaque != NULL);
813 - sz = opaque;
814 - ret = dumpcfg.maxvm + *sz;
815 - *sz += items * size;
816 - *sz = P2ROUNDUP(*sz, BZ2_BZALLOC_ALIGN);
817 - ASSERT(*sz <= dumpcfg.maxvmsize);
818 - return (ret);
819 -}
820 -
821 -/*ARGSUSED*/
822 -static void
823 -dumpbzfree(void *opaque, void *addr)
824 -{
825 -}
826 -
827 -/*
828 738 * Perform additional checks on the page to see if we can really use
829 739 * it. The kernel (kas) pages are always set in the bitmap. However,
830 740 * boot memory pages (prom_ppages or P_BOOTPAGES) are not in the
831 741 * bitmap. So we check for them.
832 742 */
833 743 static inline int
834 744 dump_pfn_check(pfn_t pfn)
835 745 {
836 746 page_t *pp = page_numtopp_nolock(pfn);
837 747 if (pp == NULL || pp->p_pagenum != pfn ||
838 748 #if defined(__sparc)
839 749 pp->p_vnode == &promvp ||
840 750 #else
841 751 PP_ISBOOTPAGES(pp) ||
842 752 #endif
843 753 pp->p_toxic != 0)
844 754 return (0);
845 755 return (1);
846 756 }
847 757
848 758 /*
849 759 * Check a range to see if all contained pages are available and
850 760 * return non-zero if the range can be used.
851 761 */
852 762 static inline int
853 763 dump_range_check(pgcnt_t start, pgcnt_t end, pfn_t pfn)
854 764 {
855 765 for (; start < end; start++, pfn++) {
856 766 if (BT_TEST(dumpcfg.bitmap, start))
857 767 return (0);
858 768 if (!dump_pfn_check(pfn))
859 769 return (0);
860 770 }
861 771 return (1);
862 772 }
863 773
864 774 /*
865 775 * dumpsys_get_maxmem() is called during panic. Find unused ranges
866 - * and use them for buffers. If we find enough memory switch to
867 - * parallel bzip2, otherwise use parallel lzjb.
868 - *
776 + * and use them for buffers.
869 777 * It searches the dump bitmap in 2 passes. The first time it looks
870 778 * for CBUF_MAPSIZE ranges. On the second pass it uses small pages.
871 779 */
872 780 static void
873 781 dumpsys_get_maxmem()
874 782 {
875 783 dumpcfg_t *cfg = &dumpcfg;
876 784 cbuf_t *endcp = &cfg->cbuf[cfg->ncbuf];
877 785 helper_t *endhp = &cfg->helper[cfg->nhelper];
878 786 pgcnt_t bitnum, end;
879 - size_t sz, endsz, bz2size;
787 + size_t sz, endsz;
880 788 pfn_t pfn, off;
881 789 cbuf_t *cp;
882 - helper_t *hp, *ohp;
790 + helper_t *hp;
883 791 dumpmlw_t mlw;
884 792 int k;
885 793
886 794 /*
887 - * Setting dump_plat_mincpu to 0 at any time forces a serial
888 - * dump.
795 + * Setting dump_ncpu_low to 0 forces a single-threaded dump.
889 796 */
890 - if (dump_plat_mincpu == 0) {
891 - cfg->clevel = 0;
797 + if (dump_ncpu_low == 0) {
798 + cfg->clevel = DUMP_CLEVEL_SERIAL;
892 799 return;
893 800 }
894 801
895 802 /*
896 803 * There may be no point in looking for spare memory. If
897 804 * dumping all memory, then none is spare. If doing a serial
898 805 * dump, then already have buffers.
899 806 */
900 - if (cfg->maxsize == 0 || cfg->clevel < DUMP_CLEVEL_LZJB ||
807 + if (cfg->maxsize == 0 || cfg->clevel == DUMP_CLEVEL_SERIAL ||
901 808 (dump_conflags & DUMP_ALL) != 0) {
902 - if (cfg->clevel > DUMP_CLEVEL_LZJB)
903 - cfg->clevel = DUMP_CLEVEL_LZJB;
904 809 return;
905 810 }
906 811
907 812 sz = 0;
908 813 cfg->found4m = 0;
909 814 cfg->foundsm = 0;
910 815
911 816 /* bitmap of ranges used to estimate which pfns are being used */
912 817 bzero(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.rbitmapsize));
913 818
914 819 /* find ranges that are not being dumped to use for buffers */
915 820 dump_init_memlist_walker(&mlw);
916 821 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
917 822 dump_timeleft = dump_timeout;
918 823 end = bitnum + CBUF_MAPNP;
919 824 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
920 825 ASSERT(pfn != PFN_INVALID);
921 826
922 827 /* skip partial range at end of mem segment */
923 828 if (mlw.mpleft < CBUF_MAPNP) {
924 829 end = bitnum + mlw.mpleft;
925 830 continue;
926 831 }
927 832
928 833 /* skip non aligned pages */
929 834 off = P2PHASE(pfn, CBUF_MAPNP);
930 835 if (off != 0) {
931 836 end -= off;
932 837 continue;
933 838 }
934 839
935 840 if (!dump_range_check(bitnum, end, pfn))
936 841 continue;
937 842
938 843 ASSERT((sz + CBUF_MAPSIZE) <= cfg->maxvmsize);
939 844 hat_devload(kas.a_hat, cfg->maxvm + sz, CBUF_MAPSIZE, pfn,
940 845 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
941 846 sz += CBUF_MAPSIZE;
942 847 cfg->found4m++;
943 848
944 849 /* set the bitmap for both ends to be sure to cover the range */
945 850 dump_set_used(pfn);
946 851 dump_set_used(pfn + CBUF_MAPNP - 1);
947 852
948 853 if (sz >= cfg->maxsize)
949 854 goto foundmax;
950 855 }
951 856
952 857 /* Add small pages if we can't find enough large pages. */
953 858 dump_init_memlist_walker(&mlw);
954 859 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
955 860 dump_timeleft = dump_timeout;
956 861 end = bitnum + CBUF_MAPNP;
957 862 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
958 863 ASSERT(pfn != PFN_INVALID);
959 864
960 865 /* Find any non-aligned pages at start and end of segment. */
961 866 off = P2PHASE(pfn, CBUF_MAPNP);
962 867 if (mlw.mpleft < CBUF_MAPNP) {
963 868 end = bitnum + mlw.mpleft;
964 869 } else if (off != 0) {
965 870 end -= off;
966 871 } else if (cfg->found4m && dump_test_used(pfn)) {
967 872 continue;
968 873 }
969 874
970 875 for (; bitnum < end; bitnum++, pfn++) {
971 876 dump_timeleft = dump_timeout;
972 877 if (BT_TEST(dumpcfg.bitmap, bitnum))
973 878 continue;
974 879 if (!dump_pfn_check(pfn))
975 880 continue;
976 881 ASSERT((sz + PAGESIZE) <= cfg->maxvmsize);
977 882 hat_devload(kas.a_hat, cfg->maxvm + sz, PAGESIZE, pfn,
978 883 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
979 884 sz += PAGESIZE;
980 885 cfg->foundsm++;
981 886 dump_set_used(pfn);
982 887 if (sz >= cfg->maxsize)
983 888 goto foundmax;
984 889 }
985 890 }
986 891
987 - /* Fall back to lzjb if we did not get enough memory for bzip2. */
988 - endsz = (cfg->maxsize * cfg->threshold) / cfg->nhelper;
989 - if (sz < endsz) {
990 - cfg->clevel = DUMP_CLEVEL_LZJB;
991 - }
992 -
993 892 /* Allocate memory for as many helpers as we can. */
994 893 foundmax:
995 894
996 895 /* Byte offsets into memory found and mapped above */
997 896 endsz = sz;
998 897 sz = 0;
999 898
1000 - /* Set the size for bzip2 state. Only bzip2 needs it. */
1001 - bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
1002 -
1003 899 /* Skip the preallocated output buffers. */
1004 900 cp = &cfg->cbuf[MINCBUFS];
1005 901
1006 - /* Use this to move memory up from the preallocated helpers. */
1007 - ohp = cfg->helper;
1008 -
1009 902 /* Loop over all helpers and allocate memory. */
1010 903 for (hp = cfg->helper; hp < endhp; hp++) {
1011 904
1012 905 /* Skip preallocated helpers by checking hp->page. */
1013 906 if (hp->page == NULL) {
1014 - if (cfg->clevel <= DUMP_CLEVEL_LZJB) {
1015 - /* lzjb needs 2 1-page buffers */
1016 - if ((sz + (2 * PAGESIZE)) > endsz)
1017 - break;
1018 - hp->page = cfg->maxvm + sz;
1019 - sz += PAGESIZE;
1020 - hp->lzbuf = cfg->maxvm + sz;
1021 - sz += PAGESIZE;
1022 -
1023 - } else if (ohp->lzbuf != NULL) {
1024 - /* re-use the preallocted lzjb page for bzip2 */
1025 - hp->page = ohp->lzbuf;
1026 - ohp->lzbuf = NULL;
1027 - ++ohp;
1028 -
1029 - } else {
1030 - /* bzip2 needs a 1-page buffer */
1031 - if ((sz + PAGESIZE) > endsz)
1032 - break;
1033 - hp->page = cfg->maxvm + sz;
1034 - sz += PAGESIZE;
1035 - }
907 + /* lzjb needs 2 1-page buffers */
908 + if ((sz + (2 * PAGESIZE)) > endsz)
909 + break;
910 + hp->page = cfg->maxvm + sz;
911 + sz += PAGESIZE;
912 + hp->lzbuf = cfg->maxvm + sz;
913 + sz += PAGESIZE;
1036 914 }
1037 915
1038 916 /*
1039 917 * Add output buffers per helper. The number of
1040 918 * buffers per helper is determined by the ratio of
1041 919 * ncbuf to nhelper.
1042 920 */
1043 921 for (k = 0; cp < endcp && (sz + CBUF_SIZE) <= endsz &&
1044 922 k < NCBUF_PER_HELPER; k++) {
1045 923 cp->state = CBUF_FREEBUF;
1046 924 cp->size = CBUF_SIZE;
1047 925 cp->buf = cfg->maxvm + sz;
1048 926 sz += CBUF_SIZE;
1049 927 ++cp;
1050 928 }
1051 -
1052 - /*
1053 - * bzip2 needs compression state. Use the dumpbzalloc
1054 - * and dumpbzfree callbacks to allocate the memory.
1055 - * bzip2 does allocation only at init time.
1056 - */
1057 - if (cfg->clevel >= DUMP_CLEVEL_BZIP2) {
1058 - if ((sz + bz2size) > endsz) {
1059 - hp->page = NULL;
1060 - break;
1061 - } else {
1062 - hp->bzstream.opaque = &sz;
1063 - hp->bzstream.bzalloc = dumpbzalloc;
1064 - hp->bzstream.bzfree = dumpbzfree;
1065 - (void) BZ2_bzCompressInit(&hp->bzstream,
1066 - dump_bzip2_level, 0, 0);
1067 - hp->bzstream.opaque = NULL;
1068 - }
1069 - }
1070 929 }
1071 930
1072 931 /* Finish allocating output buffers */
1073 932 for (; cp < endcp && (sz + CBUF_SIZE) <= endsz; cp++) {
1074 933 cp->state = CBUF_FREEBUF;
1075 934 cp->size = CBUF_SIZE;
1076 935 cp->buf = cfg->maxvm + sz;
1077 936 sz += CBUF_SIZE;
1078 937 }
1079 938
1080 939 /* Enable IS_DUMP_PAGE macro, which checks for pages we took. */
1081 940 if (cfg->found4m || cfg->foundsm)
1082 941 dump_check_used = 1;
1083 942
1084 943 ASSERT(sz <= endsz);
1085 944 }
1086 945
1087 946 static void
1088 947 dumphdr_init(void)
1089 948 {
1090 949 pgcnt_t npages = 0;
1091 950
1092 951 ASSERT(MUTEX_HELD(&dump_lock));
1093 952
1094 953 if (dumphdr == NULL) {
1095 954 dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP);
1096 955 dumphdr->dump_magic = DUMP_MAGIC;
1097 956 dumphdr->dump_version = DUMP_VERSION;
1098 957 dumphdr->dump_wordsize = DUMP_WORDSIZE;
1099 958 dumphdr->dump_pageshift = PAGESHIFT;
1100 959 dumphdr->dump_pagesize = PAGESIZE;
1101 960 dumphdr->dump_utsname = utsname;
1102 961 (void) strcpy(dumphdr->dump_platform, platform);
1103 962 dumpbuf.size = dumpbuf_iosize(maxphys);
1104 963 dumpbuf.start = kmem_alloc(dumpbuf.size, KM_SLEEP);
1105 964 dumpbuf.end = dumpbuf.start + dumpbuf.size;
1106 965 dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP);
1107 966 dumpcfg.helpermap = kmem_zalloc(BT_SIZEOFMAP(NCPU), KM_SLEEP);
1108 967 LOCK_INIT_HELD(&dumpcfg.helper_lock);
1109 968 dump_stack_scratch = kmem_alloc(STACK_BUF_SIZE, KM_SLEEP);
1110 969 (void) strncpy(dumphdr->dump_uuid, dump_get_uuid(),
1111 970 sizeof (dumphdr->dump_uuid));
1112 971 }
1113 972
1114 973 npages = num_phys_pages();
1115 974
1116 975 if (dumpcfg.bitmapsize != npages) {
1117 976 size_t rlen = CBUF_MAPP2R(P2ROUNDUP(npages, CBUF_MAPNP));
1118 977 void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP);
1119 978 void *rmap = kmem_alloc(BT_SIZEOFMAP(rlen), KM_SLEEP);
1120 979
1121 980 if (dumpcfg.bitmap != NULL)
1122 981 kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.
1123 982 bitmapsize));
1124 983 if (dumpcfg.rbitmap != NULL)
1125 984 kmem_free(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.
1126 985 rbitmapsize));
1127 986 dumpcfg.bitmap = map;
1128 987 dumpcfg.bitmapsize = npages;
1129 988 dumpcfg.rbitmap = rmap;
1130 989 dumpcfg.rbitmapsize = rlen;
1131 990 }
1132 991 }
1133 992
1134 993 /*
1135 994 * Establish a new dump device.
1136 995 */
1137 996 int
1138 997 dumpinit(vnode_t *vp, char *name, int justchecking)
1139 998 {
1140 999 vnode_t *cvp;
1141 1000 vattr_t vattr;
1142 1001 vnode_t *cdev_vp;
1143 1002 int error = 0;
1144 1003
1145 1004 ASSERT(MUTEX_HELD(&dump_lock));
1146 1005
1147 1006 dumphdr_init();
1148 1007
1149 1008 cvp = common_specvp(vp);
1150 1009 if (cvp == dumpvp)
1151 1010 return (0);
1152 1011
1153 1012 /*
1154 1013 * Determine whether this is a plausible dump device. We want either:
1155 1014 * (1) a real device that's not mounted and has a cb_dump routine, or
1156 1015 * (2) a swapfile on some filesystem that has a vop_dump routine.
1157 1016 */
1158 1017 if ((error = VOP_OPEN(&cvp, FREAD | FWRITE, kcred, NULL)) != 0)
1159 1018 return (error);
1160 1019
1161 1020 vattr.va_mask = AT_SIZE | AT_TYPE | AT_RDEV;
1162 1021 if ((error = VOP_GETATTR(cvp, &vattr, 0, kcred, NULL)) == 0) {
1163 1022 if (vattr.va_type == VBLK || vattr.va_type == VCHR) {
1164 1023 if (devopsp[getmajor(vattr.va_rdev)]->
1165 1024 devo_cb_ops->cb_dump == nodev)
1166 1025 error = ENOTSUP;
1167 1026 else if (vfs_devismounted(vattr.va_rdev))
1168 1027 error = EBUSY;
1169 1028 if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip),
1170 1029 ZFS_DRIVER) == 0 &&
1171 1030 IS_SWAPVP(common_specvp(cvp)))
1172 1031 error = EBUSY;
1173 1032 } else {
1174 1033 if (vn_matchopval(cvp, VOPNAME_DUMP, fs_nosys) ||
1175 1034 !IS_SWAPVP(cvp))
1176 1035 error = ENOTSUP;
1177 1036 }
1178 1037 }
1179 1038
1180 1039 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE)
1181 1040 error = ENOSPC;
1182 1041
1183 1042 if (error || justchecking) {
1184 1043 (void) VOP_CLOSE(cvp, FREAD | FWRITE, 1, (offset_t)0,
1185 1044 kcred, NULL);
1186 1045 return (error);
1187 1046 }
1188 1047
1189 1048 VN_HOLD(cvp);
1190 1049
1191 1050 if (dumpvp != NULL)
1192 1051 dumpfini(); /* unconfigure the old dump device */
1193 1052
1194 1053 dumpvp = cvp;
1195 1054 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
1196 1055 dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP);
1197 1056 (void) strcpy(dumppath, name);
1198 1057 dumpbuf.iosize = 0;
1199 1058
1200 1059 /*
1201 1060 * If the dump device is a block device, attempt to open up the
1202 1061 * corresponding character device and determine its maximum transfer
1203 1062 * size. We use this information to potentially resize dumpbuf to a
1204 1063 * larger and more optimal size for performing i/o to the dump device.
1205 1064 */
1206 1065 if (cvp->v_type == VBLK &&
1207 1066 (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) {
1208 1067 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1209 1068 size_t blk_size;
1210 1069 struct dk_cinfo dki;
1211 1070 struct dk_minfo minf;
1212 1071
1213 1072 if (VOP_IOCTL(cdev_vp, DKIOCGMEDIAINFO,
1214 1073 (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL)
1215 1074 == 0 && minf.dki_lbsize != 0)
1216 1075 blk_size = minf.dki_lbsize;
1217 1076 else
1218 1077 blk_size = DEV_BSIZE;
1219 1078
1220 1079 if (VOP_IOCTL(cdev_vp, DKIOCINFO, (intptr_t)&dki,
1221 1080 FKIOCTL, kcred, NULL, NULL) == 0) {
1222 1081 dumpbuf.iosize = dki.dki_maxtransfer * blk_size;
1223 1082 dumpbuf_resize();
1224 1083 }
1225 1084 /*
1226 1085 * If we are working with a zvol then dumpify it
1227 1086 * if it's not being used as swap.
1228 1087 */
1229 1088 if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) {
1230 1089 if (IS_SWAPVP(common_specvp(cvp)))
1231 1090 error = EBUSY;
1232 1091 else if ((error = VOP_IOCTL(cdev_vp,
1233 1092 DKIOCDUMPINIT, NULL, FKIOCTL, kcred,
1234 1093 NULL, NULL)) != 0)
1235 1094 dumpfini();
1236 1095 }
1237 1096
1238 1097 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1239 1098 kcred, NULL);
1240 1099 }
1241 1100
1242 1101 VN_RELE(cdev_vp);
1243 1102 }
1244 1103
1245 1104 cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20);
1246 1105
1247 1106 dump_update_clevel();
1248 1107
1249 1108 return (error);
1250 1109 }
1251 1110
1252 1111 void
1253 1112 dumpfini(void)
1254 1113 {
1255 1114 vattr_t vattr;
1256 1115 boolean_t is_zfs = B_FALSE;
1257 1116 vnode_t *cdev_vp;
1258 1117 ASSERT(MUTEX_HELD(&dump_lock));
1259 1118
1260 1119 kmem_free(dumppath, strlen(dumppath) + 1);
1261 1120
1262 1121 /*
1263 1122 * Determine if we are using zvols for our dump device
1264 1123 */
1265 1124 vattr.va_mask = AT_RDEV;
1266 1125 if (VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL) == 0) {
1267 1126 is_zfs = (getmajor(vattr.va_rdev) ==
1268 1127 ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE;
1269 1128 }
1270 1129
1271 1130 /*
1272 1131 * If we have a zvol dump device then we call into zfs so
1273 1132 * that it may have a chance to cleanup.
1274 1133 */
1275 1134 if (is_zfs &&
1276 1135 (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) {
1277 1136 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1278 1137 (void) VOP_IOCTL(cdev_vp, DKIOCDUMPFINI, NULL, FKIOCTL,
1279 1138 kcred, NULL, NULL);
1280 1139 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1281 1140 kcred, NULL);
1282 1141 }
1283 1142 VN_RELE(cdev_vp);
1284 1143 }
1285 1144
1286 1145 (void) VOP_CLOSE(dumpvp, FREAD | FWRITE, 1, (offset_t)0, kcred, NULL);
1287 1146
1288 1147 VN_RELE(dumpvp);
1289 1148
1290 1149 dumpvp = NULL;
1291 1150 dumpvp_size = 0;
1292 1151 dumppath = NULL;
1293 1152 }
1294 1153
1295 1154 static offset_t
1296 1155 dumpvp_flush(void)
1297 1156 {
1298 1157 size_t size = P2ROUNDUP(dumpbuf.cur - dumpbuf.start, PAGESIZE);
1299 1158 hrtime_t iotime;
1300 1159 int err;
1301 1160
1302 1161 if (dumpbuf.vp_off + size > dumpbuf.vp_limit) {
1303 1162 dump_ioerr = ENOSPC;
1304 1163 dumpbuf.vp_off = dumpbuf.vp_limit;
1305 1164 } else if (size != 0) {
1306 1165 iotime = gethrtime();
1307 1166 dumpsync.iowait += iotime - dumpsync.iowaitts;
1308 1167 if (panicstr)
1309 1168 err = VOP_DUMP(dumpvp, dumpbuf.start,
1310 1169 lbtodb(dumpbuf.vp_off), btod(size), NULL);
1311 1170 else
1312 1171 err = vn_rdwr(UIO_WRITE, dumpbuf.cdev_vp != NULL ?
1313 1172 dumpbuf.cdev_vp : dumpvp, dumpbuf.start, size,
1314 1173 dumpbuf.vp_off, UIO_SYSSPACE, 0, dumpbuf.vp_limit,
1315 1174 kcred, 0);
1316 1175 if (err && dump_ioerr == 0)
1317 1176 dump_ioerr = err;
1318 1177 dumpsync.iowaitts = gethrtime();
1319 1178 dumpsync.iotime += dumpsync.iowaitts - iotime;
1320 1179 dumpsync.nwrite += size;
1321 1180 dumpbuf.vp_off += size;
1322 1181 }
1323 1182 dumpbuf.cur = dumpbuf.start;
1324 1183 dump_timeleft = dump_timeout;
1325 1184 return (dumpbuf.vp_off);
1326 1185 }
1327 1186
1328 1187 /* maximize write speed by keeping seek offset aligned with size */
1329 1188 void
1330 1189 dumpvp_write(const void *va, size_t size)
1331 1190 {
1332 1191 size_t len, off, sz;
1333 1192
1334 1193 while (size != 0) {
1335 1194 len = MIN(size, dumpbuf.end - dumpbuf.cur);
1336 1195 if (len == 0) {
1337 1196 off = P2PHASE(dumpbuf.vp_off, dumpbuf.size);
1338 1197 if (off == 0 || !ISP2(dumpbuf.size)) {
1339 1198 (void) dumpvp_flush();
1340 1199 } else {
1341 1200 sz = dumpbuf.size - off;
1342 1201 dumpbuf.cur = dumpbuf.start + sz;
1343 1202 (void) dumpvp_flush();
1344 1203 ovbcopy(dumpbuf.start + sz, dumpbuf.start, off);
1345 1204 dumpbuf.cur += off;
1346 1205 }
1347 1206 } else {
1348 1207 bcopy(va, dumpbuf.cur, len);
1349 1208 va = (char *)va + len;
1350 1209 dumpbuf.cur += len;
1351 1210 size -= len;
1352 1211 }
1353 1212 }
1354 1213 }
1355 1214
1356 1215 /*ARGSUSED*/
1357 1216 static void
1358 1217 dumpvp_ksyms_write(const void *src, void *dst, size_t size)
1359 1218 {
1360 1219 dumpvp_write(src, size);
1361 1220 }
1362 1221
1363 1222 /*
1364 1223 * Mark 'pfn' in the bitmap and dump its translation table entry.
1365 1224 */
1366 1225 void
1367 1226 dump_addpage(struct as *as, void *va, pfn_t pfn)
1368 1227 {
1369 1228 mem_vtop_t mem_vtop;
1370 1229 pgcnt_t bitnum;
1371 1230
1372 1231 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1373 1232 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1374 1233 dumphdr->dump_npages++;
1375 1234 BT_SET(dumpcfg.bitmap, bitnum);
1376 1235 }
1377 1236 dumphdr->dump_nvtop++;
1378 1237 mem_vtop.m_as = as;
1379 1238 mem_vtop.m_va = va;
1380 1239 mem_vtop.m_pfn = pfn;
1381 1240 dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
1382 1241 }
1383 1242 dump_timeleft = dump_timeout;
1384 1243 }
1385 1244
1386 1245 /*
1387 1246 * Mark 'pfn' in the bitmap
1388 1247 */
1389 1248 void
1390 1249 dump_page(pfn_t pfn)
1391 1250 {
1392 1251 pgcnt_t bitnum;
1393 1252
1394 1253 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1395 1254 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1396 1255 dumphdr->dump_npages++;
1397 1256 BT_SET(dumpcfg.bitmap, bitnum);
1398 1257 }
1399 1258 }
1400 1259 dump_timeleft = dump_timeout;
1401 1260 }
1402 1261
1403 1262 /*
1404 1263 * Dump the <as, va, pfn> information for a given address space.
1405 1264 * SEGOP_DUMP() will call dump_addpage() for each page in the segment.
1406 1265 */
1407 1266 static void
1408 1267 dump_as(struct as *as)
1409 1268 {
1410 1269 struct seg *seg;
1411 1270
1412 1271 AS_LOCK_ENTER(as, RW_READER);
1413 1272 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
1414 1273 if (seg->s_as != as)
1415 1274 break;
1416 1275 if (seg->s_ops == NULL)
1417 1276 continue;
1418 1277 SEGOP_DUMP(seg);
1419 1278 }
1420 1279 AS_LOCK_EXIT(as);
1421 1280
1422 1281 if (seg != NULL)
1423 1282 cmn_err(CE_WARN, "invalid segment %p in address space %p",
1424 1283 (void *)seg, (void *)as);
1425 1284 }
1426 1285
1427 1286 static int
1428 1287 dump_process(pid_t pid)
1429 1288 {
1430 1289 proc_t *p = sprlock(pid);
1431 1290
1432 1291 if (p == NULL)
1433 1292 return (-1);
1434 1293 if (p->p_as != &kas) {
1435 1294 mutex_exit(&p->p_lock);
1436 1295 dump_as(p->p_as);
1437 1296 mutex_enter(&p->p_lock);
1438 1297 }
1439 1298
1440 1299 sprunlock(p);
1441 1300
1442 1301 return (0);
1443 1302 }
1444 1303
1445 1304 /*
1446 1305 * The following functions (dump_summary(), dump_ereports(), and
1447 1306 * dump_messages()), write data to an uncompressed area within the
1448 1307 * crashdump. The layout of these is
1449 1308 *
1450 1309 * +------------------------------------------------------------+
1451 1310 * | compressed pages | summary | ereports | messages |
1452 1311 * +------------------------------------------------------------+
1453 1312 *
1454 1313 * With the advent of saving a compressed crash dump by default, we
1455 1314 * need to save a little more data to describe the failure mode in
1456 1315 * an uncompressed buffer available before savecore uncompresses
1457 1316 * the dump. Initially this is a copy of the stack trace. Additional
1458 1317 * summary information should be added here.
1459 1318 */
1460 1319
1461 1320 void
1462 1321 dump_summary(void)
1463 1322 {
1464 1323 u_offset_t dumpvp_start;
1465 1324 summary_dump_t sd;
1466 1325
1467 1326 if (dumpvp == NULL || dumphdr == NULL)
1468 1327 return;
1469 1328
1470 1329 dumpbuf.cur = dumpbuf.start;
1471 1330
1472 1331 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE +
1473 1332 DUMP_ERPTSIZE);
1474 1333 dumpvp_start = dumpbuf.vp_limit - DUMP_SUMMARYSIZE;
1475 1334 dumpbuf.vp_off = dumpvp_start;
1476 1335
1477 1336 sd.sd_magic = SUMMARY_MAGIC;
1478 1337 sd.sd_ssum = checksum32(dump_stack_scratch, STACK_BUF_SIZE);
1479 1338 dumpvp_write(&sd, sizeof (sd));
1480 1339 dumpvp_write(dump_stack_scratch, STACK_BUF_SIZE);
1481 1340
1482 1341 sd.sd_magic = 0; /* indicate end of summary */
1483 1342 dumpvp_write(&sd, sizeof (sd));
1484 1343 (void) dumpvp_flush();
1485 1344 }
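The vp_limit/vp_off arithmetic above carves the summary area out of the tail of the dump device; dump_ereports() and dump_messages() below do the same for their areas. A standalone sketch of that math, with made-up sizes standing in for the platform-defined DUMP_* constants:

	#include <stdio.h>

	/* Assumed sizes for illustration; real values are platform-defined. */
	#define	DUMP_OFFSET	(1ULL << 16)	/* terminal header area */
	#define	DUMP_LOGSIZE	(1ULL << 20)	/* message save area */
	#define	DUMP_ERPTSIZE	(1ULL << 18)	/* ereport save area */
	#define	DUMP_SUMMARYSIZE (1ULL << 15)	/* summary save area */

	int
	main(void)
	{
		unsigned long long dumpvp_size = 1ULL << 30;	/* 1 GiB */

		/* Same arithmetic as the three dump_*() functions. */
		unsigned long long summary = dumpvp_size - (DUMP_OFFSET +
		    DUMP_LOGSIZE + DUMP_ERPTSIZE + DUMP_SUMMARYSIZE);
		unsigned long long ereports = dumpvp_size - (DUMP_OFFSET +
		    DUMP_LOGSIZE + DUMP_ERPTSIZE);
		unsigned long long messages = dumpvp_size - (DUMP_OFFSET +
		    DUMP_LOGSIZE);

		(void) printf("summary  starts at %llu\n", summary);
		(void) printf("ereports starts at %llu\n", ereports);
		(void) printf("messages starts at %llu\n", messages);
		return (0);
	}

Each area thus sits at a fixed offset from the end of the device, so savecore can locate it without decompressing anything.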
1486 1345
1487 1346 void
1488 1347 dump_ereports(void)
1489 1348 {
1490 1349 u_offset_t dumpvp_start;
1491 1350 erpt_dump_t ed;
1492 1351
1493 1352 if (dumpvp == NULL || dumphdr == NULL)
1494 1353 return;
1495 1354
1496 1355 dumpbuf.cur = dumpbuf.start;
1497 1356 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE);
1498 1357 dumpvp_start = dumpbuf.vp_limit - DUMP_ERPTSIZE;
1499 1358 dumpbuf.vp_off = dumpvp_start;
1500 1359
1501 1360 fm_ereport_dump();
1502 1361 if (panicstr)
1503 1362 errorq_dump();
1504 1363
1505 1364 bzero(&ed, sizeof (ed)); /* indicate end of ereports */
1506 1365 dumpvp_write(&ed, sizeof (ed));
1507 1366 (void) dumpvp_flush();
1508 1367
1509 1368 if (!panicstr) {
1510 1369 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1511 1370 (size_t)(dumpbuf.vp_off - dumpvp_start),
1512 1371 B_INVAL | B_FORCE, kcred, NULL);
1513 1372 }
1514 1373 }
1515 1374
1516 1375 void
1517 1376 dump_messages(void)
1518 1377 {
1519 1378 log_dump_t ld;
1520 1379 mblk_t *mctl, *mdata;
1521 1380 queue_t *q, *qlast;
1522 1381 u_offset_t dumpvp_start;
1523 1382
1524 1383 if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL)
1525 1384 return;
1526 1385
1527 1386 dumpbuf.cur = dumpbuf.start;
1528 1387 dumpbuf.vp_limit = dumpvp_size - DUMP_OFFSET;
1529 1388 dumpvp_start = dumpbuf.vp_limit - DUMP_LOGSIZE;
1530 1389 dumpbuf.vp_off = dumpvp_start;
1531 1390
1532 1391 qlast = NULL;
1533 1392 do {
1534 1393 for (q = log_consq; q->q_next != qlast; q = q->q_next)
1535 1394 continue;
1536 1395 for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) {
1537 1396 dump_timeleft = dump_timeout;
1538 1397 mdata = mctl->b_cont;
1539 1398 ld.ld_magic = LOG_MAGIC;
1540 1399 ld.ld_msgsize = MBLKL(mctl->b_cont);
1541 1400 ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl));
1542 1401 ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata));
1543 1402 dumpvp_write(&ld, sizeof (ld));
1544 1403 dumpvp_write(mctl->b_rptr, MBLKL(mctl));
1545 1404 dumpvp_write(mdata->b_rptr, MBLKL(mdata));
1546 1405 }
1547 1406 } while ((qlast = q) != log_consq);
1548 1407
1549 1408 ld.ld_magic = 0; /* indicate end of messages */
1550 1409 dumpvp_write(&ld, sizeof (ld));
1551 1410 (void) dumpvp_flush();
1552 1411 if (!panicstr) {
1553 1412 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1554 1413 (size_t)(dumpbuf.vp_off - dumpvp_start),
1555 1414 B_INVAL | B_FORCE, kcred, NULL);
1556 1415 }
1557 1416 }
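The do/while loop above visits the console queues from the tail back toward log_consq even though q_next only points forward: each pass rescans from the head and stops at the queue just before the one handled last. The same trick on a plain forward-linked list (toy types, not STREAMS):

	#include <stdio.h>
	#include <stddef.h>

	typedef struct node {
		struct node	*next;
		int		id;
	} node_t;

	int
	main(void)
	{
		node_t c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
		node_t *head = &a, *n, *last = NULL;

		do {
			/* Scan forward to the node just before 'last'. */
			for (n = head; n->next != last; n = n->next)
				continue;
			(void) printf("visiting %d\n", n->id); /* 3, 2, 1 */
		} while ((last = n) != head);
		return (0);
	}

This is quadratic in the list length, which is acceptable here because console queue chains are short and no back pointers are available at panic time.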
1558 1417
1559 1418 /*
1560 1419 * The following functions are called on multiple CPUs during dump.
1561 1420 * They must not use most kernel services, because all cross-calls are
1562 1421 * disabled during panic. Therefore, blocking locks and cache flushes
1563 1422 * will not work.
1564 1423 */
1565 1424
1566 1425 /*
1567 1426 * Copy pages, trapping ECC errors. Also, for robustness, trap data
1568 1427 * access in case something goes wrong in the hat layer and the
1569 1428 * mapping is broken.
1570 1429 */
1571 1430 static int
1572 1431 dump_pagecopy(void *src, void *dst)
1573 1432 {
1574 1433 long *wsrc = (long *)src;
1575 1434 long *wdst = (long *)dst;
1576 1435 const ulong_t ncopies = PAGESIZE / sizeof (long);
1577 1436 volatile int w = 0;
1578 1437 volatile int ueoff = -1;
1579 1438 on_trap_data_t otd;
1580 1439
1581 1440 if (on_trap(&otd, OT_DATA_EC | OT_DATA_ACCESS)) {
1582 1441 if (ueoff == -1)
1583 1442 ueoff = w * sizeof (long);
1584 1443 /* report "bad ECC" or "bad address" */
1585 1444 #ifdef _LP64
1586 1445 if (otd.ot_trap & OT_DATA_EC)
1587 1446 wdst[w++] = 0x00badecc00badecc;
1588 1447 else
1589 1448 wdst[w++] = 0x00badadd00badadd;
1590 1449 #else
1591 1450 if (otd.ot_trap & OT_DATA_EC)
1592 1451 wdst[w++] = 0x00badecc;
1593 1452 else
1594 1453 wdst[w++] = 0x00badadd;
1595 1454 #endif
1596 1455 }
1597 1456 while (w < ncopies) {
1598 1457 wdst[w] = wsrc[w];
1599 1458 w++;
1600 1459 }
1601 1460 no_trap();
1602 1461 return (ueoff);
1603 1462 }
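on_trap() exists only in the kernel, but the shape of dump_pagecopy() can be shown with a user-land analogue. The sketch below is only an approximation, assuming SIGSEGV plus sigsetjmp() stands in for the trap machinery: on a fault it substitutes a marker word and resumes with the next word.

	#include <setjmp.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>

	static sigjmp_buf jb;

	static void
	segv_handler(int sig)
	{
		(void) sig;
		siglongjmp(jb, 1);
	}

	static long
	copy_words(const long *src, long *dst, size_t nwords)
	{
		struct sigaction sa;
		volatile size_t w = 0;
		volatile long ueoff = -1;

		(void) memset(&sa, 0, sizeof (sa));
		sa.sa_handler = segv_handler;
		(void) sigaction(SIGSEGV, &sa, NULL);

		if (sigsetjmp(jb, 1)) {
			/* Faulted at word w: note first bad offset, mark. */
			if (ueoff == -1)
				ueoff = (long)(w * sizeof (long));
			dst[w] = 0x00badadd;	/* "bad address" marker */
			w++;
		}
		while (w < nwords) {
			dst[w] = src[w];
			w++;
		}
		return (ueoff);		/* -1 means the copy was clean */
	}

	int
	main(void)
	{
		long src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, dst[8];

		(void) printf("ueoff=%ld\n", copy_words(src, dst, 8));
		return (0);
	}

As in the kernel version, w and ueoff are volatile so their values survive the non-local jump.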
1604 1463
1605 1464 static void
1606 1465 dumpsys_close_cq(cqueue_t *cq, int live)
1607 1466 {
1608 1467 if (live) {
1609 1468 mutex_enter(&cq->mutex);
1610 1469 atomic_dec_uint(&cq->open);
1611 1470 cv_signal(&cq->cv);
1612 1471 mutex_exit(&cq->mutex);
1613 1472 } else {
1614 1473 atomic_dec_uint(&cq->open);
1615 1474 }
1616 1475 }
1617 1476
1618 1477 static inline void
1619 1478 dumpsys_spinlock(lock_t *lp)
1620 1479 {
1621 1480 uint_t backoff = 0;
1622 1481 int loop_count = 0;
1623 1482
1624 1483 while (LOCK_HELD(lp) || !lock_spin_try(lp)) {
1625 1484 if (++loop_count >= ncpus) {
1626 1485 backoff = mutex_lock_backoff(0);
1627 1486 loop_count = 0;
1628 1487 } else {
1629 1488 backoff = mutex_lock_backoff(backoff);
1630 1489 }
1631 1490 mutex_lock_delay(backoff);
1632 1491 }
1633 1492 }
1634 1493
1635 1494 static inline void
1636 1495 dumpsys_spinunlock(lock_t *lp)
1637 1496 {
1638 1497 lock_clear(lp);
1639 1498 }
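mutex_lock_backoff() and mutex_lock_delay() are kernel-private, so here is a sketch of the same try-with-backoff shape using C11 atomics; the backoff policy below is invented for illustration only:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag toy_lock = ATOMIC_FLAG_INIT;

	static void
	toy_delay(unsigned spins)
	{
		volatile unsigned i;

		for (i = 0; i < spins; i++)
			;
	}

	static void
	toy_spinlock(void)
	{
		unsigned backoff = 0;

		/* Retry; back off further after each failed attempt. */
		while (atomic_flag_test_and_set_explicit(&toy_lock,
		    memory_order_acquire)) {
			backoff = (backoff < 4096) ? backoff + 64 : 64;
			toy_delay(backoff);
		}
	}

	static void
	toy_spinunlock(void)
	{
		atomic_flag_clear_explicit(&toy_lock, memory_order_release);
	}

	int
	main(void)
	{
		toy_spinlock();
		(void) printf("locked\n");
		toy_spinunlock();
		(void) printf("unlocked\n");
		return (0);
	}

The kernel version additionally checks LOCK_HELD() before each try so that waiters spin on a cached read instead of hammering the lock with atomic operations.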
1640 1499
1641 1500 static inline void
1642 1501 dumpsys_lock(cqueue_t *cq, int live)
1643 1502 {
1644 1503 if (live)
1645 1504 mutex_enter(&cq->mutex);
1646 1505 else
1647 1506 dumpsys_spinlock(&cq->spinlock);
1648 1507 }
1649 1508
1650 1509 static inline void
1651 1510 dumpsys_unlock(cqueue_t *cq, int live, int signal)
1652 1511 {
1653 1512 if (live) {
1654 1513 if (signal)
1655 1514 cv_signal(&cq->cv);
1656 1515 mutex_exit(&cq->mutex);
1657 1516 } else {
1658 1517 dumpsys_spinunlock(&cq->spinlock);
1659 1518 }
1660 1519 }
1661 1520
1662 1521 static void
1663 1522 dumpsys_wait_cq(cqueue_t *cq, int live)
1664 1523 {
1665 1524 if (live) {
1666 1525 cv_wait(&cq->cv, &cq->mutex);
1667 1526 } else {
1668 1527 dumpsys_spinunlock(&cq->spinlock);
1669 1528 while (cq->open)
1670 1529 if (cq->first)
1671 1530 break;
1672 1531 dumpsys_spinlock(&cq->spinlock);
1673 1532 }
1674 1533 }
1675 1534
1676 1535 static void
1677 1536 dumpsys_put_cq(cqueue_t *cq, cbuf_t *cp, int newstate, int live)
1678 1537 {
1679 1538 if (cp == NULL)
1680 1539 return;
1681 1540
1682 1541 dumpsys_lock(cq, live);
1683 1542
1684 1543 if (cq->ts != 0) {
1685 1544 cq->empty += gethrtime() - cq->ts;
1686 1545 cq->ts = 0;
1687 1546 }
1688 1547
1689 1548 cp->state = newstate;
1690 1549 cp->next = NULL;
1691 1550 if (cq->last == NULL)
1692 1551 cq->first = cp;
1693 1552 else
1694 1553 cq->last->next = cp;
1695 1554 cq->last = cp;
1696 1555
1697 1556 dumpsys_unlock(cq, live, 1);
1698 1557 }
1699 1558
1700 1559 static cbuf_t *
1701 1560 dumpsys_get_cq(cqueue_t *cq, int live)
1702 1561 {
1703 1562 cbuf_t *cp;
1704 1563 hrtime_t now = gethrtime();
1705 1564
1706 1565 dumpsys_lock(cq, live);
1707 1566
1708 1567 /* CONSTCOND */
1709 1568 while (1) {
1710 1569 cp = (cbuf_t *)cq->first;
1711 1570 if (cp == NULL) {
1712 1571 if (cq->open == 0)
1713 1572 break;
1714 1573 dumpsys_wait_cq(cq, live);
1715 1574 continue;
1716 1575 }
1717 1576 cq->first = cp->next;
1718 1577 if (cq->first == NULL) {
1719 1578 cq->last = NULL;
1720 1579 cq->ts = now;
1721 1580 }
1722 1581 break;
1723 1582 }
1724 1583
1725 1584 dumpsys_unlock(cq, live, cq->first != NULL || cq->open == 0);
1726 1585 return (cp);
1727 1586 }
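Stripped of the locking and the empty-time accounting, dumpsys_put_cq() and dumpsys_get_cq() implement an intrusive singly-linked FIFO with first/last pointers. A single-threaded sketch (toy names, not the kernel's):

	#include <stdio.h>
	#include <stddef.h>

	typedef struct toy_cbuf {
		struct toy_cbuf	*next;
		int		state;
	} toy_cbuf_t;

	typedef struct {
		toy_cbuf_t	*first;
		toy_cbuf_t	*last;
	} toy_cqueue_t;

	static void
	toy_put(toy_cqueue_t *cq, toy_cbuf_t *cp, int newstate)
	{
		cp->state = newstate;
		cp->next = NULL;
		if (cq->last == NULL)
			cq->first = cp;		/* queue was empty */
		else
			cq->last->next = cp;
		cq->last = cp;
	}

	static toy_cbuf_t *
	toy_get(toy_cqueue_t *cq)
	{
		toy_cbuf_t *cp = cq->first;

		if (cp != NULL) {
			cq->first = cp->next;
			if (cq->first == NULL)
				cq->last = NULL;	/* queue drained */
		}
		return (cp);
	}

	int
	main(void)
	{
		toy_cqueue_t q = { NULL, NULL };
		toy_cbuf_t a, b;

		toy_put(&q, &a, 1);
		toy_put(&q, &b, 2);
		while (toy_get(&q) != NULL)
			(void) printf("dequeued one buffer\n");
		return (0);
	}

In the kernel version, the open count is what lets a consumer distinguish "empty for now" (wait) from "closed and empty" (return NULL).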
1728 1587
1729 1588 /*
1730 1589 * Send an error message to the console. If the main task is running,
1731 1590 * just write the message via uprintf. If a helper is running, the
1732 1591 * message has to be put on a queue for the main task. Setting fmt to
1733 1592 * NULL means flush the error message buffer. If fmt is not NULL, just
1734 1593 * add the text to the existing buffer.
1735 1594 */
1736 1595 static void
1737 1596 dumpsys_errmsg(helper_t *hp, const char *fmt, ...)
1738 1597 {
1739 1598 dumpsync_t *ds = hp->ds;
1740 1599 cbuf_t *cp = hp->cperr;
1741 1600 va_list adx;
1742 1601
1743 1602 if (hp->helper == MAINHELPER) {
1744 1603 if (fmt != NULL) {
1745 1604 if (ds->neednl) {
1746 1605 uprintf("\n");
1747 1606 ds->neednl = 0;
1748 1607 }
1749 1608 va_start(adx, fmt);
1750 1609 vuprintf(fmt, adx);
1751 1610 va_end(adx);
1752 1611 }
1753 1612 } else if (fmt == NULL) {
1754 1613 if (cp != NULL) {
1755 1614 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1756 1615 hp->cperr = NULL;
1757 1616 }
1758 1617 } else {
1759 1618 if (hp->cperr == NULL) {
1760 1619 cp = CQ_GET(freebufq);
1761 1620 hp->cperr = cp;
1762 1621 cp->used = 0;
1763 1622 }
1764 1623 va_start(adx, fmt);
1765 1624 cp->used += vsnprintf(cp->buf + cp->used, cp->size - cp->used,
1766 1625 fmt, adx);
1767 1626 va_end(adx);
1768 1627 if ((cp->used + LOG_MSGSIZE) > cp->size) {
1769 1628 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1770 1629 hp->cperr = NULL;
1771 1630 }
1772 1631 }
1773 1632 }
1774 1633
1775 1634 /*
1776 1635 * Write an output buffer to the dump file. If the main task is
1777 1636 * running, just write the data. If a helper is running, the output is
1778 1637 * placed on a queue for the main task.
1779 1638 */
1780 1639 static void
1781 1640 dumpsys_swrite(helper_t *hp, cbuf_t *cp, size_t used)
1782 1641 {
1783 1642 dumpsync_t *ds = hp->ds;
1784 1643
1785 1644 if (hp->helper == MAINHELPER) {
1786 1645 HRSTART(ds->perpage, write);
1787 1646 dumpvp_write(cp->buf, used);
1788 1647 HRSTOP(ds->perpage, write);
1789 1648 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
1790 1649 } else {
1791 1650 cp->used = used;
1792 1651 CQ_PUT(mainq, cp, CBUF_WRITE);
1793 1652 }
1794 1653 }
1795 1654
1796 1655 /*
1797 1656 * Copy one page within the mapped range. The offset starts at 0 and
1798 1657 * is relative to the first pfn. cp->buf + cp->off is the address of
1799 1658 * the first pfn. If dump_pagecopy returns a UE offset, create an
1800 1659 * error message. Returns the offset to the next pfn in the range
1801 1660 * selected by the bitmap.
1802 1661 */
1803 1662 static int
1804 1663 dumpsys_copy_page(helper_t *hp, int offset)
1805 1664 {
1806 1665 cbuf_t *cp = hp->cpin;
1807 1666 int ueoff;
1808 1667
1809 1668 ASSERT(cp->off + offset + PAGESIZE <= cp->size);
1810 1669 ASSERT(BT_TEST(dumpcfg.bitmap, cp->bitnum));
1811 1670
1812 1671 ueoff = dump_pagecopy(cp->buf + cp->off + offset, hp->page);
1813 1672
1814 1673 /* ueoff is the offset in the page to a UE error */
1815 1674 if (ueoff != -1) {
1816 1675 uint64_t pa = ptob(cp->pfn) + offset + ueoff;
1817 1676
1818 1677 dumpsys_errmsg(hp, "cpu %d: memory error at PA 0x%08x.%08x\n",
1819 1678 CPU->cpu_id, (uint32_t)(pa >> 32), (uint32_t)pa);
1820 1679 }
1821 1680
1822 1681 /*
1823 1682 * Advance bitnum and offset to the next input page for the
1824 1683 * next call to this function.
1825 1684 */
1826 1685 offset += PAGESIZE;
1827 1686 cp->bitnum++;
1828 1687 while (cp->off + offset < cp->size) {
1829 1688 if (BT_TEST(dumpcfg.bitmap, cp->bitnum))
1830 1689 break;
1831 1690 offset += PAGESIZE;
1832 1691 cp->bitnum++;
1833 1692 }
1834 1693
1835 1694 return (offset);
1836 1695 }
1837 1696
1838 1697 /*
1839 1698 * Read the helper queue, and copy one mapped page. Return 0 when
1840 1699 * done. Return 1 when a page has been copied into hp->page.
1841 1700 */
1842 1701 static int
1843 1702 dumpsys_sread(helper_t *hp)
1844 1703 {
1845 1704 dumpsync_t *ds = hp->ds;
1846 1705
1847 1706 /* CONSTCOND */
1848 1707 while (1) {
1849 1708
1850 1709 /* Find the next input buffer. */
1851 1710 if (hp->cpin == NULL) {
1852 1711 HRSTART(hp->perpage, inwait);
1853 1712
1854 1713 /* CONSTCOND */
1855 1714 while (1) {
1856 1715 hp->cpin = CQ_GET(helperq);
1857 1716 dump_timeleft = dump_timeout;
1858 1717
1859 1718 /*
1860 1719 * NULL return means the helper queue
1861 1720 * is closed and empty.
1862 1721 */
1863 1722 if (hp->cpin == NULL)
1864 1723 break;
1865 1724
1866 1725 /* Have input, check for dump I/O error. */
1867 1726 if (!dump_ioerr)
1868 1727 break;
1869 1728
1870 1729 /*
1871 1730 * If an I/O error occurs, stay in the
1872 1731 * loop in order to empty the helper
1873 1732 * queue. Return the buffers to the
1874 1733 * main task to unmap and free it.
1875 1734 */
1876 1735 hp->cpin->used = 0;
1877 1736 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1878 1737 }
1879 1738 HRSTOP(hp->perpage, inwait);
1880 1739
1881 1740 /* Stop here when the helper queue is closed. */
1882 1741 if (hp->cpin == NULL)
1883 1742 break;
1884 1743
1885 1744 /* Set the offset to 0 to get the first pfn. */
1886 1745 hp->in = 0;
1887 1746
1888 1747 /* Set the total processed to 0 */
1889 1748 hp->used = 0;
1890 1749 }
1891 1750
1892 1751 /* Process the next page. */
1893 1752 if (hp->used < hp->cpin->used) {
1894 1753
1895 1754 /*
1896 1755 * Get the next page from the input buffer and
1897 1756 * return a copy.
1898 1757 */
1899 1758 ASSERT(hp->in != -1);
1900 1759 HRSTART(hp->perpage, copy);
1901 1760 hp->in = dumpsys_copy_page(hp, hp->in);
1902 1761 hp->used += PAGESIZE;
1903 1762 HRSTOP(hp->perpage, copy);
1904 1763 break;
1905 1764
1906 1765 } else {
1907 1766
1908 1767 /*
1909 1768 * Done with the input. Flush the VM and
1910 1769 * return the buffer to the main task.
1911 1770 */
1912 1771 if (panicstr && hp->helper != MAINHELPER)
1913 - hat_flush_range(kas.a_hat,
1914 - hp->cpin->buf, hp->cpin->size);
1772 + hat_flush();
1915 1773 dumpsys_errmsg(hp, NULL);
1916 1774 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1917 1775 hp->cpin = NULL;
1918 1776 }
1919 1777 }
1920 1778
1921 1779 return (hp->cpin != NULL);
1922 1780 }
1923 1781
1924 1782 /*
1925 - * Compress size bytes starting at buf with bzip2
1926 - * mode:
1927 - * BZ_RUN add one more compressed page
1928 - * BZ_FINISH no more input, flush the state
1929 - */
1930 -static void
1931 -dumpsys_bzrun(helper_t *hp, void *buf, size_t size, int mode)
1932 -{
1933 - dumpsync_t *ds = hp->ds;
1934 - const int CSIZE = sizeof (dumpcsize_t);
1935 - bz_stream *ps = &hp->bzstream;
1936 - int rc = 0;
1937 - uint32_t csize;
1938 - dumpcsize_t cs;
1939 -
1940 - /* Set input pointers to new input page */
1941 - if (size > 0) {
1942 - ps->avail_in = size;
1943 - ps->next_in = buf;
1944 - }
1945 -
1946 - /* CONSTCOND */
1947 - while (1) {
1948 -
1949 - /* Quit when all input has been consumed */
1950 - if (ps->avail_in == 0 && mode == BZ_RUN)
1951 - break;
1952 -
1953 - /* Get a new output buffer */
1954 - if (hp->cpout == NULL) {
1955 - HRSTART(hp->perpage, outwait);
1956 - hp->cpout = CQ_GET(freebufq);
1957 - HRSTOP(hp->perpage, outwait);
1958 - ps->avail_out = hp->cpout->size - CSIZE;
1959 - ps->next_out = hp->cpout->buf + CSIZE;
1960 - }
1961 -
1962 - /* Compress input, or finalize */
1963 - HRSTART(hp->perpage, compress);
1964 - rc = BZ2_bzCompress(ps, mode);
1965 - HRSTOP(hp->perpage, compress);
1966 -
1967 - /* Check for error */
1968 - if (mode == BZ_RUN && rc != BZ_RUN_OK) {
1969 - dumpsys_errmsg(hp, "%d: BZ_RUN error %s at page %lx\n",
1970 - hp->helper, BZ2_bzErrorString(rc),
1971 - hp->cpin->pagenum);
1972 - break;
1973 - }
1974 -
1975 - /* Write the buffer if it is full, or we are flushing */
1976 - if (ps->avail_out == 0 || mode == BZ_FINISH) {
1977 - csize = hp->cpout->size - CSIZE - ps->avail_out;
1978 - cs = DUMP_SET_TAG(csize, hp->tag);
1979 - if (csize > 0) {
1980 - (void) memcpy(hp->cpout->buf, &cs, CSIZE);
1981 - dumpsys_swrite(hp, hp->cpout, csize + CSIZE);
1982 - hp->cpout = NULL;
1983 - }
1984 - }
1985 -
1986 - /* Check for final complete */
1987 - if (mode == BZ_FINISH) {
1988 - if (rc == BZ_STREAM_END)
1989 - break;
1990 - if (rc != BZ_FINISH_OK) {
1991 - dumpsys_errmsg(hp, "%d: BZ_FINISH error %s\n",
1992 - hp->helper, BZ2_bzErrorString(rc));
1993 - break;
1994 - }
1995 - }
1996 - }
1997 -
1998 - /* Cleanup state and buffers */
1999 - if (mode == BZ_FINISH) {
2000 -
2001 - /* Reset state so that it is re-usable. */
2002 - (void) BZ2_bzCompressReset(&hp->bzstream);
2003 -
2004 - /* Give any unused outout buffer to the main task */
2005 - if (hp->cpout != NULL) {
2006 - hp->cpout->used = 0;
2007 - CQ_PUT(mainq, hp->cpout, CBUF_ERRMSG);
2008 - hp->cpout = NULL;
2009 - }
2010 - }
2011 -}
2012 -
2013 -static void
2014 -dumpsys_bz2compress(helper_t *hp)
2015 -{
2016 - dumpsync_t *ds = hp->ds;
2017 - dumpstreamhdr_t sh;
2018 -
2019 - (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2020 - sh.stream_pagenum = (pgcnt_t)-1;
2021 - sh.stream_npages = 0;
2022 - hp->cpin = NULL;
2023 - hp->cpout = NULL;
2024 - hp->cperr = NULL;
2025 - hp->in = 0;
2026 - hp->out = 0;
2027 - hp->bzstream.avail_in = 0;
2028 -
2029 - /* Bump reference to mainq while we are running */
2030 - CQ_OPEN(mainq);
2031 -
2032 - /* Get one page at a time */
2033 - while (dumpsys_sread(hp)) {
2034 - if (sh.stream_pagenum != hp->cpin->pagenum) {
2035 - sh.stream_pagenum = hp->cpin->pagenum;
2036 - sh.stream_npages = btop(hp->cpin->used);
2037 - dumpsys_bzrun(hp, &sh, sizeof (sh), BZ_RUN);
2038 - }
2039 - dumpsys_bzrun(hp, hp->page, PAGESIZE, 0);
2040 - }
2041 -
2042 - /* Done with input, flush any partial buffer */
2043 - if (sh.stream_pagenum != (pgcnt_t)-1) {
2044 - dumpsys_bzrun(hp, NULL, 0, BZ_FINISH);
2045 - dumpsys_errmsg(hp, NULL);
2046 - }
2047 -
2048 - ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2049 -
2050 - /* Decrement main queue count, we are done */
2051 - CQ_CLOSE(mainq);
2052 -}
2053 -
2054 -/*
2055 1783 * Compress with lzjb.
2056 1784 * Write out the stream block when it is full or when size == 0
2057 1785 * (a flush request). If csize == 0, write a stream header;
2058 1786 * otherwise write the pair <csize, data>.
2059 1787 * hp->cpout is the buffer we are flushing or filling
2060 1788 * hp->out is the next offset to fill with data
2061 1789 * osize is either csize + data, or the size of a stream header
2062 1790 */
2063 1791 static void
2064 1792 dumpsys_lzjbrun(helper_t *hp, size_t csize, void *buf, size_t size)
2065 1793 {
2066 1794 dumpsync_t *ds = hp->ds;
2067 1795 const int CSIZE = sizeof (dumpcsize_t);
2068 1796 dumpcsize_t cs;
2069 1797 size_t osize = csize > 0 ? CSIZE + size : size;
2070 1798
2071 1799 /* If flush, and there is no buffer, just return */
2072 1800 if (size == 0 && hp->cpout == NULL)
2073 1801 return;
2074 1802
2075 1803 /* If flush, or cpout is full, write it out */
2076 1804 if (size == 0 ||
2077 1805 hp->cpout != NULL && hp->out + osize > hp->cpout->size) {
2078 1806
2079 1807 /* Set tag+size word at the front of the stream block. */
2080 1808 cs = DUMP_SET_TAG(hp->out - CSIZE, hp->tag);
2081 1809 (void) memcpy(hp->cpout->buf, &cs, CSIZE);
2082 1810
2083 1811 /* Write block to dump file. */
2084 1812 dumpsys_swrite(hp, hp->cpout, hp->out);
2085 1813
2086 1814 /* Clear pointer to indicate we need a new buffer */
2087 1815 hp->cpout = NULL;
2088 1816
2089 1817 /* flushing, we are done */
2090 1818 if (size == 0)
2091 1819 return;
2092 1820 }
2093 1821
2094 1822 /* Get an output buffer if we don't have one. */
2095 1823 if (hp->cpout == NULL) {
2096 1824 HRSTART(hp->perpage, outwait);
2097 1825 hp->cpout = CQ_GET(freebufq);
2098 1826 HRSTOP(hp->perpage, outwait);
2099 1827 hp->out = CSIZE;
2100 1828 }
2101 1829
2102 1830 /* Store csize word. This is the size of compressed data. */
2103 1831 if (csize > 0) {
2104 1832 cs = DUMP_SET_TAG(csize, 0);
2105 1833 (void) memcpy(hp->cpout->buf + hp->out, &cs, CSIZE);
2106 1834 hp->out += CSIZE;
2107 1835 }
2108 1836
2109 1837 /* Store the data. */
2110 1838 (void) memcpy(hp->cpout->buf + hp->out, buf, size);
2111 1839 hp->out += size;
2112 1840 }
2113 1841
2114 1842 static void
2115 1843 dumpsys_lzjbcompress(helper_t *hp)
2116 1844 {
2117 1845 dumpsync_t *ds = hp->ds;
2118 1846 size_t csize;
2119 1847 dumpstreamhdr_t sh;
2120 1848
2121 1849 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2122 1850 sh.stream_pagenum = (pfn_t)-1;
2123 1851 sh.stream_npages = 0;
2124 1852 hp->cpin = NULL;
2125 1853 hp->cpout = NULL;
2126 1854 hp->cperr = NULL;
2127 1855 hp->in = 0;
2128 1856 hp->out = 0;
2129 1857
2130 1858 /* Bump reference to mainq while we are running */
2131 1859 CQ_OPEN(mainq);
2132 1860
2133 1861 /* Get one page at a time */
2134 1862 while (dumpsys_sread(hp)) {
2135 1863
2136 1864 /* Create a stream header for each new input map */
2137 1865 if (sh.stream_pagenum != hp->cpin->pagenum) {
2138 1866 sh.stream_pagenum = hp->cpin->pagenum;
2139 1867 sh.stream_npages = btop(hp->cpin->used);
2140 1868 dumpsys_lzjbrun(hp, 0, &sh, sizeof (sh));
2141 1869 }
2142 1870
2143 1871 /* Compress one page */
2144 1872 HRSTART(hp->perpage, compress);
2145 1873 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2146 1874 HRSTOP(hp->perpage, compress);
2147 1875
2148 1876 /* Add csize+data to output block */
2149 1877 ASSERT(csize > 0 && csize <= PAGESIZE);
2150 1878 dumpsys_lzjbrun(hp, csize, hp->lzbuf, csize);
2151 1879 }
2152 1880
2153 1881 /* Done with input, flush any partial buffer */
2154 1882 if (sh.stream_pagenum != (pfn_t)-1) {
2155 1883 dumpsys_lzjbrun(hp, 0, NULL, 0);
2156 1884 dumpsys_errmsg(hp, NULL);
2157 1885 }
2158 1886
2159 1887 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2160 1888
2161 1889 /* Decrement main queue count, we are done */
2162 1890 CQ_CLOSE(mainq);
2163 1891 }
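Each stream block that dumpsys_lzjbrun() emits begins with a dumpcsize_t word combining the helper's stream tag with the block's byte count, so savecore can demultiplex the interleaved helper output. The bit layout below is invented for illustration; the real packing is DUMP_SET_TAG() in <sys/dumphdr.h>:

	#include <stdio.h>
	#include <stdint.h>

	#define	TOY_TAG_SHIFT	24
	#define	TOY_SIZE_MASK	((1U << TOY_TAG_SHIFT) - 1)
	#define	TOY_SET_TAG(sz, tag)	(((uint32_t)(tag) << TOY_TAG_SHIFT) | \
					((uint32_t)(sz) & TOY_SIZE_MASK))
	#define	TOY_GET_TAG(w)	((w) >> TOY_TAG_SHIFT)
	#define	TOY_GET_SIZE(w)	((w) & TOY_SIZE_MASK)

	int
	main(void)
	{
		uint32_t w = TOY_SET_TAG(4096, 7); /* 4 KiB block, tag 7 */

		(void) printf("tag=%u size=%u\n",
		    TOY_GET_TAG(w), TOY_GET_SIZE(w));
		return (0);
	}

Inside a block, a csize word written with tag 0 (as in dumpsys_lzjbrun()) carries only the compressed size of the page data that follows.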
2164 1892
2165 1893 /*
2166 1894 * Dump helper called from panic_idle() to compress pages. CPUs in
2167 1895 * this path must not call most kernel services.
2168 1896 *
2169 1897 * During panic, all but one of the CPUs are idle. These CPUs are used
2170 1898 * as helpers working in parallel to copy and compress memory
2171 1899 * pages. During a panic, however, these processors cannot call any
2172 1900 * kernel services. This is because mutexes become no-ops during
2173 1901 * panic, and cross-call interrupts are inhibited. Therefore, during
2174 1902 * panic dump the helper CPUs communicate with the panic CPU using
2175 1903 * memory variables. All memory mapping and I/O is performed by the
2176 1904 * panic CPU.
2177 1905 *
2178 1906 * At dump configuration time, helper_lock is set and helpers_wanted
2179 1907 * is 0. dumpsys() decides whether to set helpers_wanted before
2180 1908 * clearing helper_lock.
2181 1909 *
2182 1910 * At panic time, idle CPUs spin-wait on helper_lock, then alternately
2183 1911 * take the lock and become a helper, or return.
2184 1912 */
2185 1913 void
2186 1914 dumpsys_helper()
2187 1915 {
2188 1916 dumpsys_spinlock(&dumpcfg.helper_lock);
2189 1917 if (dumpcfg.helpers_wanted) {
2190 1918 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2191 1919
2192 1920 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2193 1921 if (hp->helper == FREEHELPER) {
2194 1922 hp->helper = CPU->cpu_id;
2195 1923 BT_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2196 -
2197 1924 dumpsys_spinunlock(&dumpcfg.helper_lock);
2198 -
2199 - if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2200 - dumpsys_lzjbcompress(hp);
2201 - else
2202 - dumpsys_bz2compress(hp);
2203 -
1925 + dumpsys_lzjbcompress(hp);
2204 1926 hp->helper = DONEHELPER;
2205 1927 return;
2206 1928 }
2207 1929 }
2208 1930
2209 1931 /* No more helpers are needed. */
2210 1932 dumpcfg.helpers_wanted = 0;
2211 1933
2212 1934 }
2213 1935 dumpsys_spinunlock(&dumpcfg.helper_lock);
2214 1936 }
2215 1937
2216 1938 /*
2217 1939 * No-wait helper callable in spin loops.
2218 1940 *
2219 1941 * Do not wait for helper_lock. Just check helpers_wanted. The caller
2220 1942 * may decide to continue. This is the "c)ontinue, s)ync, r)eset? s"
2221 1943 * case.
2222 1944 */
2223 1945 void
2224 1946 dumpsys_helper_nw()
2225 1947 {
2226 1948 if (dumpcfg.helpers_wanted)
2227 1949 dumpsys_helper();
2228 1950 }
2229 1951
2230 1952 /*
2231 1953 * Dump helper for live dumps.
2232 1954 * These run as a system task.
2233 1955 */
2234 1956 static void
2235 1957 dumpsys_live_helper(void *arg)
2236 1958 {
2237 1959 helper_t *hp = arg;
2238 1960
2239 1961 BT_ATOMIC_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2240 - if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2241 - dumpsys_lzjbcompress(hp);
2242 - else
2243 - dumpsys_bz2compress(hp);
1962 + dumpsys_lzjbcompress(hp);
2244 1963 }
2245 1964
2246 1965 /*
2247 1966 * Compress one page with lzjb (single threaded case)
2248 1967 */
2249 1968 static void
2250 1969 dumpsys_lzjb_page(helper_t *hp, cbuf_t *cp)
2251 1970 {
2252 1971 dumpsync_t *ds = hp->ds;
2253 1972 uint32_t csize;
2254 1973
2255 1974 hp->helper = MAINHELPER;
2256 1975 hp->in = 0;
2257 1976 hp->used = 0;
2258 1977 hp->cpin = cp;
2259 1978 while (hp->used < cp->used) {
2260 1979 HRSTART(hp->perpage, copy);
2261 1980 hp->in = dumpsys_copy_page(hp, hp->in);
2262 1981 hp->used += PAGESIZE;
2263 1982 HRSTOP(hp->perpage, copy);
2264 1983
2265 1984 HRSTART(hp->perpage, compress);
2266 1985 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2267 1986 HRSTOP(hp->perpage, compress);
2268 1987
2269 1988 HRSTART(hp->perpage, write);
2270 1989 dumpvp_write(&csize, sizeof (csize));
2271 1990 dumpvp_write(hp->lzbuf, csize);
2272 1991 HRSTOP(hp->perpage, write);
2273 1992 }
2274 1993 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
2275 1994 hp->cpin = NULL;
2276 1995 }
2277 1996
2278 1997 /*
2279 1998 * Main task to dump pages. This is called on the dump CPU.
2280 1999 */
2281 2000 static void
2282 2001 dumpsys_main_task(void *arg)
2283 2002 {
2284 2003 dumpsync_t *ds = arg;
2285 2004 pgcnt_t pagenum = 0, bitnum = 0, hibitnum;
2286 2005 dumpmlw_t mlw;
2287 2006 cbuf_t *cp;
2288 2007 pgcnt_t baseoff, pfnoff;
2289 2008 pfn_t base, pfn;
2290 - int i, dumpserial;
2009 + int i;
2291 2010
2292 2011 /*
2293 2012 * Fall back to serial mode if there are no helpers.
2294 - * dump_plat_mincpu can be set to 0 at any time.
2013 + * dump_ncpu_low can be set to 0 at any time.
2295 2014 * dumpcfg.helpermap must contain at least one member.
2015 + *
2016 + * It is possible that the helpers haven't registered
2017 + * in helpermap yet; wait up to DUMP_HELPER_MAX_WAIT.
2296 2018 */
2297 - dumpserial = 1;
2019 + if (dump_ncpu_low != 0 && dumpcfg.clevel != DUMP_CLEVEL_SERIAL) {
2020 + boolean_t dumpserial = B_TRUE;
2021 + hrtime_t hrtmax = MSEC2NSEC(DUMP_HELPER_MAX_WAIT);
2022 + hrtime_t hrtstart = gethrtime();
2298 2023
2299 - if (dump_plat_mincpu != 0 && dumpcfg.clevel != 0) {
2300 - for (i = 0; i < BT_BITOUL(NCPU); ++i) {
2301 - if (dumpcfg.helpermap[i] != 0) {
2302 - dumpserial = 0;
2024 + for (;;) {
2025 + for (i = 0; i < BT_BITOUL(NCPU); ++i) {
2026 + if (dumpcfg.helpermap[i] != 0) {
2027 + dumpserial = B_FALSE;
2028 + break;
2029 + }
2030 + }
2031 +
2032 + if ((!dumpserial) ||
2033 + ((gethrtime() - hrtstart) >= hrtmax)) {
2303 2034 break;
2304 2035 }
2036 +
2037 + ht_pause();
2305 2038 }
2306 - }
2307 2039
2308 - if (dumpserial) {
2309 - dumpcfg.clevel = 0;
2310 - if (dumpcfg.helper[0].lzbuf == NULL)
2311 - dumpcfg.helper[0].lzbuf = dumpcfg.helper[1].page;
2040 + if (dumpserial) {
2041 + dumpcfg.clevel = DUMP_CLEVEL_SERIAL;
2042 + if (dumpcfg.helper[0].lzbuf == NULL) {
2043 + dumpcfg.helper[0].lzbuf =
2044 + dumpcfg.helper[1].page;
2045 + }
2046 + }
2312 2047 }
2313 2048
2314 2049 dump_init_memlist_walker(&mlw);
2315 2050
2316 2051 for (;;) {
2317 2052 int sec = (gethrtime() - ds->start) / NANOSEC;
2318 2053
2319 2054 /*
2320 2055 * Render a simple progress display on the system console to
2321 2056 * make clear to the operator that the system has not hung.
2322 2057 * Emit an update when dump progress has advanced by one
2323 2058 * percent, or when no update has been drawn in the last
2324 2059 * second.
2325 2060 */
2326 2061 if (ds->percent > ds->percent_done || sec > ds->sec_done) {
2327 2062 ds->sec_done = sec;
2328 2063 ds->percent_done = ds->percent;
2329 2064 uprintf("^\rdumping: %2d:%02d %3d%% done",
2330 2065 sec / 60, sec % 60, ds->percent);
2331 2066 ds->neednl = 1;
2332 2067 }
2333 2068
2334 2069 while (CQ_IS_EMPTY(mainq) && !CQ_IS_EMPTY(writerq)) {
2335 2070
2336 2071 /* the writerq never blocks */
2337 2072 cp = CQ_GET(writerq);
2338 2073 if (cp == NULL)
2339 2074 break;
2340 2075
2341 2076 dump_timeleft = dump_timeout;
2342 2077
2343 2078 HRSTART(ds->perpage, write);
2344 2079 dumpvp_write(cp->buf, cp->used);
2345 2080 HRSTOP(ds->perpage, write);
2346 2081
2347 2082 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2348 2083 }
2349 2084
2350 2085 /*
2351 2086 * Wait here for some buffers to process. Returns NULL
2352 2087 * when all helpers have terminated and all buffers
2353 2088 * have been processed.
2354 2089 */
2355 2090 cp = CQ_GET(mainq);
2356 2091
2357 2092 if (cp == NULL) {
2358 2093
2359 2094 /* Drain the write queue. */
2360 2095 if (!CQ_IS_EMPTY(writerq))
2361 2096 continue;
2362 2097
2363 2098 /* Main task exits here. */
2364 2099 break;
2365 2100 }
2366 2101
2367 2102 dump_timeleft = dump_timeout;
2368 2103
2369 2104 switch (cp->state) {
2370 2105
2371 2106 case CBUF_FREEMAP:
2372 2107
2373 2108 /*
2374 2109 * Note that we drop CBUF_FREEMAP buffers on
2375 2110 * the floor (they will not be on any cqueue)
2376 2111 * when we no longer need them.
2377 2112 */
2378 2113 if (bitnum >= dumpcfg.bitmapsize)
2379 2114 break;
2380 2115
2381 2116 if (dump_ioerr) {
2382 2117 bitnum = dumpcfg.bitmapsize;
2383 2118 CQ_CLOSE(helperq);
2384 2119 break;
2385 2120 }
2386 2121
2387 2122 HRSTART(ds->perpage, bitmap);
2388 2123 for (; bitnum < dumpcfg.bitmapsize; bitnum++)
2389 2124 if (BT_TEST(dumpcfg.bitmap, bitnum))
2390 2125 break;
2391 2126 HRSTOP(ds->perpage, bitmap);
2392 2127 dump_timeleft = dump_timeout;
2393 2128
2394 2129 if (bitnum >= dumpcfg.bitmapsize) {
2395 2130 CQ_CLOSE(helperq);
2396 2131 break;
2397 2132 }
2398 2133
2399 2134 /*
2400 2135 * Try to map CBUF_MAPSIZE ranges. Can't
2401 2136 * assume that memory segment size is a
2402 2137 * multiple of CBUF_MAPSIZE. Can't assume that
2403 2138 * the segment starts on a CBUF_MAPSIZE
2404 2139 * boundary.
2405 2140 */
2406 2141 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2407 2142 ASSERT(pfn != PFN_INVALID);
2408 2143 ASSERT(bitnum + mlw.mpleft <= dumpcfg.bitmapsize);
2409 2144
2410 2145 base = P2ALIGN(pfn, CBUF_MAPNP);
2411 2146 if (base < mlw.mpaddr) {
2412 2147 base = mlw.mpaddr;
2413 2148 baseoff = P2PHASE(base, CBUF_MAPNP);
2414 2149 } else {
2415 2150 baseoff = 0;
2416 2151 }
2417 2152
2418 2153 pfnoff = pfn - base;
2419 2154 if (pfnoff + mlw.mpleft < CBUF_MAPNP) {
2420 2155 hibitnum = bitnum + mlw.mpleft;
2421 2156 cp->size = ptob(pfnoff + mlw.mpleft);
2422 2157 } else {
2423 2158 hibitnum = bitnum - pfnoff + CBUF_MAPNP -
2424 2159 baseoff;
2425 2160 cp->size = CBUF_MAPSIZE - ptob(baseoff);
2426 2161 }
2427 2162
2428 2163 cp->pfn = pfn;
2429 2164 cp->bitnum = bitnum++;
2430 2165 cp->pagenum = pagenum++;
2431 2166 cp->off = ptob(pfnoff);
2432 2167
2433 2168 for (; bitnum < hibitnum; bitnum++)
2434 2169 if (BT_TEST(dumpcfg.bitmap, bitnum))
2435 2170 pagenum++;
2436 2171
2437 2172 dump_timeleft = dump_timeout;
2438 2173 cp->used = ptob(pagenum - cp->pagenum);
2439 2174
2440 2175 HRSTART(ds->perpage, map);
2441 2176 hat_devload(kas.a_hat, cp->buf, cp->size, base,
2442 2177 PROT_READ, HAT_LOAD_NOCONSIST);
2443 2178 HRSTOP(ds->perpage, map);
2444 2179
2445 2180 ds->pages_mapped += btop(cp->size);
2446 2181 ds->pages_used += pagenum - cp->pagenum;
2447 2182
2448 2183 CQ_OPEN(mainq);
2449 2184
2450 2185 /*
2451 2186 * If there are no helpers the main task does
2452 2187 * non-streams lzjb compress.
2453 2188 */
2454 - if (dumpserial) {
2189 + if (dumpcfg.clevel == DUMP_CLEVEL_SERIAL) {
2455 2190 dumpsys_lzjb_page(dumpcfg.helper, cp);
2456 - break;
2191 + } else {
2192 + /* pass mapped pages to a helper */
2193 + CQ_PUT(helperq, cp, CBUF_INREADY);
2457 2194 }
2458 2195
2459 - /* pass mapped pages to a helper */
2460 - CQ_PUT(helperq, cp, CBUF_INREADY);
2461 -
2462 2196 /* the last page was done */
2463 2197 if (bitnum >= dumpcfg.bitmapsize)
2464 2198 CQ_CLOSE(helperq);
2465 2199
2466 2200 break;
2467 2201
2468 2202 case CBUF_USEDMAP:
2469 2203
2470 2204 ds->npages += btop(cp->used);
2471 2205
2472 2206 HRSTART(ds->perpage, unmap);
2473 2207 hat_unload(kas.a_hat, cp->buf, cp->size, HAT_UNLOAD);
2474 2208 HRSTOP(ds->perpage, unmap);
2475 2209
2476 2210 if (bitnum < dumpcfg.bitmapsize)
2477 2211 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2478 2212 CQ_CLOSE(mainq);
2479 2213
2480 2214 ASSERT(ds->npages <= dumphdr->dump_npages);
2481 2215 ds->percent = ds->npages * 100LL / dumphdr->dump_npages;
2482 2216 break;
2483 2217
2484 2218 case CBUF_WRITE:
2485 2219
2486 2220 CQ_PUT(writerq, cp, CBUF_WRITE);
2487 2221 break;
2488 2222
2489 2223 case CBUF_ERRMSG:
2490 2224
2491 2225 if (cp->used > 0) {
2492 2226 cp->buf[cp->size - 2] = '\n';
2493 2227 cp->buf[cp->size - 1] = '\0';
2494 2228 if (ds->neednl) {
2495 2229 uprintf("\n%s", cp->buf);
2496 2230 ds->neednl = 0;
2497 2231 } else {
2498 2232 uprintf("%s", cp->buf);
2499 2233 }
2500 2234 /* wait for console output */
2501 2235 drv_usecwait(200000);
2502 2236 dump_timeleft = dump_timeout;
2503 2237 }
2504 2238 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2505 2239 break;
2506 2240
2507 2241 default:
2508 2242 uprintf("dump: unexpected buffer state %d, "
2509 2243 "buffer will be lost\n", cp->state);
2510 2244 break;
2511 2245
2512 2246 } /* end switch */
2513 2247 }
2514 2248 }
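In the CBUF_FREEMAP arm above, the window arithmetic aligns the first marked pfn down to a CBUF_MAPNP boundary and then clips the window so it does not start before the memory segment. The P2ALIGN()/P2PHASE() math, reproduced standalone with assumed values (CBUF_MAPNP is platform-derived):

	#include <stdio.h>

	/* Power-of-two helpers as in <sys/sysmacros.h>. */
	#define	P2ALIGN(x, a)	((x) & -(a))
	#define	P2PHASE(x, a)	((x) & ((a) - 1))

	int
	main(void)
	{
		unsigned long cbuf_mapnp = 1024; /* assumed pages per window */
		unsigned long mpaddr = 1100;	/* segment start pfn (assumed) */
		unsigned long pfn = 1500;	/* first marked pfn */
		unsigned long baseoff = 0;
		unsigned long base = P2ALIGN(pfn, cbuf_mapnp);	/* 1024 */

		if (base < mpaddr) {	/* window starts before the segment */
			base = mpaddr;
			baseoff = P2PHASE(base, cbuf_mapnp);	/* 76 */
		}
		(void) printf("base=%lu baseoff=%lu pfnoff=%lu\n",
		    base, baseoff, pfn - base);
		return (0);
	}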
2515 2249
2516 2250 #ifdef COLLECT_METRICS
2517 2251 size_t
2518 2252 dumpsys_metrics(dumpsync_t *ds, char *buf, size_t size)
2519 2253 {
2520 2254 dumpcfg_t *cfg = &dumpcfg;
2521 2255 int myid = CPU->cpu_seqid;
2522 2256 int i, compress_ratio;
2523 2257 int sec, iorate;
2524 2258 helper_t *hp, *hpend = &cfg->helper[cfg->nhelper];
2525 2259 char *e = buf + size;
2526 2260 char *p = buf;
2527 2261
2528 2262 sec = ds->elapsed / (1000 * 1000 * 1000ULL);
2529 2263 if (sec < 1)
2530 2264 sec = 1;
2531 2265
2532 2266 if (ds->iotime < 1)
2533 2267 ds->iotime = 1;
2534 2268 iorate = (ds->nwrite * 100000ULL) / ds->iotime;
2535 2269
2536 2270 compress_ratio = 100LL * ds->npages / btopr(ds->nwrite + 1);
2537 2271
2538 2272 #define P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)
2539 2273
2540 2274 P("Master cpu_seqid,%d\n", CPU->cpu_seqid);
2541 2275 P("Master cpu_id,%d\n", CPU->cpu_id);
2542 2276 P("dump_flags,0x%x\n", dumphdr->dump_flags);
2543 2277 P("dump_ioerr,%d\n", dump_ioerr);
2544 2278
2545 2279 P("Helpers:\n");
2546 2280 for (i = 0; i < ncpus; i++) {
2547 2281 if ((i & 15) == 0)
2548 2282 P(",,%03d,", i);
2549 2283 if (i == myid)
2550 2284 P(" M");
2551 2285 else if (BT_TEST(cfg->helpermap, i))
2552 2286 P("%4d", cpu_seq[i]->cpu_id);
2553 2287 else
2554 2288 P(" *");
2555 2289 if ((i & 15) == 15)
2556 2290 P("\n");
2557 2291 }
2558 2292
2559 2293 P("ncbuf_used,%d\n", cfg->ncbuf_used);
2560 2294 P("ncmap,%d\n", cfg->ncmap);
2561 2295
2562 2296 P("Found %ldM ranges,%ld\n", (CBUF_MAPSIZE / DUMP_1MB), cfg->found4m);
2563 2297 P("Found small pages,%ld\n", cfg->foundsm);
2564 2298
2565 2299 P("Compression level,%d\n", cfg->clevel);
2566 - P("Compression type,%s %s\n", cfg->clevel == 0 ? "serial" : "parallel",
2567 - cfg->clevel >= DUMP_CLEVEL_BZIP2 ? "bzip2" : "lzjb");
2300 + P("Compression type,%s lzjb\n",
2301 + cfg->clevel == DUMP_CLEVEL_SERIAL ? "serial" : "parallel");
2568 2302 P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio %
2569 2303 100);
2570 2304 P("nhelper_used,%d\n", cfg->nhelper_used);
2571 2305
2572 2306 P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100);
2573 2307 P("..total bytes,%lld\n", (u_longlong_t)ds->nwrite);
2574 2308 P("..total nsec,%lld\n", (u_longlong_t)ds->iotime);
2575 2309 P("dumpbuf.iosize,%ld\n", dumpbuf.iosize);
2576 2310 P("dumpbuf.size,%ld\n", dumpbuf.size);
2577 2311
2578 2312 P("Dump pages/sec,%llu\n", (u_longlong_t)ds->npages / sec);
2579 2313 P("Dump pages,%llu\n", (u_longlong_t)ds->npages);
2580 2314 P("Dump time,%d\n", sec);
2581 2315
2582 2316 if (ds->pages_mapped > 0)
2583 2317 P("per-cent map utilization,%d\n", (int)((100 * ds->pages_used)
2584 2318 / ds->pages_mapped));
2585 2319
2586 2320 P("\nPer-page metrics:\n");
2587 2321 if (ds->npages > 0) {
2588 2322 for (hp = cfg->helper; hp != hpend; hp++) {
2589 2323 #define PERPAGE(x) ds->perpage.x += hp->perpage.x;
2590 2324 PERPAGES;
2591 2325 #undef PERPAGE
2592 2326 }
2593 2327 #define PERPAGE(x) \
2594 2328 P("%s nsec/page,%d\n", #x, (int)(ds->perpage.x / ds->npages));
2595 2329 PERPAGES;
2596 2330 #undef PERPAGE
2597 2331 P("freebufq.empty,%d\n", (int)(ds->freebufq.empty /
2598 2332 ds->npages));
2599 2333 P("helperq.empty,%d\n", (int)(ds->helperq.empty /
2600 2334 ds->npages));
2601 2335 P("writerq.empty,%d\n", (int)(ds->writerq.empty /
2602 2336 ds->npages));
2603 2337 P("mainq.empty,%d\n", (int)(ds->mainq.empty / ds->npages));
2604 2338
2605 2339 P("I/O wait nsec/page,%llu\n", (u_longlong_t)(ds->iowait /
2606 2340 ds->npages));
2607 2341 }
2608 2342 #undef P
2609 2343 if (p < e)
2610 2344 bzero(p, e - p);
2611 2345 return (p - buf);
2612 2346 }
2613 2347 #endif /* COLLECT_METRICS */
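The P() macro above appends formatted text to a fixed buffer and simply stops producing output once the buffer is full; because snprintf() returns the would-be length, p can run past e and must be clamped before use. The idiom in isolation:

	#include <stdio.h>

	#define	P(...)	(p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)

	int
	main(void)
	{
		char buf[32];
		char *p = buf;
		char *e = buf + sizeof (buf);

		P("pages,%d\n", 1234);
		P("time,%d\n", 42);	/* silently truncated if buf fills */

		(void) printf("%.*s", (int)((p < e ? p : e) - buf), buf);
		return (0);
	}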
2614 2348
2615 2349 /*
2616 2350 * Dump the system.
2617 2351 */
2618 2352 void
2619 2353 dumpsys(void)
2620 2354 {
2621 2355 dumpsync_t *ds = &dumpsync;
2622 2356 taskq_t *livetaskq = NULL;
2623 2357 pfn_t pfn;
2624 2358 pgcnt_t bitnum;
2625 2359 proc_t *p;
2626 2360 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2627 2361 cbuf_t *cp;
2628 2362 pid_t npids, pidx;
2629 2363 char *content;
2630 2364 char *buf;
2631 2365 size_t size;
2632 2366 int save_dump_clevel;
2633 2367 dumpmlw_t mlw;
2634 2368 dumpcsize_t datatag;
2635 2369 dumpdatahdr_t datahdr;
2636 2370
2637 2371 if (dumpvp == NULL || dumphdr == NULL) {
2638 2372 uprintf("skipping system dump - no dump device configured\n");
2639 2373 if (panicstr) {
2640 2374 dumpcfg.helpers_wanted = 0;
2641 2375 dumpsys_spinunlock(&dumpcfg.helper_lock);
2642 2376 }
2643 2377 return;
2644 2378 }
2645 2379 dumpbuf.cur = dumpbuf.start;
2646 2380
2647 2381 /* clear the sync variables */
2648 2382 ASSERT(dumpcfg.nhelper > 0);
2649 2383 bzero(ds, sizeof (*ds));
2650 2384 ds->dumpcpu = CPU->cpu_id;
2651 2385
2652 2386 /*
2653 2387 * Calculate the starting block for dump. If we're dumping on a
2654 2388 * swap device, start 1/5 of the way in; otherwise, start at the
2655 2389 * beginning. And never use the first page -- it may be a disk label.
2656 2390 */
2657 2391 if (dumpvp->v_flag & VISSWAP)
2658 2392 dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET);
2659 2393 else
2660 2394 dumphdr->dump_start = DUMP_OFFSET;
2661 2395
2662 2396 dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED;
2663 2397 dumphdr->dump_crashtime = gethrestime_sec();
2664 2398 dumphdr->dump_npages = 0;
2665 2399 dumphdr->dump_nvtop = 0;
2666 2400 bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize));
2667 2401 dump_timeleft = dump_timeout;
2668 2402
2669 2403 if (panicstr) {
2670 2404 dumphdr->dump_flags &= ~DF_LIVE;
2671 2405 (void) VOP_DUMPCTL(dumpvp, DUMP_FREE, NULL, NULL);
2672 2406 (void) VOP_DUMPCTL(dumpvp, DUMP_ALLOC, NULL, NULL);
2673 2407 (void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE,
2674 2408 panicstr, panicargs);
2675 -
2409 + (void) strncpy(dumphdr->dump_uuid, dump_get_uuid(),
2410 + sizeof (dumphdr->dump_uuid));
2676 2411 }
2677 2412
2678 2413 if (dump_conflags & DUMP_ALL)
2679 2414 content = "all";
2680 2415 else if (dump_conflags & DUMP_CURPROC)
2681 2416 content = "kernel + curproc";
2682 2417 else
2683 2418 content = "kernel";
2684 2419 uprintf("dumping to %s, offset %lld, content: %s\n", dumppath,
2685 2420 dumphdr->dump_start, content);
2686 2421
2687 2422 /* Make sure nodename is current */
2688 2423 bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN);
2689 2424
2690 2425 /*
2691 2426 * If this is a live dump, try to open a VCHR vnode for better
2692 2427 * performance. We must take care to flush the buffer cache
2693 2428 * first.
2694 2429 */
2695 2430 if (!panicstr) {
2696 2431 vnode_t *cdev_vp, *cmn_cdev_vp;
2697 2432
2698 2433 ASSERT(dumpbuf.cdev_vp == NULL);
2699 2434 cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR);
2700 2435 if (cdev_vp != NULL) {
2701 2436 cmn_cdev_vp = common_specvp(cdev_vp);
2702 2437 if (VOP_OPEN(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL)
2703 2438 == 0) {
2704 2439 if (vn_has_cached_data(dumpvp))
2705 2440 (void) pvn_vplist_dirty(dumpvp, 0, NULL,
2706 2441 B_INVAL | B_TRUNC, kcred);
2707 2442 dumpbuf.cdev_vp = cmn_cdev_vp;
2708 2443 } else {
2709 2444 VN_RELE(cdev_vp);
2710 2445 }
2711 2446 }
2712 2447 }
2713 2448
2714 2449 /*
2715 2450 * Store a hires timestamp so we can look it up during debugging.
2716 2451 */
2717 2452 lbolt_debug_entry();
2718 2453
2719 2454 /*
2720 - * Leave room for the message and ereport save areas and terminal dump
2721 - * header.
2455 + * Leave room for the summary, message and ereport save areas
2456 + * and terminal dump header.
2722 2457 */
2723 2458 dumpbuf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET -
2724 - DUMP_ERPTSIZE;
2459 + DUMP_ERPTSIZE - DUMP_SUMMARYSIZE;
2725 2460
2726 2461 /*
2727 2462 * Write out the symbol table. It's no longer compressed,
2728 2463 * so its 'size' and 'csize' are equal.
2729 2464 */
2730 2465 dumpbuf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE;
2731 2466 dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize =
2732 2467 ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX);
2733 2468
2734 2469 /*
2735 2470 * Write out the translation map.
2736 2471 */
2737 2472 dumphdr->dump_map = dumpvp_flush();
2738 2473 dump_as(&kas);
2739 2474 dumphdr->dump_nvtop += dump_plat_addr();
2740 2475
2741 2476 /*
2742 2477 * call into hat, which may have unmapped pages that also need to
2743 2478 * be in the dump
2744 2479 */
2745 2480 hat_dump();
2746 2481
2747 2482 if (dump_conflags & DUMP_ALL) {
2748 2483 mutex_enter(&pidlock);
2749 2484
2750 2485 for (npids = 0, p = practive; p != NULL; p = p->p_next)
2751 2486 dumpcfg.pids[npids++] = p->p_pid;
2752 2487
2753 2488 mutex_exit(&pidlock);
2754 2489
2755 2490 for (pidx = 0; pidx < npids; pidx++)
2756 2491 (void) dump_process(dumpcfg.pids[pidx]);
2757 2492
2758 2493 dump_init_memlist_walker(&mlw);
2759 2494 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2760 2495 dump_timeleft = dump_timeout;
2761 2496 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2762 2497 /*
2763 2498 * Some hypervisors do not have all pages available to
2764 2499 * be accessed by the guest OS. Check for page
2765 2500 * accessibility.
2766 2501 */
2767 2502 if (plat_hold_page(pfn, PLAT_HOLD_NO_LOCK, NULL) !=
2768 2503 PLAT_HOLD_OK)
2769 2504 continue;
2770 2505 BT_SET(dumpcfg.bitmap, bitnum);
2771 2506 }
2772 2507 dumphdr->dump_npages = dumpcfg.bitmapsize;
2773 2508 dumphdr->dump_flags |= DF_ALL;
2774 2509
2775 2510 } else if (dump_conflags & DUMP_CURPROC) {
2776 2511 /*
2777 2512 * Determine which pid is to be dumped. If we're panicking, we
2778 2513 * dump the process associated with panic_thread (if any). If
2779 2514 * this is a live dump, we dump the process associated with
2780 2515 * curthread.
2781 2516 */
2782 2517 npids = 0;
2783 2518 if (panicstr) {
2784 2519 if (panic_thread != NULL &&
2785 2520 panic_thread->t_procp != NULL &&
2786 2521 panic_thread->t_procp != &p0) {
2787 2522 dumpcfg.pids[npids++] =
2788 2523 panic_thread->t_procp->p_pid;
2789 2524 }
2790 2525 } else {
2791 2526 dumpcfg.pids[npids++] = curthread->t_procp->p_pid;
2792 2527 }
2793 2528
2794 2529 if (npids && dump_process(dumpcfg.pids[0]) == 0)
2795 2530 dumphdr->dump_flags |= DF_CURPROC;
2796 2531 else
2797 2532 dumphdr->dump_flags |= DF_KERNEL;
2798 2533
2799 2534 } else {
2800 2535 dumphdr->dump_flags |= DF_KERNEL;
2801 2536 }
2802 2537
2803 2538 dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1;
2804 2539
2805 2540 /*
2806 2541 * Write out the pfn table.
2807 2542 */
2808 2543 dumphdr->dump_pfn = dumpvp_flush();
2809 2544 dump_init_memlist_walker(&mlw);
2810 2545 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2811 2546 dump_timeleft = dump_timeout;
2812 2547 if (!BT_TEST(dumpcfg.bitmap, bitnum))
2813 2548 continue;
2814 2549 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2815 2550 ASSERT(pfn != PFN_INVALID);
2816 2551 dumpvp_write(&pfn, sizeof (pfn_t));
2817 2552 }
2818 2553 dump_plat_pfn();
2819 2554
2820 2555 /*
2821 2556 * Write out all the pages.
2822 2557 * Map pages, copy them handling UEs, compress, and write them out.
2823 2558 * Cooperate with any helpers running on CPUs in panic_idle().
2824 2559 */
2825 2560 dumphdr->dump_data = dumpvp_flush();
2826 2561
2827 2562 bzero(dumpcfg.helpermap, BT_SIZEOFMAP(NCPU));
2828 - ds->live = dumpcfg.clevel > 0 &&
2563 + ds->live = dumpcfg.clevel > DUMP_CLEVEL_SERIAL &&
2829 2564 (dumphdr->dump_flags & DF_LIVE) != 0;
2830 2565
2831 2566 save_dump_clevel = dumpcfg.clevel;
2832 2567 if (panicstr)
2833 2568 dumpsys_get_maxmem();
2834 - else if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2835 - dumpcfg.clevel = DUMP_CLEVEL_LZJB;
2836 2569
2837 2570 dumpcfg.nhelper_used = 0;
2838 2571 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2839 2572 if (hp->page == NULL) {
2840 2573 hp->helper = DONEHELPER;
2841 2574 continue;
2842 2575 }
2843 2576 ++dumpcfg.nhelper_used;
2844 2577 hp->helper = FREEHELPER;
2845 2578 hp->taskqid = NULL;
2846 2579 hp->ds = ds;
2847 2580 bzero(&hp->perpage, sizeof (hp->perpage));
2848 - if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2849 - (void) BZ2_bzCompressReset(&hp->bzstream);
2850 2581 }
2851 2582
2852 2583 CQ_OPEN(freebufq);
2853 2584 CQ_OPEN(helperq);
2854 2585
2855 2586 dumpcfg.ncbuf_used = 0;
2856 2587 for (cp = dumpcfg.cbuf; cp != &dumpcfg.cbuf[dumpcfg.ncbuf]; cp++) {
2857 2588 if (cp->buf != NULL) {
2858 2589 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2859 2590 ++dumpcfg.ncbuf_used;
2860 2591 }
2861 2592 }
2862 2593
2863 2594 for (cp = dumpcfg.cmap; cp != &dumpcfg.cmap[dumpcfg.ncmap]; cp++)
2864 2595 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2865 2596
2866 2597 ds->start = gethrtime();
2867 2598 ds->iowaitts = ds->start;
2868 2599
2869 2600 /* start helpers */
2870 2601 if (ds->live) {
2871 2602 int n = dumpcfg.nhelper_used;
2872 2603 int pri = MINCLSYSPRI - 25;
2873 2604
2874 2605 livetaskq = taskq_create("LiveDump", n, pri, n, n,
2875 2606 TASKQ_PREPOPULATE);
2876 2607 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2877 2608 if (hp->page == NULL)
2878 2609 continue;
2879 2610 hp->helper = hp - dumpcfg.helper;
2880 2611 hp->taskqid = taskq_dispatch(livetaskq,
2881 2612 dumpsys_live_helper, (void *)hp, TQ_NOSLEEP);
2882 2613 }
2883 2614
2884 2615 } else {
2885 2616 if (panicstr)
2886 2617 kmem_dump_begin();
2887 - dumpcfg.helpers_wanted = dumpcfg.clevel > 0;
2618 + dumpcfg.helpers_wanted = dumpcfg.clevel > DUMP_CLEVEL_SERIAL;
2888 2619 dumpsys_spinunlock(&dumpcfg.helper_lock);
2889 2620 }
2890 2621
2891 2622 /* run main task */
2892 2623 dumpsys_main_task(ds);
2893 2624
2894 2625 ds->elapsed = gethrtime() - ds->start;
2895 2626 if (ds->elapsed < 1)
2896 2627 ds->elapsed = 1;
2897 2628
2898 2629 if (livetaskq != NULL)
2899 2630 taskq_destroy(livetaskq);
2900 2631
2901 2632 if (ds->neednl) {
2902 2633 uprintf("\n");
2903 2634 ds->neednl = 0;
2904 2635 }
2905 2636
2906 2637 /* record actual pages dumped */
2907 2638 dumphdr->dump_npages = ds->npages;
2908 2639
2909 2640 /* platform-specific data */
2910 2641 dumphdr->dump_npages += dump_plat_data(dumpcfg.cbuf[0].buf);
2911 2642
2912 2643 /* note any errors by clearing DF_COMPLETE */
2913 2644 if (dump_ioerr || ds->npages < dumphdr->dump_npages)
2914 2645 dumphdr->dump_flags &= ~DF_COMPLETE;
2915 2646
2916 2647 /* end of stream blocks */
2917 2648 datatag = 0;
2918 2649 dumpvp_write(&datatag, sizeof (datatag));
2919 2650
2920 2651 bzero(&datahdr, sizeof (datahdr));
2921 2652
2922 2653 /* buffer for metrics */
2923 2654 buf = dumpcfg.cbuf[0].buf;
2924 2655 size = MIN(dumpcfg.cbuf[0].size, DUMP_OFFSET - sizeof (dumphdr_t) -
2925 2656 sizeof (dumpdatahdr_t));
2926 2657
2927 2658 /* finish the kmem intercepts, collect kmem verbose info */
2928 2659 if (panicstr) {
2929 2660 datahdr.dump_metrics = kmem_dump_finish(buf, size);
2930 2661 buf += datahdr.dump_metrics;
2931 2662 size -= datahdr.dump_metrics;
2932 2663 }
2933 2664
2934 2665 /* record in the header whether this is a fault-management panic */
2935 2666 if (panicstr)
2936 2667 dumphdr->dump_fm_panic = is_fm_panic();
2937 2668
2938 2669 /* compression info in data header */
2939 2670 datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC;
2940 2671 datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION;
2941 2672 datahdr.dump_maxcsize = CBUF_SIZE;
2942 2673 datahdr.dump_maxrange = CBUF_MAPSIZE / PAGESIZE;
2943 2674 datahdr.dump_nstreams = dumpcfg.nhelper_used;
2944 2675 datahdr.dump_clevel = dumpcfg.clevel;
2945 2676 #ifdef COLLECT_METRICS
2946 2677 if (dump_metrics_on)
2947 2678 datahdr.dump_metrics += dumpsys_metrics(ds, buf, size);
2948 2679 #endif
2949 2680 datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data;
2950 2681
2951 2682 /*
2952 2683 * Write out the initial and terminal dump headers.
2953 2684 */
2954 2685 dumpbuf.vp_off = dumphdr->dump_start;
2955 2686 dumpvp_write(dumphdr, sizeof (dumphdr_t));
2956 2687 (void) dumpvp_flush();
2957 2688
2958 2689 dumpbuf.vp_limit = dumpvp_size;
2959 2690 dumpbuf.vp_off = dumpbuf.vp_limit - DUMP_OFFSET;
2960 2691 dumpvp_write(dumphdr, sizeof (dumphdr_t));
2961 2692 dumpvp_write(&datahdr, sizeof (dumpdatahdr_t));
2962 2693 dumpvp_write(dumpcfg.cbuf[0].buf, datahdr.dump_metrics);
2963 2694
2964 2695 (void) dumpvp_flush();
2965 2696
2966 2697 uprintf("\r%3d%% done: %llu pages dumped, ",
2967 2698 ds->percent_done, (u_longlong_t)ds->npages);
2968 2699
2969 2700 if (dump_ioerr == 0) {
2970 2701 uprintf("dump succeeded\n");
2971 2702 } else {
2972 2703 uprintf("dump failed: error %d\n", dump_ioerr);
2973 2704 #ifdef DEBUG
2974 2705 if (panicstr)
2975 2706 debug_enter("dump failed");
2976 2707 #endif
2977 2708 }
2978 2709
2979 2710 /*
2980 2711 * Write out all undelivered messages. This has to be the *last*
2981 2712 * thing we do because the dump process itself emits messages.
2982 2713 */
2983 2714 if (panicstr) {
2984 2715 dump_summary();
2985 2716 dump_ereports();
2986 2717 dump_messages();
2987 2718 }
2988 2719
2989 2720 delay(2 * hz); /* let people see the 'done' message */
2990 2721 dump_timeleft = 0;
2991 2722 dump_ioerr = 0;
2992 2723
2993 2724 /* restore settings after live dump completes */
2994 2725 if (!panicstr) {
2995 2726 dumpcfg.clevel = save_dump_clevel;
2996 2727
2997 2728 /* release any VCHR open of the dump device */
2998 2729 if (dumpbuf.cdev_vp != NULL) {
2999 2730 (void) VOP_CLOSE(dumpbuf.cdev_vp, FREAD | FWRITE, 1, 0,
3000 2731 kcred, NULL);
3001 2732 VN_RELE(dumpbuf.cdev_vp);
3002 2733 dumpbuf.cdev_vp = NULL;
3003 2734 }
3004 2735 }
3005 2736 }
3006 2737
3007 2738 /*
3008 2739 * This function is called whenever the memory size, as represented
3009 2740 * by the phys_install list, changes.
3010 2741 */
3011 2742 void
3012 2743 dump_resize()
3013 2744 {
3014 2745 mutex_enter(&dump_lock);
3015 2746 dumphdr_init();
3016 2747 dumpbuf_resize();
3017 2748 dump_update_clevel();
3018 2749 mutex_exit(&dump_lock);
3019 2750 }
3020 2751
3021 2752 /*
3022 2753 * This function allows for dynamic resizing of a dump area. It assumes that
3023 2754 * the underlying device has updated its size(9P) accordingly.
3024 2755 */
3025 2756 int
3026 2757 dumpvp_resize()
3027 2758 {
3028 2759 int error;
3029 2760 vattr_t vattr;
3030 2761
3031 2762 mutex_enter(&dump_lock);
3032 2763 vattr.va_mask = AT_SIZE;
3033 2764 if ((error = VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL)) != 0) {
3034 2765 mutex_exit(&dump_lock);
3035 2766 return (error);
3036 2767 }
3037 2768
3038 2769 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) {
3039 2770 mutex_exit(&dump_lock);
3040 2771 return (ENOSPC);
3041 2772 }
3042 2773
3043 2774 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
3044 2775 mutex_exit(&dump_lock);
3045 2776 return (0);
3046 2777 }
3047 2778
3048 -int
3049 -dump_set_uuid(const char *uuidstr)
2779 +static int
2780 +dump_validate_uuid(const char *uuidstr)
3050 2781 {
3051 2782 const char *ptr;
3052 2783 int i;
3053 2784
3054 - if (uuidstr == NULL || strnlen(uuidstr, 36 + 1) != 36)
2785 + if (uuidstr == NULL || strlen(uuidstr) !=
2786 + UUID_PRINTABLE_STRING_LENGTH - 1)
3055 2787 return (EINVAL);
3056 2788
3057 2789 /* uuid_parse is not common code so check manually */
3058 - for (i = 0, ptr = uuidstr; i < 36; i++, ptr++) {
2790 + for (i = 0, ptr = uuidstr; i < UUID_PRINTABLE_STRING_LENGTH - 1;
2791 + i++, ptr++) {
3059 2792 switch (i) {
3060 2793 case 8:
3061 2794 case 13:
3062 2795 case 18:
3063 2796 case 23:
3064 2797 if (*ptr != '-')
3065 2798 return (EINVAL);
3066 2799 break;
3067 2800
3068 2801 default:
3069 2802 if (!isxdigit(*ptr))
3070 2803 return (EINVAL);
3071 2804 break;
3072 2805 }
3073 2806 }
3074 2807
2808 + return (0);
2809 +}
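A user-land rendition of the same check, for reference: 36 characters, hyphens at offsets 8, 13, 18 and 23, hex digits everywhere else (UUID_PRINTABLE_STRING_LENGTH counts the terminating NUL, hence the "- 1" above):

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	static int
	toy_validate_uuid(const char *s)
	{
		int i;

		if (s == NULL || strlen(s) != 36)
			return (-1);

		for (i = 0; i < 36; i++) {
			if (i == 8 || i == 13 || i == 18 || i == 23) {
				if (s[i] != '-')
					return (-1);
			} else if (!isxdigit((unsigned char)s[i])) {
				return (-1);
			}
		}
		return (0);
	}

	int
	main(void)
	{
		(void) printf("%d\n", toy_validate_uuid(
		    "f81d4fae-7dec-11d0-a765-00a0c91e6bf6")); /* 0: valid */
		(void) printf("%d\n", toy_validate_uuid("not-a-uuid")); /* -1 */
		return (0);
	}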
2810 +
2811 +int
2812 +dump_update_uuid(const char *uuidstr)
2813 +{
2814 +
2815 + if (dump_validate_uuid(uuidstr) != 0 || dumphdr == NULL)
2816 + return (EINVAL);
2817 +
2818 + bzero(dumphdr->dump_uuid, sizeof (dumphdr->dump_uuid));
2819 + (void) strncpy(dumphdr->dump_uuid, uuidstr,
2820 + sizeof (dumphdr->dump_uuid));
2821 +
2822 + return (0);
2823 +}
2824 +
2825 +int
2826 +dump_set_uuid(const char *uuidstr)
2827 +{
2828 + if (dump_validate_uuid(uuidstr) != 0)
2829 + return (EINVAL);
2830 +
3075 2831 if (dump_osimage_uuid[0] != '\0')
3076 2832 return (EALREADY);
3077 2833
3078 - (void) strncpy(dump_osimage_uuid, uuidstr, 36 + 1);
2834 + (void) strncpy(dump_osimage_uuid, uuidstr,
2835 + UUID_PRINTABLE_STRING_LENGTH);
3079 2836
3080 2837 cmn_err(CE_CONT, "?This Solaris instance has UUID %s\n",
3081 2838 dump_osimage_uuid);
3082 2839
3083 2840 return (0);
3084 2841 }
3085 2842
3086 2843 const char *
3087 2844 dump_get_uuid(void)
3088 2845 {
3089 2846 return (dump_osimage_uuid[0] != '\0' ? dump_osimage_uuid : "");
3090 2847 }