NEX-3079 port illumos ARC improvements
--- old/usr/src/uts/common/fs/zfs/zio_inject.c
+++ new/usr/src/uts/common/fs/zfs/zio_inject.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * ZFS fault injection
28 28 *
29 29 * To handle fault injection, we keep track of a series of zinject_record_t
30 30 * structures which describe which logical block(s) should be injected with a
31 31 * fault. These are kept in a global list. Each record corresponds to a given
32 32 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
33 33 * or exported while the injection record exists.
34 34 *
35 35 * Device level injection is done using the 'zi_guid' field. If this is set, it
36 36 * means that the error is destined for a particular device, not a piece of
37 37 * data.
38 38 *
39 39 * This is a rather poor data structure and algorithm, but we don't expect more
40 40 * than a few faults at any one time, so it should be sufficient for our needs.
41 41 */
42 42
43 43 #include <sys/arc.h>
44 44 #include <sys/zio_impl.h>
45 45 #include <sys/zfs_ioctl.h>
46 46 #include <sys/vdev_impl.h>
47 47 #include <sys/dmu_objset.h>
48 48 #include <sys/fs/zfs.h>
49 49
50 50 uint32_t zio_injection_enabled;
51 51
52 52 /*
53 53 * Data describing each zinject handler registered on the system; it
54 54 * also contains the list node linking the handler into the global
55 55 * zinject handler list.
56 56 */
57 57 typedef struct inject_handler {
58 58 int zi_id;
59 59 spa_t *zi_spa;
60 60 zinject_record_t zi_record;
61 61 uint64_t *zi_lanes;
62 62 int zi_next_lane;
63 63 list_node_t zi_link;
64 64 } inject_handler_t;
65 65
66 66 /*
67 67 * List of all zinject handlers registered on the system, protected by
68 68 * the inject_lock defined below.
69 69 */
70 70 static list_t inject_handlers;
71 71
72 72 /*
73 73 * This protects insertion into, and traversal of, the inject handler
74 74 * list defined above; as well as the inject_delay_count. Any time a
75 75 * handler is inserted or removed from the list, this lock should be
76 76 * taken as a RW_WRITER; and any time traversal is done over the list
77 77 * (without modification to it) this lock should be taken as a RW_READER.
78 78 */
79 79 static krwlock_t inject_lock;
80 80
81 81 /*
82 82 * This holds the number of zinject delay handlers that have been
83 83 * registered on the system. It is protected by the inject_lock defined
84 84 * above. Thus modifications to this count must hold the inject_lock
85 85 * as RW_WRITER, and reads of this count must hold it as (at least)
86 86 * RW_READER.
87 87 */
88 88 static int inject_delay_count = 0;
89 89
90 90 /*
91 91 * This lock is used only in zio_handle_io_delay(), refer to the comment
92 92 * in that function for more details.
93 93 */
94 94 static kmutex_t inject_delay_mtx;
95 95
96 96 /*
97 97 * Used to assign unique identifying numbers to each new zinject handler.
98 98 */
99 99 static int inject_next_id = 1;
100 100
101 101 /*
102 102 * Returns true if the given record matches the I/O in progress.
103 103 */
104 104 static boolean_t
105 105 zio_match_handler(zbookmark_phys_t *zb, uint64_t type,
106 106 zinject_record_t *record, int error)
107 107 {
108 108 /*
109 109 * Check for a match against the MOS, which is based on type
110 110 */
111 111 if (zb->zb_objset == DMU_META_OBJSET &&
112 112 record->zi_objset == DMU_META_OBJSET &&
113 113 record->zi_object == DMU_META_DNODE_OBJECT) {
114 114 if (record->zi_type == DMU_OT_NONE ||
115 115 type == record->zi_type)
116 116 return (record->zi_freq == 0 ||
117 117 spa_get_random(100) < record->zi_freq);
118 118 else
119 119 return (B_FALSE);
120 120 }
121 121
122 122 /*
123 123 * Check for an exact match.
124 124 */
125 125 if (zb->zb_objset == record->zi_objset &&
126 126 zb->zb_object == record->zi_object &&
127 127 zb->zb_level == record->zi_level &&
128 128 zb->zb_blkid >= record->zi_start &&
129 129 zb->zb_blkid <= record->zi_end &&
130 130 error == record->zi_error)
131 131 return (record->zi_freq == 0 ||
132 132 spa_get_random(100) < record->zi_freq);
133 133
134 134 return (B_FALSE);
135 135 }
136 136
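The zi_freq handling above is probabilistic. A minimal sketch of the semantics (the 25% figure is an illustrative assumption, not from this change):

	/*
	 * zi_freq == 0  -> fire on every matching I/O.
	 * zi_freq == 25 -> fire on roughly 25% of matching I/Os,
	 * since spa_get_random(100) returns a value in [0, 100).
	 */
	boolean_t fire = (record->zi_freq == 0 ||
	    spa_get_random(100) < record->zi_freq);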
137 137 /*
138 138 * Panic the system when a config change happens in the function
139 139 * specified by tag.
140 140 */
141 141 void
142 142 zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
143 143 {
144 144 inject_handler_t *handler;
145 145
146 146 rw_enter(&inject_lock, RW_READER);
147 147
148 148 for (handler = list_head(&inject_handlers); handler != NULL;
149 149 handler = list_next(&inject_handlers, handler)) {
150 150
151 151 if (spa != handler->zi_spa)
152 152 continue;
153 153
154 154 if (handler->zi_record.zi_type == type &&
155 155 strcmp(tag, handler->zi_record.zi_func) == 0)
156 156 panic("Panic requested in function %s\n", tag);
157 157 }
158 158
159 159 rw_exit(&inject_lock);
160 160 }
161 161
162 162 /*
163 163 * Determine if the I/O in question should return failure. Returns the errno
164 164 * to be returned to the caller.
165 165 */
166 166 int
167 167 zio_handle_fault_injection(zio_t *zio, int error)
168 168 {
169 169 int ret = 0;
170 170 inject_handler_t *handler;
171 171
172 172 /*
173 173 * Ignore I/O not associated with any logical data.
174 174 */
175 175 if (zio->io_logical == NULL)
176 176 return (0);
177 177
178 178 /*
179 179 * Currently, we only support fault injection on reads.
180 180 */
181 181 if (zio->io_type != ZIO_TYPE_READ)
182 182 return (0);
183 183
184 184 rw_enter(&inject_lock, RW_READER);
185 185
186 186 for (handler = list_head(&inject_handlers); handler != NULL;
187 187 handler = list_next(&inject_handlers, handler)) {
188 188
189 189 if (zio->io_spa != handler->zi_spa ||
190 190 handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
191 191 continue;
192 192
193 193 /* If this handler matches, return EIO */
194 194 if (zio_match_handler(&zio->io_logical->io_bookmark,
195 195 zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
196 196 &handler->zi_record, error)) {
197 197 ret = error;
198 198 break;
199 199 }
200 200 }
201 201
202 202 rw_exit(&inject_lock);
203 203
204 204 return (ret);
205 205 }
206 206
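For context, a hedged sketch of how the ZIO pipeline might consult this hook on an otherwise-healthy read; the exact call site in zio.c may differ:

	/* Hypothetical call site: turn a clean read into an injected EIO. */
	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_fault_injection(zio, EIO);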
207 207 /*
208 208 * Determine if the zio is part of a label update and has an injection
209 209 * handler associated with that portion of the label. Currently, we
210 210 * allow error injection in either the nvlist or the uberblock region
211 211 * of the vdev label.
212 212 */
213 213 int
214 214 zio_handle_label_injection(zio_t *zio, int error)
215 215 {
216 216 inject_handler_t *handler;
217 217 vdev_t *vd = zio->io_vd;
218 218 uint64_t offset = zio->io_offset;
219 219 int label;
220 220 int ret = 0;
221 221
222 222 if (offset >= VDEV_LABEL_START_SIZE &&
223 223 offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
224 224 return (0);
225 225
226 226 rw_enter(&inject_lock, RW_READER);
227 227
228 228 for (handler = list_head(&inject_handlers); handler != NULL;
229 229 handler = list_next(&inject_handlers, handler)) {
230 230 uint64_t start = handler->zi_record.zi_start;
231 231 uint64_t end = handler->zi_record.zi_end;
232 232
233 233 if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
234 234 continue;
235 235
236 236 /*
237 237 * The injection region is the relative offsets within a
238 238 * vdev label. We must determine the label which is being
239 239 * updated and adjust our region accordingly.
240 240 */
241 241 label = vdev_label_number(vd->vdev_psize, offset);
242 242 start = vdev_label_offset(vd->vdev_psize, label, start);
243 243 end = vdev_label_offset(vd->vdev_psize, label, end);
244 244
245 245 if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
246 246 (offset >= start && offset <= end)) {
247 247 ret = error;
248 248 break;
249 249 }
250 250 }
251 251 rw_exit(&inject_lock);
252 252 return (ret);
253 253 }
254 254
255 255
256 256 int
257 257 zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
258 258 {
259 259 inject_handler_t *handler;
260 260 int ret = 0;
261 261
262 262 /*
263 263 * We skip over faults in the labels unless it's during
264 264 * device open (i.e. zio == NULL).
265 265 */
266 266 if (zio != NULL) {
267 267 uint64_t offset = zio->io_offset;
268 268
269 269 if (offset < VDEV_LABEL_START_SIZE ||
270 270 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
271 271 return (0);
272 272 }
273 273
274 274 rw_enter(&inject_lock, RW_READER);
275 275
276 276 for (handler = list_head(&inject_handlers); handler != NULL;
277 277 handler = list_next(&inject_handlers, handler)) {
278 278
279 279 if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
280 280 continue;
281 281
282 282 if (vd->vdev_guid == handler->zi_record.zi_guid) {
283 283 if (handler->zi_record.zi_failfast &&
284 284 (zio == NULL || (zio->io_flags &
285 285 (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
286 286 continue;
287 287 }
288 288
289 289 /* Handle type specific I/O failures */
290 290 if (zio != NULL &&
291 291 handler->zi_record.zi_iotype != ZIO_TYPES &&
292 292 handler->zi_record.zi_iotype != zio->io_type)
293 293 continue;
294 294
295 295 if (handler->zi_record.zi_error == error) {
296 296 /*
297 297 * For a failed open, pretend like the device
298 298 * has gone away.
299 299 */
300 300 if (error == ENXIO)
301 301 vd->vdev_stat.vs_aux =
302 302 VDEV_AUX_OPEN_FAILED;
303 303
304 304 /*
305 305 * Treat these errors as if they had been
306 306 * retried so that all the appropriate stats
307 307 * and FMA events are generated.
308 308 */
309 309 if (!handler->zi_record.zi_failfast &&
310 310 zio != NULL)
311 311 zio->io_flags |= ZIO_FLAG_IO_RETRY;
312 312
313 313 ret = error;
314 314 break;
315 315 }
316 316 if (handler->zi_record.zi_error == ENXIO) {
317 317 ret = SET_ERROR(EIO);
318 318 break;
319 319 }
320 320 }
321 321 }
322 322
323 323 rw_exit(&inject_lock);
324 324
325 325 return (ret);
326 326 }
327 327
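A minimal sketch of a record this function would match, using only fields the loop above consults (the vdev GUID is a placeholder):

	zinject_record_t rec = { 0 };

	rec.zi_cmd = ZINJECT_DEVICE_FAULT;
	rec.zi_guid = vdev_guid;	/* placeholder: target vdev's GUID */
	rec.zi_error = ENXIO;		/* errno to inject */
	rec.zi_iotype = ZIO_TYPES;	/* ZIO_TYPES matches any I/O type */
	rec.zi_failfast = B_FALSE;	/* inject even on retried I/Os */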
328 328 /*
329 329 * Simulate hardware that ignores cache flushes. For the requested
330 330 * number of seconds, nix the actual writing to disk.
331 331 */
332 332 void
333 333 zio_handle_ignored_writes(zio_t *zio)
334 334 {
335 335 inject_handler_t *handler;
336 336
337 337 rw_enter(&inject_lock, RW_READER);
338 338
339 339 for (handler = list_head(&inject_handlers); handler != NULL;
340 340 handler = list_next(&inject_handlers, handler)) {
341 341
342 342 /* Ignore errors not destined for this pool */
343 343 if (zio->io_spa != handler->zi_spa ||
344 344 handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
345 345 continue;
346 346
347 347 /*
348 348 * A positive duration implies a number of seconds; a negative
349 349 * duration implies a number of txgs.
350 350 */
351 351 if (handler->zi_record.zi_timer == 0) {
352 352 if (handler->zi_record.zi_duration > 0)
353 353 handler->zi_record.zi_timer = ddi_get_lbolt64();
354 354 else
355 355 handler->zi_record.zi_timer = zio->io_txg;
356 356 }
357 357
358 358 /* Have a "problem" writing 60% of the time */
359 359 if (spa_get_random(100) < 60)
360 360 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
361 361 break;
362 362 }
363 363
364 364 rw_exit(&inject_lock);
365 365 }
366 366
367 367 void
368 368 spa_handle_ignored_writes(spa_t *spa)
369 369 {
370 370 inject_handler_t *handler;
371 371
372 372 if (zio_injection_enabled == 0)
373 373 return;
374 374
375 375 rw_enter(&inject_lock, RW_READER);
376 376
377 377 for (handler = list_head(&inject_handlers); handler != NULL;
378 378 handler = list_next(&inject_handlers, handler)) {
379 379
380 380 if (spa != handler->zi_spa ||
381 381 handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
382 382 continue;
383 383
384 384 if (handler->zi_record.zi_duration > 0) {
385 385 VERIFY(handler->zi_record.zi_timer == 0 ||
386 386 handler->zi_record.zi_timer +
387 387 handler->zi_record.zi_duration * hz >
388 388 ddi_get_lbolt64());
389 389 } else {
390 390 /* duration is negative so the subtraction here adds */
391 391 VERIFY(handler->zi_record.zi_timer == 0 ||
392 392 handler->zi_record.zi_timer -
393 393 handler->zi_record.zi_duration >=
394 394 spa_syncing_txg(spa));
395 395 }
396 396 }
397 397
398 398 rw_exit(&inject_lock);
399 399 }
400 400
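To make the negative-duration arithmetic concrete, a worked example under assumed values:

	/*
	 * Assume zi_duration == -2 (two txgs) and the handler was armed
	 * at txg 100, so zi_timer == 100. The VERIFY above requires
	 *	100 - (-2) == 102 >= spa_syncing_txg(spa),
	 * i.e. the "ignore writes" window must end by txg 102.
	 */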
401 401 hrtime_t
402 402 zio_handle_io_delay(zio_t *zio)
403 403 {
404 404 vdev_t *vd = zio->io_vd;
405 405 inject_handler_t *min_handler = NULL;
406 406 hrtime_t min_target = 0;
407 407
408 408 rw_enter(&inject_lock, RW_READER);
409 409
410 410 /*
411 411 * inject_delay_count is a subset of zio_injection_enabled that
412 412 * is only incremented for delay handlers. These checks are
413 413 * mainly added to remind the reader why we're not explicitly
414 414 * checking zio_injection_enabled like the other functions.
415 415 */
416 416 IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
417 417 IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);
418 418
419 419 /*
420 420 * If there aren't any inject delay handlers registered, then we
421 421 * can short-circuit and simply return 0 here. A value of zero
422 422 * informs zio_delay_interrupt() that this request should not be
423 423 * delayed. This short circuit keeps us from acquiring the
424 424 * inject_delay_mtx unnecessarily.
425 425 */
426 426 if (inject_delay_count == 0) {
427 427 rw_exit(&inject_lock);
428 428 return (0);
429 429 }
430 430
431 431 /*
432 432 * Each inject handler has a number of "lanes" associated with
433 433 * it. Each lane is able to handle requests independently of one
434 434 * another, and at a latency defined by the inject handler
435 435 * record's zi_timer field. Thus if a handler is configured with
436 436 * a single lane with a 10ms latency, it will delay requests
437 437 * such that only a single request is completed every 10ms. So,
438 438 * if more than one request is attempted in each 10ms interval,
439 439 * the average latency of the requests will be greater than
440 440 * 10ms; but if only a single request is submitted each 10ms
441 441 * interval the average latency will be 10ms.
442 442 *
443 443 * We need to acquire this mutex to prevent multiple concurrent
444 444 * threads being assigned to the same lane of a given inject
445 445 * handler. The mutex allows us to perform the following two
446 446 * operations atomically:
447 447 *
448 448 * 1. determine the minimum handler and minimum target
449 449 * value of all the possible handlers
450 450 * 2. update that minimum handler's lane array
451 451 *
452 452 * Without atomicity, two (or more) threads could pick the same
453 453 * lane in step (1), and then conflict with each other in step
454 454 * (2). This could allow a single lane handler to process
455 455 * multiple requests simultaneously, which shouldn't be possible.
456 456 */
457 457 mutex_enter(&inject_delay_mtx);
458 458
459 459 for (inject_handler_t *handler = list_head(&inject_handlers);
460 460 handler != NULL; handler = list_next(&inject_handlers, handler)) {
461 461 if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
462 462 continue;
463 463
464 464 if (vd->vdev_guid != handler->zi_record.zi_guid)
465 465 continue;
466 466
467 467 /*
468 468 * Defensive; should never happen as the array allocation
469 469 * occurs prior to inserting this handler on the list.
470 470 */
471 471 ASSERT3P(handler->zi_lanes, !=, NULL);
472 472
473 473 /*
474 474 * This should never happen, the zinject command should
475 475 * prevent a user from setting an IO delay with zero lanes.
476 476 */
477 477 ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);
478 478
479 479 ASSERT3U(handler->zi_record.zi_nlanes, >,
480 480 handler->zi_next_lane);
481 481
482 482 /*
483 483 * We want to issue this IO to the lane that will become
484 484 * idle the soonest, so we compare the soonest this
485 485 * specific handler can complete the IO with all other
486 486 * handlers, to find the lowest value of all possible
487 487 * lanes. We then use this lane to submit the request.
488 488 *
489 489 * Since each handler has a constant value for its
490 490 * delay, we can just use the "next" lane for that
491 491 * handler; as it will always be the lane with the
492 492 * lowest value for that particular handler (i.e. the
493 493 * lane that will become idle the soonest). This saves a
494 494 * scan of each handler's lanes array.
495 495 *
496 496 * There are two cases to consider when determining when
497 497 * this specific IO request should complete. If this
498 498 * lane is idle, we want to "submit" the request now so
499 499 * it will complete after zi_timer nanoseconds. Thus,
500 500 * we set the target to now + zi_timer.
501 501 *
502 502 * If the lane is busy, we want this request to complete
503 503 * zi_timer nanoseconds after the lane becomes idle.
504 504 * Since the 'zi_lanes' array holds the time at which
505 505 * each lane will become idle, we use that value to
506 506 * determine when this request should complete.
507 507 */
508 508 hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
509 509 hrtime_t busy = handler->zi_record.zi_timer +
510 510 handler->zi_lanes[handler->zi_next_lane];
511 511 hrtime_t target = MAX(idle, busy);
512 512
513 513 if (min_handler == NULL) {
514 514 min_handler = handler;
515 515 min_target = target;
516 516 continue;
517 517 }
518 518
519 519 ASSERT3P(min_handler, !=, NULL);
520 520 ASSERT3U(min_target, !=, 0);
521 521
522 522 /*
523 523 * We don't yet increment the "next lane" variable since
524 524 * we still might find a lower value lane in another
525 525 * handler during any remaining iterations. Once we're
526 526 * sure we've selected the absolute minimum, we'll claim
527 527 * the lane and increment the handler's "next lane"
528 528 * field below.
529 529 */
530 530
531 531 if (target < min_target) {
532 532 min_handler = handler;
533 533 min_target = target;
534 534 }
535 535 }
536 536
537 537 /*
538 538 * 'min_handler' will be NULL if no IO delays are registered for
539 539 * this vdev, otherwise it will point to the handler containing
540 540 * the lane that will become idle the soonest.
541 541 */
542 542 if (min_handler != NULL) {
543 543 ASSERT3U(min_target, !=, 0);
544 544 min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;
545 545
546 546 /*
547 547 * If we've used all possible lanes for this handler,
548 548 * loop back and start using the first lane again;
549 549 * otherwise, just increment the lane index.
550 550 */
551 551 min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
552 552 min_handler->zi_record.zi_nlanes;
553 553 }
554 554
555 555 mutex_exit(&inject_delay_mtx);
556 556 rw_exit(&inject_lock);
557 557
558 558 return (min_target);
559 559 }
560 560
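A worked example of the lane arithmetic, under assumed values (one handler, zi_timer of 10ms, two lanes, both idle at time t0):

	/*
	 * req1 -> lane 0: idle, so target = t0 + 10ms; lane 0 is then
	 *	   busy until t0 + 10ms.
	 * req2 -> lane 1: idle, so target = t0 + 10ms.
	 * req3 -> lane 0: busy, so target = (t0 + 10ms) + 10ms
	 *	   = t0 + 20ms.
	 *
	 * Two requests can thus complete per 10ms window: one per lane
	 * per zi_timer interval, as described in the comments above.
	 */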
561 561 /*
562 562 * Create a new handler for the given record. We add it to the list, adding
563 563 * a reference to the spa_t in the process. We increment zio_injection_enabled,
564 564 * which is the switch to trigger all fault injection.
565 565 */
566 566 int
567 567 zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
568 568 {
569 569 inject_handler_t *handler;
570 570 int error;
571 571 spa_t *spa;
572 572
573 573 /*
574 574 * If this is pool-wide metadata, make sure we unload the corresponding
575 575 * spa_t, so that the next attempt to load it will trigger the fault.
576 576 * We call spa_reset() to unload the pool appropriately.
577 577 */
578 578 if (flags & ZINJECT_UNLOAD_SPA)
579 579 if ((error = spa_reset(name)) != 0)
580 580 return (error);
581 581
582 582 if (record->zi_cmd == ZINJECT_DELAY_IO) {
583 583 /*
584 584 * A value of zero for the number of lanes or for the
585 585 * delay time doesn't make sense.
586 586 */
587 587 if (record->zi_timer == 0 || record->zi_nlanes == 0)
588 588 return (SET_ERROR(EINVAL));
589 589
590 590 /*
591 591 * The number of lanes is directly mapped to the size of
592 592 * an array used by the handler. Thus, to ensure the
593 593 * user doesn't trigger an allocation that's "too large"
594 594 * we cap the number of lanes here.
595 595 */
596 596 if (record->zi_nlanes >= UINT16_MAX)
597 597 return (SET_ERROR(EINVAL));
598 598 }
599 599
600 600 if (!(flags & ZINJECT_NULL)) {
601 601 /*
602 602 * spa_inject_addref() will add an injection reference, which will
603 603 * prevent the pool from being removed from the namespace while
604 604 * still allowing it to be unloaded.
605 605 */
606 606 if ((spa = spa_inject_addref(name)) == NULL)
607 607 return (SET_ERROR(ENOENT));
608 608
609 609 handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);
610 610
611 611 handler->zi_spa = spa;
612 612 handler->zi_record = *record;
613 613
614 614 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
615 615 handler->zi_lanes = kmem_zalloc(
616 616 sizeof (*handler->zi_lanes) *
617 617 handler->zi_record.zi_nlanes, KM_SLEEP);
618 618 handler->zi_next_lane = 0;
619 619 } else {
620 620 handler->zi_lanes = NULL;
621 621 handler->zi_next_lane = 0;
622 622 }
623 623
624 624 rw_enter(&inject_lock, RW_WRITER);
625 625
626 626 /*
627 627 * We can't move this increment into the conditional
628 628 * above because we need to hold the RW_WRITER lock of
629 629 * inject_lock, and we don't want to hold that while
630 630 * allocating the handler's zi_lanes array.
631 631 */
632 632 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
633 633 ASSERT3S(inject_delay_count, >=, 0);
634 634 inject_delay_count++;
635 635 ASSERT3S(inject_delay_count, >, 0);
636 636 }
637 637
638 638 *id = handler->zi_id = inject_next_id++;
639 639 list_insert_tail(&inject_handlers, handler);
640 640 atomic_inc_32(&zio_injection_enabled);
641 641
642 642 rw_exit(&inject_lock);
643 643 }
644 644
645 645 /*
646 646 * Flush the ARC, so that any attempts to read this data will end up
647 647 * going to the ZIO layer. Note that this is a little overkill, but
648 648 * we don't have the necessary ARC interfaces to do anything else, and
649 649 * fault injection isn't a performance critical path.
650 650 */
651 651 if (flags & ZINJECT_FLUSH_ARC)
652 652 /*
653 653 * We must use B_FALSE to ensure arc_flush returns, since
654 654 * we're not preventing concurrent ARC insertions.
655 655 */
656 - arc_flush(NULL, FALSE);
656 + arc_flush(NULL, B_FALSE);
657 657
658 658 return (0);
659 659 }
660 660
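A hedged sketch of registering a delay handler through this function; the pool name and vdev GUID are placeholders, and real consumers reach this code via the zinject ioctl path:

	zinject_record_t rec = { 0 };
	int id, error;

	rec.zi_cmd = ZINJECT_DELAY_IO;
	rec.zi_guid = vdev_guid;	/* placeholder: target vdev's GUID */
	rec.zi_timer = MSEC2NSEC(10);	/* added to gethrtime(), so hrtime units */
	rec.zi_nlanes = 2;		/* must be nonzero and < UINT16_MAX */

	error = zio_inject_fault("tank", 0, &id, &rec);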
661 661 /*
662 662 * Returns the next record with an ID greater than that supplied to the
663 663 * function. Used to iterate over all handlers in the system.
664 664 */
665 665 int
666 666 zio_inject_list_next(int *id, char *name, size_t buflen,
667 667 zinject_record_t *record)
668 668 {
669 669 inject_handler_t *handler;
670 670 int ret;
671 671
672 672 mutex_enter(&spa_namespace_lock);
673 673 rw_enter(&inject_lock, RW_READER);
674 674
675 675 for (handler = list_head(&inject_handlers); handler != NULL;
676 676 handler = list_next(&inject_handlers, handler))
677 677 if (handler->zi_id > *id)
678 678 break;
679 679
680 680 if (handler) {
681 681 *record = handler->zi_record;
682 682 *id = handler->zi_id;
683 683 (void) strncpy(name, spa_name(handler->zi_spa), buflen);
684 684 ret = 0;
685 685 } else {
686 686 ret = SET_ERROR(ENOENT);
687 687 }
688 688
689 689 rw_exit(&inject_lock);
690 690 mutex_exit(&spa_namespace_lock);
691 691
692 692 return (ret);
693 693 }
694 694
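A minimal sketch of walking every registered handler with this interface; since IDs are assigned in increasing order and handlers are appended at the tail, the loop below visits each one:

	zinject_record_t rec;
	char pool[MAXNAMELEN];
	int id = 0;

	while (zio_inject_list_next(&id, pool, sizeof (pool), &rec) == 0) {
		/* inspect 'rec' and 'pool' for handler 'id' here */
	}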
695 695 /*
696 696 * Clear the fault handler with the given identifier, or return ENOENT if none
697 697 * exists.
698 698 */
699 699 int
700 700 zio_clear_fault(int id)
701 701 {
702 702 inject_handler_t *handler;
703 703
704 704 rw_enter(&inject_lock, RW_WRITER);
705 705
706 706 for (handler = list_head(&inject_handlers); handler != NULL;
707 707 handler = list_next(&inject_handlers, handler))
708 708 if (handler->zi_id == id)
709 709 break;
710 710
711 711 if (handler == NULL) {
712 712 rw_exit(&inject_lock);
713 713 return (SET_ERROR(ENOENT));
714 714 }
715 715
716 716 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
717 717 ASSERT3S(inject_delay_count, >, 0);
718 718 inject_delay_count--;
719 719 ASSERT3S(inject_delay_count, >=, 0);
720 720 }
721 721
722 722 list_remove(&inject_handlers, handler);
723 723 rw_exit(&inject_lock);
724 724
725 725 if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
726 726 ASSERT3P(handler->zi_lanes, !=, NULL);
727 727 kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
728 728 handler->zi_record.zi_nlanes);
729 729 } else {
730 730 ASSERT3P(handler->zi_lanes, ==, NULL);
731 731 }
732 732
733 733 spa_inject_delref(handler->zi_spa);
734 734 kmem_free(handler, sizeof (inject_handler_t));
735 735 atomic_dec_32(&zio_injection_enabled);
736 736
737 737 return (0);
738 738 }
739 739
740 740 void
741 741 zio_inject_init(void)
742 742 {
743 743 rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
744 744 mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
745 745 list_create(&inject_handlers, sizeof (inject_handler_t),
746 746 offsetof(inject_handler_t, zi_link));
747 747 }
748 748
749 749 void
750 750 zio_inject_fini(void)
751 751 {
752 752 list_destroy(&inject_handlers);
753 753 mutex_destroy(&inject_delay_mtx);
754 754 rw_destroy(&inject_lock);
755 755 }