--- old/usr/src/uts/common/io/scsi/targets/sd.c
+++ new/usr/src/uts/common/io/scsi/targets/sd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 +
25 26 /*
27 + * Copyright 2011 cyril.galibern@opensvc.com
26 28 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 29 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
28 30 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
29 - * Copyright 2017 Nexenta Systems, Inc.
31 + * Copyright 2019 Nexenta Systems, Inc.
30 32 */
31 -/*
32 - * Copyright 2011 cyril.galibern@opensvc.com
33 - */
34 33
35 34 /*
36 35 * SCSI disk target driver.
37 36 */
38 -#include <sys/scsi/scsi.h>
37 +#include <sys/aio_req.h>
38 +#include <sys/byteorder.h>
39 +#include <sys/cdio.h>
40 +#include <sys/cmlb.h>
41 +#include <sys/debug.h>
39 42 #include <sys/dkbad.h>
40 -#include <sys/dklabel.h>
41 43 #include <sys/dkio.h>
42 -#include <sys/fdio.h>
43 -#include <sys/cdio.h>
44 -#include <sys/mhd.h>
45 -#include <sys/vtoc.h>
44 +#include <sys/dkioc_free_util.h>
45 +#include <sys/dklabel.h>
46 46 #include <sys/dktp/fdisk.h>
47 +#include <sys/efi_partition.h>
48 +#include <sys/fdio.h>
49 +#include <sys/fm/protocol.h>
50 +#include <sys/fs/dv_node.h>
47 51 #include <sys/kstat.h>
48 -#include <sys/vtrace.h>
49 -#include <sys/note.h>
50 -#include <sys/thread.h>
52 +#include <sys/mhd.h>
51 53 #include <sys/proc.h>
52 -#include <sys/efi_partition.h>
53 -#include <sys/var.h>
54 -#include <sys/aio_req.h>
55 -
56 -#ifdef __lock_lint
57 -#define _LP64
58 -#define __amd64
59 -#endif
60 -
61 -#if (defined(__fibre))
62 -/* Note: is there a leadville version of the following? */
63 -#include <sys/fc4/fcal_linkapp.h>
64 -#endif
54 +#include <sys/scsi/scsi.h>
55 +#include <sys/scsi/targets/sddef.h>
56 +#include <sys/sdt.h>
57 +#include <sys/sysevent/dev.h>
58 +#include <sys/sysevent/eventdefs.h>
65 59 #include <sys/taskq.h>
60 +#include <sys/thread.h>
66 61 #include <sys/uuid.h>
67 -#include <sys/byteorder.h>
68 -#include <sys/sdt.h>
62 +#include <sys/var.h>
63 +#include <sys/vtoc.h>
64 +#include <sys/vtrace.h>
69 65
70 66 #include "sd_xbuf.h"
71 67
72 -#include <sys/scsi/targets/sddef.h>
73 -#include <sys/cmlb.h>
74 -#include <sys/sysevent/eventdefs.h>
75 -#include <sys/sysevent/dev.h>
76 -
77 -#include <sys/fm/protocol.h>
78 -
79 -/*
80 - * Loadable module info.
81 - */
82 -#if (defined(__fibre))
83 -#define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
84 -#else /* !__fibre */
85 68 #define SD_MODULE_NAME "SCSI Disk Driver"
86 -#endif /* !__fibre */
69 +static char *sd_label = "sd";
87 70
88 71 /*
89 - * Define the interconnect type, to allow the driver to distinguish
90 - * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
91 - *
92 - * This is really for backward compatibility. In the future, the driver
93 - * should actually check the "interconnect-type" property as reported by
94 - * the HBA; however at present this property is not defined by all HBAs,
95 - * so we will use this #define (1) to permit the driver to run in
96 - * backward-compatibility mode; and (2) to print a notification message
97 - * if an FC HBA does not support the "interconnect-type" property. The
98 - * behavior of the driver will be to assume parallel SCSI behaviors unless
99 - * the "interconnect-type" property is defined by the HBA **AND** has a
100 - * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
101 - * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
102 - * Channel behaviors (as per the old ssd). (Note that the
103 - * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
104 - * will result in the driver assuming parallel SCSI behaviors.)
105 - *
106 - * (see common/sys/scsi/impl/services.h)
107 - *
108 - * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
109 - * since some FC HBAs may already support that, and there is some code in
110 - * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
111 - * default would confuse that code, and besides things should work fine
112 - * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
113 - * "interconnect_type" property.
114 - *
115 - */
116 -#if (defined(__fibre))
117 -#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
118 -#else
119 -#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
120 -#endif
121 -
122 -/*
123 - * The name of the driver, established from the module name in _init.
124 - */
125 -static char *sd_label = NULL;
126 -
127 -/*
128 - * Driver name is unfortunately prefixed on some driver.conf properties.
129 - */
130 -#if (defined(__fibre))
131 -#define sd_max_xfer_size ssd_max_xfer_size
132 -#define sd_config_list ssd_config_list
133 -static char *sd_max_xfer_size = "ssd_max_xfer_size";
134 -static char *sd_config_list = "ssd-config-list";
135 -#else
136 -static char *sd_max_xfer_size = "sd_max_xfer_size";
137 -static char *sd_config_list = "sd-config-list";
138 -#endif
139 -
140 -/*
141 72 * Driver global variables
142 73 */
143 74
144 -#if (defined(__fibre))
145 -/*
146 - * These #defines are to avoid namespace collisions that occur because this
147 - * code is currently used to compile two separate driver modules: sd and ssd.
148 - * All global variables need to be treated this way (even if declared static)
149 - * in order to allow the debugger to resolve the names properly.
150 - * It is anticipated that in the near future the ssd module will be obsoleted,
151 - * at which time this namespace issue should go away.
152 - */
153 -#define sd_state ssd_state
154 -#define sd_io_time ssd_io_time
155 -#define sd_failfast_enable ssd_failfast_enable
156 -#define sd_ua_retry_count ssd_ua_retry_count
157 -#define sd_report_pfa ssd_report_pfa
158 -#define sd_max_throttle ssd_max_throttle
159 -#define sd_min_throttle ssd_min_throttle
160 -#define sd_rot_delay ssd_rot_delay
161 -
162 -#define sd_retry_on_reservation_conflict \
163 - ssd_retry_on_reservation_conflict
164 -#define sd_reinstate_resv_delay ssd_reinstate_resv_delay
165 -#define sd_resv_conflict_name ssd_resv_conflict_name
166 -
167 -#define sd_component_mask ssd_component_mask
168 -#define sd_level_mask ssd_level_mask
169 -#define sd_debug_un ssd_debug_un
170 -#define sd_error_level ssd_error_level
171 -
172 -#define sd_xbuf_active_limit ssd_xbuf_active_limit
173 -#define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit
174 -
175 -#define sd_tr ssd_tr
176 -#define sd_reset_throttle_timeout ssd_reset_throttle_timeout
177 -#define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
178 -#define sd_qfull_throttle_enable ssd_qfull_throttle_enable
179 -#define sd_check_media_time ssd_check_media_time
180 -#define sd_wait_cmds_complete ssd_wait_cmds_complete
181 -#define sd_label_mutex ssd_label_mutex
182 -#define sd_detach_mutex ssd_detach_mutex
183 -#define sd_log_buf ssd_log_buf
184 -#define sd_log_mutex ssd_log_mutex
185 -
186 -#define sd_disk_table ssd_disk_table
187 -#define sd_disk_table_size ssd_disk_table_size
188 -#define sd_sense_mutex ssd_sense_mutex
189 -#define sd_cdbtab ssd_cdbtab
190 -
191 -#define sd_cb_ops ssd_cb_ops
192 -#define sd_ops ssd_ops
193 -#define sd_additional_codes ssd_additional_codes
194 -#define sd_tgops ssd_tgops
195 -
196 -#define sd_minor_data ssd_minor_data
197 -#define sd_minor_data_efi ssd_minor_data_efi
198 -
199 -#define sd_tq ssd_tq
200 -#define sd_wmr_tq ssd_wmr_tq
201 -#define sd_taskq_name ssd_taskq_name
202 -#define sd_wmr_taskq_name ssd_wmr_taskq_name
203 -#define sd_taskq_minalloc ssd_taskq_minalloc
204 -#define sd_taskq_maxalloc ssd_taskq_maxalloc
205 -
206 -#define sd_dump_format_string ssd_dump_format_string
207 -
208 -#define sd_iostart_chain ssd_iostart_chain
209 -#define sd_iodone_chain ssd_iodone_chain
210 -
211 -#define sd_pm_idletime ssd_pm_idletime
212 -
213 -#define sd_force_pm_supported ssd_force_pm_supported
214 -
215 -#define sd_dtype_optical_bind ssd_dtype_optical_bind
216 -
217 -#define sd_ssc_init ssd_ssc_init
218 -#define sd_ssc_send ssd_ssc_send
219 -#define sd_ssc_fini ssd_ssc_fini
220 -#define sd_ssc_assessment ssd_ssc_assessment
221 -#define sd_ssc_post ssd_ssc_post
222 -#define sd_ssc_print ssd_ssc_print
223 -#define sd_ssc_ereport_post ssd_ssc_ereport_post
224 -#define sd_ssc_set_info ssd_ssc_set_info
225 -#define sd_ssc_extract_info ssd_ssc_extract_info
226 -
227 -#endif
228 -
229 75 #ifdef SDDEBUG
230 76 int sd_force_pm_supported = 0;
231 77 #endif /* SDDEBUG */
232 78
233 79 void *sd_state = NULL;
234 80 int sd_io_time = SD_IO_TIME;
235 -int sd_failfast_enable = 1;
236 81 int sd_ua_retry_count = SD_UA_RETRY_COUNT;
237 82 int sd_report_pfa = 1;
238 83 int sd_max_throttle = SD_MAX_THROTTLE;
239 84 int sd_min_throttle = SD_MIN_THROTTLE;
240 85 int sd_rot_delay = 4; /* Default 4ms Rotation delay */
241 86 int sd_qfull_throttle_enable = TRUE;
242 87
243 88 int sd_retry_on_reservation_conflict = 1;
244 89 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
245 -_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
90 +int sd_enable_lun_reset = FALSE;
246 91
247 -static int sd_dtype_optical_bind = -1;
92 +/*
93 + * Default safe I/O delay threshold of 30s for all devices.
        94 + * Can be overridden per vendor/device ID in sd.conf
95 + */
96 +hrtime_t sd_slow_io_threshold = 30LL * NANOSEC;
248 97
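
A minimal sketch of how a threshold like this is typically consumed, assuming the driver saved a gethrtime(9F) timestamp when the command was issued; the helper name is hypothetical and this is not the driver's actual slow-I/O path:

    static boolean_t
    sd_io_was_slow(hrtime_t start_hrt)	/* hypothetical helper */
    {
    	/* gethrtime(9F) and sd_slow_io_threshold are both in nanoseconds */
    	return (gethrtime() - start_hrt > sd_slow_io_threshold);
    }
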
249 -/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
250 -static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";
251 -
252 98 /*
253 99 * Global data for debug logging. To enable debug printing, sd_component_mask
254 100 * and sd_level_mask should be set to the desired bit patterns as outlined in
255 101 * sddef.h.
256 102 */
257 103 uint_t sd_component_mask = 0x0;
258 104 uint_t sd_level_mask = 0x0;
259 105 struct sd_lun *sd_debug_un = NULL;
260 106 uint_t sd_error_level = SCSI_ERR_RETRYABLE;
261 107
262 108 /* Note: these may go away in the future... */
263 109 static uint32_t sd_xbuf_active_limit = 512;
264 110 static uint32_t sd_xbuf_reserve_limit = 16;
265 111
266 112 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };
267 113
268 114 /*
269 115 * Timer value used to reset the throttle after it has been reduced
270 116 * (typically in response to TRAN_BUSY or STATUS_QFULL)
271 117 */
272 118 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
273 119 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;
274 120
275 121 /*
276 122 * Interval value associated with the media change scsi watch.
277 123 */
278 124 static int sd_check_media_time = 3000000;
279 125
280 126 /*
281 127 * Wait value used for in progress operations during a DDI_SUSPEND
282 128 */
283 129 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;
284 130
285 131 /*
286 132 * sd_label_mutex protects a static buffer used in the disk label
287 133 * component of the driver
288 134 */
289 135 static kmutex_t sd_label_mutex;
290 136
291 137 /*
292 138 * sd_detach_mutex protects un_layer_count, un_detach_count, and
293 139 * un_opens_in_progress in the sd_lun structure.
294 140 */
295 141 static kmutex_t sd_detach_mutex;
296 142
297 -_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
298 - sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
299 -
300 143 /*
301 144 * Global buffer and mutex for debug logging
302 145 */
303 146 static char sd_log_buf[1024];
304 147 static kmutex_t sd_log_mutex;
305 148
306 149 /*
307 150 * Structs and globals for recording attached lun information.
308 151 * This maintains a chain. Each node in the chain represents a SCSI controller.
309 152 * The structure records the number of luns attached to each target connected
310 153 * with the controller.
 311  154 	 * For parallel SCSI devices only.
312 155 */
313 156 struct sd_scsi_hba_tgt_lun {
314 157 struct sd_scsi_hba_tgt_lun *next;
315 158 dev_info_t *pdip;
316 159 int nlun[NTARGETS_WIDE];
317 160 };
318 161
319 162 /*
320 163 * Flag to indicate the lun is attached or detached
321 164 */
322 165 #define SD_SCSI_LUN_ATTACH 0
323 166 #define SD_SCSI_LUN_DETACH 1
324 167
325 168 static kmutex_t sd_scsi_target_lun_mutex;
326 169 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;
327 170
328 -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
329 - sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
330 -
331 -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
332 - sd_scsi_target_lun_head))
333 -
334 171 /*
335 172 * "Smart" Probe Caching structs, globals, #defines, etc.
 336  173 	 * For parallel SCSI and non-self-identifying devices only.
337 174 */
338 175
339 176 /*
340 177 * The following resources and routines are implemented to support
341 178 * "smart" probing, which caches the scsi_probe() results in an array,
342 179 * in order to help avoid long probe times.
343 180 */
344 181 struct sd_scsi_probe_cache {
345 182 struct sd_scsi_probe_cache *next;
346 183 dev_info_t *pdip;
347 184 int cache[NTARGETS_WIDE];
348 185 };
349 186
350 187 static kmutex_t sd_scsi_probe_cache_mutex;
351 188 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
352 189
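
A simplified sketch of the caching idea behind these declarations, assuming the usual scsi_probe(9F) return codes; the real sd_scsi_probe_with_cache() additionally allocates chain nodes and records fresh results, which is omitted here:

    static int
    probe_with_cache_sketch(dev_info_t *pdip, int tgt,
        struct scsi_device *devp, int (*waitfn)())
    {
    	struct sd_scsi_probe_cache *cp;

    	mutex_enter(&sd_scsi_probe_cache_mutex);
    	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
    		/* A cached non-EXISTS result lets us skip a slow re-probe. */
    		if (cp->pdip == pdip && cp->cache[tgt] != SCSIPROBE_EXISTS) {
    			int rv = cp->cache[tgt];
    			mutex_exit(&sd_scsi_probe_cache_mutex);
    			return (rv);
    		}
    	}
    	mutex_exit(&sd_scsi_probe_cache_mutex);
    	return (scsi_probe(devp, waitfn));	/* no useful cache entry */
    }
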
353 190 /*
354 - * Really we only need protection on the head of the linked list, but
355 - * better safe than sorry.
191 + * Create taskq for all targets in the system. This is created at
192 + * _init(9E) and destroyed at _fini(9E).
193 + *
194 + * Note: here we set the minalloc to a reasonably high number to ensure that
195 + * we will have an adequate supply of task entries available at interrupt time.
196 + * This is used in conjunction with the TASKQ_PREPOPULATE flag in
        197 + * sd_taskq_create(). Since we do not want to sleep for allocations at
198 + * interrupt time, set maxalloc equal to minalloc. That way we will just fail
199 + * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
        200 + * requests at any one instant in time.
356 201 */
357 -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
358 - sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
202 +#define SD_TASKQ_NUMTHREADS 8
203 +#define SD_TASKQ_MINALLOC 256
204 +#define SD_TASKQ_MAXALLOC 256
359 205
360 -_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
361 - sd_scsi_probe_cache_head))
206 +static taskq_t *sd_tq = NULL;
362 207
208 +static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
209 +static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
210 +
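
For reference, the _init(9E)-time creation the comment above describes comes down to a taskq_create(9F) call of roughly this shape (the queue name string is illustrative):

    /*
     * TASKQ_PREPOPULATE pre-allocates minalloc entries so interrupt-time
     * dispatches never sleep for memory; with maxalloc == minalloc, any
     * dispatch beyond that supply simply fails instead of blocking.
     */
    sd_tq = taskq_create("sd_taskq", SD_TASKQ_NUMTHREADS, minclsyspri,
        sd_taskq_minalloc, sd_taskq_maxalloc, TASKQ_PREPOPULATE);
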
211 +#define SD_BAIL_CHECK(a) if ((a)->un_detach_count != 0) { \
212 + mutex_exit(SD_MUTEX((a))); \
213 + return (ENXIO); \
214 + }
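
Because SD_BAIL_CHECK() drops SD_MUTEX and returns on its bail-out path, it is only meaningful in a caller that already holds the mutex; a minimal usage sketch with a hypothetical entry point:

    static int
    sd_example_entry(struct sd_lun *un)	/* hypothetical caller */
    {
    	mutex_enter(SD_MUTEX(un));
    	SD_BAIL_CHECK(un);	/* exits the mutex, returns ENXIO on detach */
    	/* ... normal processing; un_detach_count was zero ... */
    	mutex_exit(SD_MUTEX(un));
    	return (0);
    }
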
363 215 /*
216 + * The following task queue is being created for the write part of
217 + * read-modify-write of non-512 block size devices.
        218 + * Limit the number of threads to 1 for now; this queue currently applies
        219 + * only to DVD-RAM and MO drives, for which performance is not the main
        220 + * criterion at this stage.
        221 + * Note: It remains to be explored whether a single taskq could be used.
222 + */
223 +#define SD_WMR_TASKQ_NUMTHREADS 1
224 +static taskq_t *sd_wmr_tq = NULL;
225 +
226 +/*
364 227 * Power attribute table
365 228 */
366 229 static sd_power_attr_ss sd_pwr_ss = {
367 230 { "NAME=spindle-motor", "0=off", "1=on", NULL },
368 231 {0, 100},
369 232 {30, 0},
370 233 {20000, 0}
371 234 };
372 235
373 236 static sd_power_attr_pc sd_pwr_pc = {
374 237 { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
375 238 "3=active", NULL },
376 239 {0, 0, 0, 100},
377 240 {90, 90, 20, 0},
378 241 {15000, 15000, 1000, 0}
379 242 };
380 243
381 244 /*
382 245 * Power level to power condition
383 246 */
384 247 static int sd_pl2pc[] = {
385 248 SD_TARGET_START_VALID,
386 249 SD_TARGET_STANDBY,
387 250 SD_TARGET_IDLE,
388 251 SD_TARGET_ACTIVE
389 252 };
390 253
391 254 /*
392 255 * Vendor specific data name property declarations
393 256 */
394 257
395 -#if defined(__fibre) || defined(__i386) ||defined(__amd64)
396 -
397 258 static sd_tunables seagate_properties = {
398 259 SEAGATE_THROTTLE_VALUE,
399 260 0,
400 261 0,
401 262 0,
402 263 0,
403 264 0,
404 265 0,
405 266 0,
406 267 0
407 268 };
408 269
409 -
410 270 static sd_tunables fujitsu_properties = {
411 271 FUJITSU_THROTTLE_VALUE,
412 272 0,
413 273 0,
414 274 0,
415 275 0,
416 276 0,
417 277 0,
418 278 0,
419 279 0
420 280 };
421 281
422 282 static sd_tunables ibm_properties = {
423 283 IBM_THROTTLE_VALUE,
424 284 0,
425 285 0,
426 286 0,
427 287 0,
428 288 0,
429 289 0,
430 290 0,
431 291 0
432 292 };
433 293
434 294 static sd_tunables purple_properties = {
435 295 PURPLE_THROTTLE_VALUE,
436 296 0,
437 297 0,
438 298 PURPLE_BUSY_RETRIES,
439 299 PURPLE_RESET_RETRY_COUNT,
440 300 PURPLE_RESERVE_RELEASE_TIME,
441 301 0,
442 302 0,
443 303 0
444 304 };
445 305
446 306 static sd_tunables sve_properties = {
447 307 SVE_THROTTLE_VALUE,
448 308 0,
449 309 0,
450 310 SVE_BUSY_RETRIES,
451 311 SVE_RESET_RETRY_COUNT,
452 312 SVE_RESERVE_RELEASE_TIME,
453 313 SVE_MIN_THROTTLE_VALUE,
454 314 SVE_DISKSORT_DISABLED_FLAG,
455 315 0
456 316 };
457 317
458 318 static sd_tunables maserati_properties = {
459 319 0,
460 320 0,
461 321 0,
462 322 0,
463 323 0,
464 324 0,
465 325 0,
466 326 MASERATI_DISKSORT_DISABLED_FLAG,
467 327 MASERATI_LUN_RESET_ENABLED_FLAG
468 328 };
469 329
470 330 static sd_tunables pirus_properties = {
471 331 PIRUS_THROTTLE_VALUE,
472 332 0,
473 333 PIRUS_NRR_COUNT,
474 334 PIRUS_BUSY_RETRIES,
475 335 PIRUS_RESET_RETRY_COUNT,
476 336 0,
477 337 PIRUS_MIN_THROTTLE_VALUE,
478 338 PIRUS_DISKSORT_DISABLED_FLAG,
479 339 PIRUS_LUN_RESET_ENABLED_FLAG
480 340 };
481 341
482 -#endif
483 -
484 -#if (defined(__sparc) && !defined(__fibre)) || \
485 - (defined(__i386) || defined(__amd64))
486 -
487 -
488 342 static sd_tunables elite_properties = {
489 343 ELITE_THROTTLE_VALUE,
490 344 0,
491 345 0,
492 346 0,
493 347 0,
494 348 0,
495 349 0,
496 350 0,
497 351 0
498 352 };
499 353
500 354 static sd_tunables st31200n_properties = {
501 355 ST31200N_THROTTLE_VALUE,
502 356 0,
503 357 0,
504 358 0,
505 359 0,
506 360 0,
507 361 0,
508 362 0,
509 363 0
510 364 };
511 365
512 -#endif /* Fibre or not */
513 -
514 366 static sd_tunables lsi_properties_scsi = {
515 367 LSI_THROTTLE_VALUE,
516 368 0,
517 369 LSI_NOTREADY_RETRIES,
518 370 0,
519 371 0,
520 372 0,
521 373 0,
522 374 0,
523 375 0
524 376 };
525 377
526 378 static sd_tunables symbios_properties = {
527 379 SYMBIOS_THROTTLE_VALUE,
528 380 0,
529 381 SYMBIOS_NOTREADY_RETRIES,
530 382 0,
531 383 0,
532 384 0,
533 385 0,
534 386 0,
535 387 0
536 388 };
537 389
538 390 static sd_tunables lsi_properties = {
539 391 0,
540 392 0,
541 393 LSI_NOTREADY_RETRIES,
542 394 0,
543 395 0,
544 396 0,
545 397 0,
546 398 0,
547 399 0
548 400 };
549 401
550 402 static sd_tunables lsi_oem_properties = {
551 403 0,
552 404 0,
553 405 LSI_OEM_NOTREADY_RETRIES,
554 406 0,
555 407 0,
556 408 0,
557 409 0,
558 410 0,
559 411 0,
560 412 1
561 413 };
562 414
563 -
564 -
565 415 #if (defined(SD_PROP_TST))
566 -
567 416 #define SD_TST_CTYPE_VAL CTYPE_CDROM
568 417 #define SD_TST_THROTTLE_VAL 16
569 418 #define SD_TST_NOTREADY_VAL 12
570 419 #define SD_TST_BUSY_VAL 60
571 420 #define SD_TST_RST_RETRY_VAL 36
572 421 #define SD_TST_RSV_REL_TIME 60
573 -
574 422 static sd_tunables tst_properties = {
575 423 SD_TST_THROTTLE_VAL,
576 424 SD_TST_CTYPE_VAL,
577 425 SD_TST_NOTREADY_VAL,
578 426 SD_TST_BUSY_VAL,
579 427 SD_TST_RST_RETRY_VAL,
580 428 SD_TST_RSV_REL_TIME,
581 429 0,
582 430 0,
583 431 0
584 432 };
585 433 #endif
586 434
587 435 /* This is similar to the ANSI toupper implementation */
588 436 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
589 437
590 438 /*
591 439 * Static Driver Configuration Table
592 440 *
 593  441 	 * This is the table of disks which need throttle adjustment (or perhaps
 594  442 	 * something else, as defined by the flags, at a future time). device_id
595 443 * is a string consisting of concatenated vid (vendor), pid (product/model)
596 444 * and revision strings as defined in the scsi_inquiry structure. Offsets of
597 445 * the parts of the string are as defined by the sizes in the scsi_inquiry
598 446 * structure. Device type is searched as far as the device_id string is
599 447 * defined. Flags defines which values are to be set in the driver from the
600 448 * properties list.
601 449 *
602 450 * Entries below which begin and end with a "*" are a special case.
603 451 * These do not have a specific vendor, and the string which follows
604 452 * can appear anywhere in the 16 byte PID portion of the inquiry data.
605 453 *
606 454 * Entries below which begin and end with a " " (blank) are a special
607 455 * case. The comparison function will treat multiple consecutive blanks
608 456 * as equivalent to a single blank. For example, this causes a
609 457 * sd_disk_table entry of " NEC CDROM " to match a device's id string
610 458 * of "NEC CDROM".
611 459 *
612 460 * Note: The MD21 controller type has been obsoleted.
613 461 * ST318202F is a Legacy device
614 462 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
615 463 * made with an FC connection. The entries here are a legacy.
616 464 */
617 465 static sd_disk_config_t sd_disk_table[] = {
618 -#if defined(__fibre) || defined(__i386) || defined(__amd64)
619 466 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
620 467 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
621 468 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
622 469 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
623 470 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
624 471 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
625 472 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
626 473 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
627 474 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
628 475 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
629 476 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
630 477 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
631 478 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
632 479 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
633 480 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
634 481 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
635 482 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
636 483 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
637 484 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
638 485 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
639 486 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
640 487 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
641 488 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
642 489 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
643 490 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
644 491 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
645 492 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
646 493 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
647 494 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
648 495 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
649 496 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
650 497 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
651 498 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
652 499 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
653 500 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
654 501 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
655 502 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
656 503 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
657 504 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
658 505 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
659 506 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
660 507 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
661 508 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
662 509 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
663 510 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
664 511 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
665 512 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
666 513 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
667 514 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
668 - { "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
669 - SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
670 - { "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
671 - SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
672 - { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
515 + { "*CSM100_*", SD_CONF_BSET_NRR_COUNT|
516 + SD_CONF_BSET_CACHE_IS_NV,
517 + &lsi_oem_properties },
518 + { "*CSM200_*", SD_CONF_BSET_NRR_COUNT|
519 + SD_CONF_BSET_CACHE_IS_NV,
520 + &lsi_oem_properties },
521 + { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
673 522 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
674 - { "SUN T3", SD_CONF_BSET_THROTTLE |
675 - SD_CONF_BSET_BSY_RETRY_COUNT|
676 - SD_CONF_BSET_RST_RETRIES|
677 - SD_CONF_BSET_RSV_REL_TIME,
678 - &purple_properties },
679 - { "SUN SESS01", SD_CONF_BSET_THROTTLE |
680 - SD_CONF_BSET_BSY_RETRY_COUNT|
681 - SD_CONF_BSET_RST_RETRIES|
682 - SD_CONF_BSET_RSV_REL_TIME|
683 - SD_CONF_BSET_MIN_THROTTLE|
684 - SD_CONF_BSET_DISKSORT_DISABLED,
685 - &sve_properties },
686 - { "SUN T4", SD_CONF_BSET_THROTTLE |
687 - SD_CONF_BSET_BSY_RETRY_COUNT|
688 - SD_CONF_BSET_RST_RETRIES|
689 - SD_CONF_BSET_RSV_REL_TIME,
690 - &purple_properties },
691 - { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
692 - SD_CONF_BSET_LUN_RESET_ENABLED,
693 - &maserati_properties },
694 - { "SUN SE6920", SD_CONF_BSET_THROTTLE |
695 - SD_CONF_BSET_NRR_COUNT|
696 - SD_CONF_BSET_BSY_RETRY_COUNT|
697 - SD_CONF_BSET_RST_RETRIES|
698 - SD_CONF_BSET_MIN_THROTTLE|
699 - SD_CONF_BSET_DISKSORT_DISABLED|
700 - SD_CONF_BSET_LUN_RESET_ENABLED,
701 - &pirus_properties },
702 - { "SUN SE6940", SD_CONF_BSET_THROTTLE |
703 - SD_CONF_BSET_NRR_COUNT|
704 - SD_CONF_BSET_BSY_RETRY_COUNT|
705 - SD_CONF_BSET_RST_RETRIES|
706 - SD_CONF_BSET_MIN_THROTTLE|
707 - SD_CONF_BSET_DISKSORT_DISABLED|
708 - SD_CONF_BSET_LUN_RESET_ENABLED,
709 - &pirus_properties },
710 - { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
711 - SD_CONF_BSET_NRR_COUNT|
712 - SD_CONF_BSET_BSY_RETRY_COUNT|
713 - SD_CONF_BSET_RST_RETRIES|
714 - SD_CONF_BSET_MIN_THROTTLE|
715 - SD_CONF_BSET_DISKSORT_DISABLED|
716 - SD_CONF_BSET_LUN_RESET_ENABLED,
717 - &pirus_properties },
718 - { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
719 - SD_CONF_BSET_NRR_COUNT|
720 - SD_CONF_BSET_BSY_RETRY_COUNT|
721 - SD_CONF_BSET_RST_RETRIES|
722 - SD_CONF_BSET_MIN_THROTTLE|
723 - SD_CONF_BSET_DISKSORT_DISABLED|
724 - SD_CONF_BSET_LUN_RESET_ENABLED,
725 - &pirus_properties },
726 - { "SUN PSX1000", SD_CONF_BSET_THROTTLE |
727 - SD_CONF_BSET_NRR_COUNT|
728 - SD_CONF_BSET_BSY_RETRY_COUNT|
729 - SD_CONF_BSET_RST_RETRIES|
730 - SD_CONF_BSET_MIN_THROTTLE|
731 - SD_CONF_BSET_DISKSORT_DISABLED|
732 - SD_CONF_BSET_LUN_RESET_ENABLED,
733 - &pirus_properties },
734 - { "SUN SE6330", SD_CONF_BSET_THROTTLE |
735 - SD_CONF_BSET_NRR_COUNT|
736 - SD_CONF_BSET_BSY_RETRY_COUNT|
737 - SD_CONF_BSET_RST_RETRIES|
738 - SD_CONF_BSET_MIN_THROTTLE|
739 - SD_CONF_BSET_DISKSORT_DISABLED|
740 - SD_CONF_BSET_LUN_RESET_ENABLED,
741 - &pirus_properties },
523 + { "SUN T3", SD_CONF_BSET_THROTTLE|
524 + SD_CONF_BSET_BSY_RETRY_COUNT|
525 + SD_CONF_BSET_RST_RETRIES|
526 + SD_CONF_BSET_RSV_REL_TIME,
527 + &purple_properties },
528 + { "SUN SESS01", SD_CONF_BSET_THROTTLE|
529 + SD_CONF_BSET_BSY_RETRY_COUNT|
530 + SD_CONF_BSET_RST_RETRIES|
531 + SD_CONF_BSET_RSV_REL_TIME|
532 + SD_CONF_BSET_MIN_THROTTLE|
533 + SD_CONF_BSET_DISKSORT_DISABLED,
534 + &sve_properties },
535 + { "SUN T4", SD_CONF_BSET_THROTTLE|
536 + SD_CONF_BSET_BSY_RETRY_COUNT|
537 + SD_CONF_BSET_RST_RETRIES|
538 + SD_CONF_BSET_RSV_REL_TIME,
539 + &purple_properties },
540 + { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED|
541 + SD_CONF_BSET_LUN_RESET_ENABLED,
542 + &maserati_properties },
543 + { "SUN SE6920", SD_CONF_BSET_THROTTLE|
544 + SD_CONF_BSET_NRR_COUNT|
545 + SD_CONF_BSET_BSY_RETRY_COUNT|
546 + SD_CONF_BSET_RST_RETRIES|
547 + SD_CONF_BSET_MIN_THROTTLE|
548 + SD_CONF_BSET_DISKSORT_DISABLED|
549 + SD_CONF_BSET_LUN_RESET_ENABLED,
550 + &pirus_properties },
551 + { "SUN SE6940", SD_CONF_BSET_THROTTLE|
552 + SD_CONF_BSET_NRR_COUNT|
553 + SD_CONF_BSET_BSY_RETRY_COUNT|
554 + SD_CONF_BSET_RST_RETRIES|
555 + SD_CONF_BSET_MIN_THROTTLE|
556 + SD_CONF_BSET_DISKSORT_DISABLED|
557 + SD_CONF_BSET_LUN_RESET_ENABLED,
558 + &pirus_properties },
559 + { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE|
560 + SD_CONF_BSET_NRR_COUNT|
561 + SD_CONF_BSET_BSY_RETRY_COUNT|
562 + SD_CONF_BSET_RST_RETRIES|
563 + SD_CONF_BSET_MIN_THROTTLE|
564 + SD_CONF_BSET_DISKSORT_DISABLED|
565 + SD_CONF_BSET_LUN_RESET_ENABLED,
566 + &pirus_properties },
567 + { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE|
568 + SD_CONF_BSET_NRR_COUNT|
569 + SD_CONF_BSET_BSY_RETRY_COUNT|
570 + SD_CONF_BSET_RST_RETRIES|
571 + SD_CONF_BSET_MIN_THROTTLE|
572 + SD_CONF_BSET_DISKSORT_DISABLED|
573 + SD_CONF_BSET_LUN_RESET_ENABLED,
574 + &pirus_properties },
575 + { "SUN PSX1000", SD_CONF_BSET_THROTTLE|
576 + SD_CONF_BSET_NRR_COUNT|
577 + SD_CONF_BSET_BSY_RETRY_COUNT|
578 + SD_CONF_BSET_RST_RETRIES|
579 + SD_CONF_BSET_MIN_THROTTLE|
580 + SD_CONF_BSET_DISKSORT_DISABLED|
581 + SD_CONF_BSET_LUN_RESET_ENABLED,
582 + &pirus_properties },
583 + { "SUN SE6330", SD_CONF_BSET_THROTTLE|
584 + SD_CONF_BSET_NRR_COUNT|
585 + SD_CONF_BSET_BSY_RETRY_COUNT|
586 + SD_CONF_BSET_RST_RETRIES|
587 + SD_CONF_BSET_MIN_THROTTLE|
588 + SD_CONF_BSET_DISKSORT_DISABLED|
589 + SD_CONF_BSET_LUN_RESET_ENABLED,
590 + &pirus_properties },
742 591 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
743 - { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
592 + { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
744 593 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
745 594 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
746 595 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
747 596 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
748 - { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
749 -#endif /* fibre or NON-sparc platforms */
750 -#if ((defined(__sparc) && !defined(__fibre)) ||\
751 - (defined(__i386) || defined(__amd64)))
752 - { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
753 - { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
754 - { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
755 - { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
756 - { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
757 - { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
758 - { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
759 - { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
760 - { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
761 - { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
762 - { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
763 - { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
764 - { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
765 - &symbios_properties },
766 - { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
767 - &lsi_properties_scsi },
768 -#if defined(__i386) || defined(__amd64)
769 - { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
770 - | SD_CONF_BSET_READSUB_BCD
771 - | SD_CONF_BSET_READ_TOC_ADDR_BCD
772 - | SD_CONF_BSET_NO_READ_HEADER
773 - | SD_CONF_BSET_READ_CD_XD4), NULL },
774 -
775 - { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
776 - | SD_CONF_BSET_READSUB_BCD
777 - | SD_CONF_BSET_READ_TOC_ADDR_BCD
778 - | SD_CONF_BSET_NO_READ_HEADER
779 - | SD_CONF_BSET_READ_CD_XD4), NULL },
780 -#endif /* __i386 || __amd64 */
781 -#endif /* sparc NON-fibre or NON-sparc platforms */
782 -
597 + { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
598 + { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
599 + { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
600 + { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
601 + { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
602 + { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
603 + { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
604 + { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
605 + { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
606 + { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
607 + { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
608 + { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
609 + { "SYMBIOS INF-01-00", SD_CONF_BSET_FAB_DEVID, NULL },
610 + { "SYMBIOS", SD_CONF_BSET_THROTTLE|
611 + SD_CONF_BSET_NRR_COUNT,
612 + &symbios_properties },
613 + { "LSI", SD_CONF_BSET_THROTTLE|
614 + SD_CONF_BSET_NRR_COUNT,
615 + &lsi_properties_scsi },
616 + { " NEC CD-ROM DRIVE:260 ", SD_CONF_BSET_PLAYMSF_BCD|
617 + SD_CONF_BSET_READSUB_BCD|
618 + SD_CONF_BSET_READ_TOC_ADDR_BCD|
619 + SD_CONF_BSET_NO_READ_HEADER|
620 + SD_CONF_BSET_READ_CD_XD4,
621 + NULL },
622 + { " NEC CD-ROM DRIVE:270 ", SD_CONF_BSET_PLAYMSF_BCD|
623 + SD_CONF_BSET_READSUB_BCD|
624 + SD_CONF_BSET_READ_TOC_ADDR_BCD|
625 + SD_CONF_BSET_NO_READ_HEADER|
626 + SD_CONF_BSET_READ_CD_XD4,
627 + NULL },
783 628 #if (defined(SD_PROP_TST))
784 - { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
785 - | SD_CONF_BSET_CTYPE
786 - | SD_CONF_BSET_NRR_COUNT
787 - | SD_CONF_BSET_FAB_DEVID
788 - | SD_CONF_BSET_NOCACHE
789 - | SD_CONF_BSET_BSY_RETRY_COUNT
790 - | SD_CONF_BSET_PLAYMSF_BCD
791 - | SD_CONF_BSET_READSUB_BCD
792 - | SD_CONF_BSET_READ_TOC_TRK_BCD
793 - | SD_CONF_BSET_READ_TOC_ADDR_BCD
794 - | SD_CONF_BSET_NO_READ_HEADER
795 - | SD_CONF_BSET_READ_CD_XD4
796 - | SD_CONF_BSET_RST_RETRIES
797 - | SD_CONF_BSET_RSV_REL_TIME
798 - | SD_CONF_BSET_TUR_CHECK), &tst_properties},
629 + { "VENDOR PRODUCT ", SD_CONF_BSET_THROTTLE|
630 + SD_CONF_BSET_CTYPE|
631 + SD_CONF_BSET_NRR_COUNT|
632 + SD_CONF_BSET_FAB_DEVID|
633 + SD_CONF_BSET_NOCACHE|
634 + SD_CONF_BSET_BSY_RETRY_COUNT|
635 + SD_CONF_BSET_PLAYMSF_BCD|
636 + SD_CONF_BSET_READSUB_BCD|
637 + SD_CONF_BSET_READ_TOC_TRK_BCD|
638 + SD_CONF_BSET_READ_TOC_ADDR_BCD|
639 + SD_CONF_BSET_NO_READ_HEADER|
640 + SD_CONF_BSET_READ_CD_XD4|
641 + SD_CONF_BSET_RST_RETRIES|
642 + SD_CONF_BSET_RSV_REL_TIME|
643 + SD_CONF_BSET_TUR_CHECK,
644 + &tst_properties},
799 645 #endif
800 646 };
801 647
802 648 static const int sd_disk_table_size =
803 649 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
804 650
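
The blank-squeezing rule described in the table's preamble (runs of consecutive blanks compare equal to a single blank) amounts to roughly the following; this is only an illustration, and the "*...*" PID wildcard plus leading/trailing blank handling are left to the real sd_sdconf_id_match()/sd_blank_cmp():

    static int
    blank_cmp_sketch(const char *a, const char *b)
    {
    	for (;;) {
    		/* Collapse each run of blanks to a single blank. */
    		while (*a == ' ' && a[1] == ' ')
    			a++;
    		while (*b == ' ' && b[1] == ' ')
    			b++;
    		if (*a != *b)
    			return (0);	/* mismatch */
    		if (*a == '\0')
    			return (1);	/* both exhausted: match */
    		a++;
    		b++;
    	}
    }
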
805 651 /*
806 652 * Emulation mode disk drive VID/PID table
807 653 */
808 654 static char sd_flash_dev_table[][25] = {
809 655 "ATA MARVELL SD88SA02",
810 656 "MARVELL SD88SA02",
811 657 "TOSHIBA THNSNV05",
812 658 };
813 659
814 660 static const int sd_flash_dev_table_size =
815 661 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);
816 662
817 663 #define SD_INTERCONNECT_PARALLEL 0
818 664 #define SD_INTERCONNECT_FABRIC 1
819 665 #define SD_INTERCONNECT_FIBRE 2
820 666 #define SD_INTERCONNECT_SSA 3
821 667 #define SD_INTERCONNECT_SATA 4
822 668 #define SD_INTERCONNECT_SAS 5
823 669
824 670 #define SD_IS_PARALLEL_SCSI(un) \
825 671 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
826 672 #define SD_IS_SERIAL(un) \
827 673 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
828 674 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
829 675
830 676 /*
831 677 * Definitions used by device id registration routines
832 678 */
833 679 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */
 834  680 #define	VPD_PAGE_LENGTH		3	/* offset for page length data */
835 681 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */
836 682
837 683 static kmutex_t sd_sense_mutex = {0};
838 684
839 685 /*
840 686 * Macros for updates of the driver state
841 687 */
842 688 #define New_state(un, s) \
843 689 (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
844 690 #define Restore_state(un) \
845 691 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
846 692
847 693 static struct sd_cdbinfo sd_cdbtab[] = {
848 694 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },
849 695 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
850 696 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
851 697 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
852 698 };
853 699
854 700 /*
855 701 * Specifies the number of seconds that must have elapsed since the last
 856  702 	 * command completed for a device to be declared idle to the PM framework.
857 703 */
858 704 static int sd_pm_idletime = 1;
859 705
860 706 /*
861 707 * Internal function prototypes
862 708 */
709 +typedef struct unmap_param_hdr_s {
710 + uint16_t uph_data_len;
711 + uint16_t uph_descr_data_len;
712 + uint32_t uph_reserved;
713 +} unmap_param_hdr_t;
863 714
864 -#if (defined(__fibre))
865 -/*
866 - * These #defines are to avoid namespace collisions that occur because this
867 - * code is currently used to compile two separate driver modules: sd and ssd.
868 - * All function names need to be treated this way (even if declared static)
869 - * in order to allow the debugger to resolve the names properly.
870 - * It is anticipated that in the near future the ssd module will be obsoleted,
871 - * at which time this ugliness should go away.
872 - */
873 -#define sd_log_trace ssd_log_trace
874 -#define sd_log_info ssd_log_info
875 -#define sd_log_err ssd_log_err
876 -#define sdprobe ssdprobe
877 -#define sdinfo ssdinfo
878 -#define sd_prop_op ssd_prop_op
879 -#define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
880 -#define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
881 -#define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
882 -#define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
883 -#define sd_scsi_target_lun_init ssd_scsi_target_lun_init
884 -#define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
885 -#define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
886 -#define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
887 -#define sd_spin_up_unit ssd_spin_up_unit
888 -#define sd_enable_descr_sense ssd_enable_descr_sense
889 -#define sd_reenable_dsense_task ssd_reenable_dsense_task
890 -#define sd_set_mmc_caps ssd_set_mmc_caps
891 -#define sd_read_unit_properties ssd_read_unit_properties
892 -#define sd_process_sdconf_file ssd_process_sdconf_file
893 -#define sd_process_sdconf_table ssd_process_sdconf_table
894 -#define sd_sdconf_id_match ssd_sdconf_id_match
895 -#define sd_blank_cmp ssd_blank_cmp
896 -#define sd_chk_vers1_data ssd_chk_vers1_data
897 -#define sd_set_vers1_properties ssd_set_vers1_properties
898 -#define sd_check_bdc_vpd ssd_check_bdc_vpd
899 -#define sd_check_emulation_mode ssd_check_emulation_mode
715 +typedef struct unmap_blk_descr_s {
716 + uint64_t ubd_lba;
717 + uint32_t ubd_lba_cnt;
718 + uint32_t ubd_reserved;
719 +} unmap_blk_descr_t;
900 720
901 -#define sd_get_physical_geometry ssd_get_physical_geometry
902 -#define sd_get_virtual_geometry ssd_get_virtual_geometry
903 -#define sd_update_block_info ssd_update_block_info
904 -#define sd_register_devid ssd_register_devid
905 -#define sd_get_devid ssd_get_devid
906 -#define sd_create_devid ssd_create_devid
907 -#define sd_write_deviceid ssd_write_deviceid
908 -#define sd_check_vpd_page_support ssd_check_vpd_page_support
909 -#define sd_setup_pm ssd_setup_pm
910 -#define sd_create_pm_components ssd_create_pm_components
911 -#define sd_ddi_suspend ssd_ddi_suspend
912 -#define sd_ddi_resume ssd_ddi_resume
913 -#define sd_pm_state_change ssd_pm_state_change
914 -#define sdpower ssdpower
915 -#define sdattach ssdattach
916 -#define sddetach ssddetach
917 -#define sd_unit_attach ssd_unit_attach
918 -#define sd_unit_detach ssd_unit_detach
919 -#define sd_set_unit_attributes ssd_set_unit_attributes
920 -#define sd_create_errstats ssd_create_errstats
921 -#define sd_set_errstats ssd_set_errstats
922 -#define sd_set_pstats ssd_set_pstats
923 -#define sddump ssddump
924 -#define sd_scsi_poll ssd_scsi_poll
925 -#define sd_send_polled_RQS ssd_send_polled_RQS
926 -#define sd_ddi_scsi_poll ssd_ddi_scsi_poll
927 -#define sd_init_event_callbacks ssd_init_event_callbacks
928 -#define sd_event_callback ssd_event_callback
929 -#define sd_cache_control ssd_cache_control
930 -#define sd_get_write_cache_enabled ssd_get_write_cache_enabled
931 -#define sd_get_write_cache_changeable ssd_get_write_cache_changeable
932 -#define sd_get_nv_sup ssd_get_nv_sup
933 -#define sd_make_device ssd_make_device
934 -#define sdopen ssdopen
935 -#define sdclose ssdclose
936 -#define sd_ready_and_valid ssd_ready_and_valid
937 -#define sdmin ssdmin
938 -#define sdread ssdread
939 -#define sdwrite ssdwrite
940 -#define sdaread ssdaread
941 -#define sdawrite ssdawrite
942 -#define sdstrategy ssdstrategy
943 -#define sdioctl ssdioctl
944 -#define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
945 -#define sd_mapblocksize_iostart ssd_mapblocksize_iostart
946 -#define sd_checksum_iostart ssd_checksum_iostart
947 -#define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
948 -#define sd_pm_iostart ssd_pm_iostart
949 -#define sd_core_iostart ssd_core_iostart
950 -#define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
951 -#define sd_mapblocksize_iodone ssd_mapblocksize_iodone
952 -#define sd_checksum_iodone ssd_checksum_iodone
953 -#define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
954 -#define sd_pm_iodone ssd_pm_iodone
955 -#define sd_initpkt_for_buf ssd_initpkt_for_buf
956 -#define sd_destroypkt_for_buf ssd_destroypkt_for_buf
957 -#define sd_setup_rw_pkt ssd_setup_rw_pkt
958 -#define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
959 -#define sd_buf_iodone ssd_buf_iodone
960 -#define sd_uscsi_strategy ssd_uscsi_strategy
961 -#define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
962 -#define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
963 -#define sd_uscsi_iodone ssd_uscsi_iodone
964 -#define sd_xbuf_strategy ssd_xbuf_strategy
965 -#define sd_xbuf_init ssd_xbuf_init
966 -#define sd_pm_entry ssd_pm_entry
967 -#define sd_pm_exit ssd_pm_exit
721 +/* Max number of block descriptors in UNMAP command */
722 +#define SD_UNMAP_MAX_DESCR \
723 + ((UINT16_MAX - sizeof (unmap_param_hdr_t)) / sizeof (unmap_blk_descr_t))
724 +/* Max size of the UNMAP parameter list in bytes */
725 +#define SD_UNMAP_PARAM_LIST_MAXSZ (sizeof (unmap_param_hdr_t) + \
726 + SD_UNMAP_MAX_DESCR * sizeof (unmap_blk_descr_t))
968 727
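
Per SBC-3 the UNMAP parameter list is big-endian on the wire, and the header's data length counts every byte after the field itself (the six remaining header bytes plus the descriptors); a sketch of filling in a single-descriptor list, with the function name illustrative and BE_16/BE_32/BE_64 taken from <sys/byteorder.h>:

    static void
    unmap_one_extent_sketch(unmap_param_hdr_t *uph, uint64_t lba, uint32_t nblks)
    {
    	/* The lone block descriptor immediately follows the header. */
    	unmap_blk_descr_t *ubd = (unmap_blk_descr_t *)(uph + 1);

    	uph->uph_data_len = BE_16(sizeof (unmap_blk_descr_t) + 6);
    	uph->uph_descr_data_len = BE_16(sizeof (unmap_blk_descr_t));
    	uph->uph_reserved = 0;
    	ubd->ubd_lba = BE_64(lba);
    	ubd->ubd_lba_cnt = BE_32(nblks);
    	ubd->ubd_reserved = 0;
    }
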
969 -#define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
970 -#define sd_pm_timeout_handler ssd_pm_timeout_handler
971 -
972 -#define sd_add_buf_to_waitq ssd_add_buf_to_waitq
973 -#define sdintr ssdintr
974 -#define sd_start_cmds ssd_start_cmds
975 -#define sd_send_scsi_cmd ssd_send_scsi_cmd
976 -#define sd_bioclone_alloc ssd_bioclone_alloc
977 -#define sd_bioclone_free ssd_bioclone_free
978 -#define sd_shadow_buf_alloc ssd_shadow_buf_alloc
979 -#define sd_shadow_buf_free ssd_shadow_buf_free
980 -#define sd_print_transport_rejected_message \
981 - ssd_print_transport_rejected_message
982 -#define sd_retry_command ssd_retry_command
983 -#define sd_set_retry_bp ssd_set_retry_bp
984 -#define sd_send_request_sense_command ssd_send_request_sense_command
985 -#define sd_start_retry_command ssd_start_retry_command
986 -#define sd_start_direct_priority_command \
987 - ssd_start_direct_priority_command
988 -#define sd_return_failed_command ssd_return_failed_command
989 -#define sd_return_failed_command_no_restart \
990 - ssd_return_failed_command_no_restart
991 -#define sd_return_command ssd_return_command
992 -#define sd_sync_with_callback ssd_sync_with_callback
993 -#define sdrunout ssdrunout
994 -#define sd_mark_rqs_busy ssd_mark_rqs_busy
995 -#define sd_mark_rqs_idle ssd_mark_rqs_idle
996 -#define sd_reduce_throttle ssd_reduce_throttle
997 -#define sd_restore_throttle ssd_restore_throttle
998 -#define sd_print_incomplete_msg ssd_print_incomplete_msg
999 -#define sd_init_cdb_limits ssd_init_cdb_limits
1000 -#define sd_pkt_status_good ssd_pkt_status_good
1001 -#define sd_pkt_status_check_condition ssd_pkt_status_check_condition
1002 -#define sd_pkt_status_busy ssd_pkt_status_busy
1003 -#define sd_pkt_status_reservation_conflict \
1004 - ssd_pkt_status_reservation_conflict
1005 -#define sd_pkt_status_qfull ssd_pkt_status_qfull
1006 -#define sd_handle_request_sense ssd_handle_request_sense
1007 -#define sd_handle_auto_request_sense ssd_handle_auto_request_sense
1008 -#define sd_print_sense_failed_msg ssd_print_sense_failed_msg
1009 -#define sd_validate_sense_data ssd_validate_sense_data
1010 -#define sd_decode_sense ssd_decode_sense
1011 -#define sd_print_sense_msg ssd_print_sense_msg
1012 -#define sd_sense_key_no_sense ssd_sense_key_no_sense
1013 -#define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
1014 -#define sd_sense_key_not_ready ssd_sense_key_not_ready
1015 -#define sd_sense_key_medium_or_hardware_error \
1016 - ssd_sense_key_medium_or_hardware_error
1017 -#define sd_sense_key_illegal_request ssd_sense_key_illegal_request
1018 -#define sd_sense_key_unit_attention ssd_sense_key_unit_attention
1019 -#define sd_sense_key_fail_command ssd_sense_key_fail_command
1020 -#define sd_sense_key_blank_check ssd_sense_key_blank_check
1021 -#define sd_sense_key_aborted_command ssd_sense_key_aborted_command
1022 -#define sd_sense_key_default ssd_sense_key_default
1023 -#define sd_print_retry_msg ssd_print_retry_msg
1024 -#define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
1025 -#define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
1026 -#define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
1027 -#define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
1028 -#define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
1029 -#define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
1030 -#define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
1031 -#define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
1032 -#define sd_pkt_reason_default ssd_pkt_reason_default
1033 -#define sd_reset_target ssd_reset_target
1034 -#define sd_start_stop_unit_callback ssd_start_stop_unit_callback
1035 -#define sd_start_stop_unit_task ssd_start_stop_unit_task
1036 -#define sd_taskq_create ssd_taskq_create
1037 -#define sd_taskq_delete ssd_taskq_delete
1038 -#define sd_target_change_task ssd_target_change_task
1039 -#define sd_log_dev_status_event ssd_log_dev_status_event
1040 -#define sd_log_lun_expansion_event ssd_log_lun_expansion_event
1041 -#define sd_log_eject_request_event ssd_log_eject_request_event
1042 -#define sd_media_change_task ssd_media_change_task
1043 -#define sd_handle_mchange ssd_handle_mchange
1044 -#define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
1045 -#define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
1046 -#define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
1047 -#define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
1048 -#define sd_send_scsi_feature_GET_CONFIGURATION \
1049 - sd_send_scsi_feature_GET_CONFIGURATION
1050 -#define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
1051 -#define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
1052 -#define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
1053 -#define sd_send_scsi_PERSISTENT_RESERVE_IN \
1054 - ssd_send_scsi_PERSISTENT_RESERVE_IN
1055 -#define sd_send_scsi_PERSISTENT_RESERVE_OUT \
1056 - ssd_send_scsi_PERSISTENT_RESERVE_OUT
1057 -#define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
1058 -#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
1059 - ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
1060 -#define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
1061 -#define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
1062 -#define sd_send_scsi_RDWR ssd_send_scsi_RDWR
1063 -#define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
1064 -#define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
1065 - ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
1066 -#define sd_gesn_media_data_valid ssd_gesn_media_data_valid
1067 -#define sd_alloc_rqs ssd_alloc_rqs
1068 -#define sd_free_rqs ssd_free_rqs
1069 -#define sd_dump_memory ssd_dump_memory
1070 -#define sd_get_media_info_com ssd_get_media_info_com
1071 -#define sd_get_media_info ssd_get_media_info
1072 -#define sd_get_media_info_ext ssd_get_media_info_ext
1073 -#define sd_dkio_ctrl_info ssd_dkio_ctrl_info
1074 -#define sd_nvpair_str_decode ssd_nvpair_str_decode
1075 -#define sd_strtok_r ssd_strtok_r
1076 -#define sd_set_properties ssd_set_properties
1077 -#define sd_get_tunables_from_conf ssd_get_tunables_from_conf
1078 -#define sd_setup_next_xfer ssd_setup_next_xfer
1079 -#define sd_dkio_get_temp ssd_dkio_get_temp
1080 -#define sd_check_mhd ssd_check_mhd
1081 -#define sd_mhd_watch_cb ssd_mhd_watch_cb
1082 -#define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
1083 -#define sd_sname ssd_sname
1084 -#define sd_mhd_resvd_recover ssd_mhd_resvd_recover
1085 -#define sd_resv_reclaim_thread ssd_resv_reclaim_thread
1086 -#define sd_take_ownership ssd_take_ownership
1087 -#define sd_reserve_release ssd_reserve_release
1088 -#define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
1089 -#define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
1090 -#define sd_persistent_reservation_in_read_keys \
1091 - ssd_persistent_reservation_in_read_keys
1092 -#define sd_persistent_reservation_in_read_resv \
1093 - ssd_persistent_reservation_in_read_resv
1094 -#define sd_mhdioc_takeown ssd_mhdioc_takeown
1095 -#define sd_mhdioc_failfast ssd_mhdioc_failfast
1096 -#define sd_mhdioc_release ssd_mhdioc_release
1097 -#define sd_mhdioc_register_devid ssd_mhdioc_register_devid
1098 -#define sd_mhdioc_inkeys ssd_mhdioc_inkeys
1099 -#define sd_mhdioc_inresv ssd_mhdioc_inresv
1100 -#define sr_change_blkmode ssr_change_blkmode
1101 -#define sr_change_speed ssr_change_speed
1102 -#define sr_atapi_change_speed ssr_atapi_change_speed
1103 -#define sr_pause_resume ssr_pause_resume
1104 -#define sr_play_msf ssr_play_msf
1105 -#define sr_play_trkind ssr_play_trkind
1106 -#define sr_read_all_subcodes ssr_read_all_subcodes
1107 -#define sr_read_subchannel ssr_read_subchannel
1108 -#define sr_read_tocentry ssr_read_tocentry
1109 -#define sr_read_tochdr ssr_read_tochdr
1110 -#define sr_read_cdda ssr_read_cdda
1111 -#define sr_read_cdxa ssr_read_cdxa
1112 -#define sr_read_mode1 ssr_read_mode1
1113 -#define sr_read_mode2 ssr_read_mode2
1114 -#define sr_read_cd_mode2 ssr_read_cd_mode2
1115 -#define sr_sector_mode ssr_sector_mode
1116 -#define sr_eject ssr_eject
1117 -#define sr_ejected ssr_ejected
1118 -#define sr_check_wp ssr_check_wp
1119 -#define sd_watch_request_submit ssd_watch_request_submit
1120 -#define sd_check_media ssd_check_media
1121 -#define sd_media_watch_cb ssd_media_watch_cb
1122 -#define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
1123 -#define sr_volume_ctrl ssr_volume_ctrl
1124 -#define sr_read_sony_session_offset ssr_read_sony_session_offset
1125 -#define sd_log_page_supported ssd_log_page_supported
1126 -#define sd_check_for_writable_cd ssd_check_for_writable_cd
1127 -#define sd_wm_cache_constructor ssd_wm_cache_constructor
1128 -#define sd_wm_cache_destructor ssd_wm_cache_destructor
1129 -#define sd_range_lock ssd_range_lock
1130 -#define sd_get_range ssd_get_range
1131 -#define sd_free_inlist_wmap ssd_free_inlist_wmap
1132 -#define sd_range_unlock ssd_range_unlock
1133 -#define sd_read_modify_write_task ssd_read_modify_write_task
1134 -#define sddump_do_read_of_rmw ssddump_do_read_of_rmw
1135 -
1136 -#define sd_iostart_chain ssd_iostart_chain
1137 -#define sd_iodone_chain ssd_iodone_chain
1138 -#define sd_initpkt_map ssd_initpkt_map
1139 -#define sd_destroypkt_map ssd_destroypkt_map
1140 -#define sd_chain_type_map ssd_chain_type_map
1141 -#define sd_chain_index_map ssd_chain_index_map
1142 -
1143 -#define sd_failfast_flushctl ssd_failfast_flushctl
1144 -#define sd_failfast_flushq ssd_failfast_flushq
1145 -#define sd_failfast_flushq_callback ssd_failfast_flushq_callback
1146 -
1147 -#define sd_is_lsi ssd_is_lsi
1148 -#define sd_tg_rdwr ssd_tg_rdwr
1149 -#define sd_tg_getinfo ssd_tg_getinfo
1150 -#define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler
1151 -
1152 -#endif /* #if (defined(__fibre)) */
1153 -
1154 -
1155 728 int _init(void);
1156 729 int _fini(void);
1157 730 int _info(struct modinfo *modinfop);
1158 731
1159 732 /*PRINTFLIKE3*/
1160 733 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1161 734 /*PRINTFLIKE3*/
1162 735 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1163 736 /*PRINTFLIKE3*/
1164 737 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1165 738
1166 739 static int sdprobe(dev_info_t *devi);
1167 740 static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
1168 741 void **result);
1169 742 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1170 743 int mod_flags, char *name, caddr_t valuep, int *lengthp);
1171 744
1172 745 /*
1173 746 * Smart probe for parallel scsi
1174 747 */
1175 748 static void sd_scsi_probe_cache_init(void);
1176 749 static void sd_scsi_probe_cache_fini(void);
1177 750 static void sd_scsi_clear_probe_cache(void);
1178 751 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
1179 752
1180 753 /*
1181 754 * Attached luns on target for parallel scsi
1182 755 */
1183 756 static void sd_scsi_target_lun_init(void);
1184 757 static void sd_scsi_target_lun_fini(void);
1185 758 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
1186 759 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);
1187 760
1188 761 static int sd_spin_up_unit(sd_ssc_t *ssc);
1189 762
1190 763 /*
1191 764 * Using sd_ssc_init to establish sd_ssc_t struct
1192 765 * Using sd_ssc_send to send uscsi internal command
1193 766 * Using sd_ssc_fini to free sd_ssc_t struct
1194 767 */
1195 768 static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
1196 769 static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
1197 770 int flag, enum uio_seg dataspace, int path_flag);
1198 771 static void sd_ssc_fini(sd_ssc_t *ssc);
1199 772
1200 773 /*
1201 774 * Using sd_ssc_assessment to set correct type-of-assessment
1202 775 * Using sd_ssc_post to post ereport & system log
1203 776 * sd_ssc_post will call sd_ssc_print to print system log
1204 - * sd_ssc_post will call sd_ssd_ereport_post to post ereport
777 + * sd_ssc_post will call sd_ssc_ereport_post to post ereport
1205 778 */
1206 779 static void sd_ssc_assessment(sd_ssc_t *ssc,
1207 780 enum sd_type_assessment tp_assess);
1208 781
1209 782 static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
1210 783 static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
1211 784 static void sd_ssc_ereport_post(sd_ssc_t *ssc,
1212 785 enum sd_driver_assessment drv_assess);
1213 786
1214 787 /*
1215 788 * Using sd_ssc_set_info to mark an un-decodable-data error.
1216 789 * Using sd_ssc_extract_info to transfer information from internal
1217 790 * data structures to sd_ssc_t.
1218 791 */
1219 792 static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
1220 793 const char *fmt, ...);
1221 794 static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
1222 795 struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
1223 796
1224 797 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1225 798 enum uio_seg dataspace, int path_flag);
1226 799
1227 800 #ifdef _LP64
1228 801 static void sd_enable_descr_sense(sd_ssc_t *ssc);
1229 802 static void sd_reenable_dsense_task(void *arg);
1230 803 #endif /* _LP64 */
1231 804
1232 805 static void sd_set_mmc_caps(sd_ssc_t *ssc);
1233 806
1234 807 static void sd_read_unit_properties(struct sd_lun *un);
1235 808 static int sd_process_sdconf_file(struct sd_lun *un);
1236 809 static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
1237 810 static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
1238 811 static void sd_set_properties(struct sd_lun *un, char *name, char *value);
1239 812 static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
1240 813 int *data_list, sd_tunables *values);
1241 814 static void sd_process_sdconf_table(struct sd_lun *un);
1242 815 static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
1243 816 static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
1244 817 static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
1245 818 int list_len, char *dataname_ptr);
1246 819 static void sd_set_vers1_properties(struct sd_lun *un, int flags,
1247 820 sd_tunables *prop_list);
1248 821
1249 822 static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
1250 823 int reservation_flag);
1251 824 static int sd_get_devid(sd_ssc_t *ssc);
1252 825 static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
1253 826 static int sd_write_deviceid(sd_ssc_t *ssc);
1254 827 static int sd_check_vpd_page_support(sd_ssc_t *ssc);
1255 828
829 +#ifdef notyet
1256 830 static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
1257 831 static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
832 +#endif
1258 833
1259 834 static int sd_ddi_suspend(dev_info_t *devi);
1260 835 static int sd_ddi_resume(dev_info_t *devi);
1261 836 static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
1262 837 static int sdpower(dev_info_t *devi, int component, int level);
1263 838
1264 839 static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
1265 840 static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
1266 -static int sd_unit_attach(dev_info_t *devi);
841 +static void sd_unit_attach(void *arg);
1267 842 static int sd_unit_detach(dev_info_t *devi);
1268 843
1269 844 static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
1270 845 static void sd_create_errstats(struct sd_lun *un, int instance);
1271 846 static void sd_set_errstats(struct sd_lun *un);
1272 847 static void sd_set_pstats(struct sd_lun *un);
1273 848
1274 849 static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
1275 850 static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
1276 851 static int sd_send_polled_RQS(struct sd_lun *un);
1277 852 static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);
1278 853
1279 -#if (defined(__fibre))
1280 854 /*
1281 - * Event callbacks (photon)
1282 - */
1283 -static void sd_init_event_callbacks(struct sd_lun *un);
1284 -static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
1285 -#endif
1286 -
1287 -/*
1288 855 * Defines for sd_cache_control
1289 856 */
1290 -
1291 857 #define SD_CACHE_ENABLE 1
1292 858 #define SD_CACHE_DISABLE 0
1293 859 #define SD_CACHE_NOCHANGE -1
1294 860
1295 861 static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
1296 862 static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
1297 863 static void sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable);
1298 864 static void sd_get_nv_sup(sd_ssc_t *ssc);
1299 865 static dev_t sd_make_device(dev_info_t *devi);
1300 866 static void sd_check_bdc_vpd(sd_ssc_t *ssc);
1301 867 static void sd_check_emulation_mode(sd_ssc_t *ssc);
1302 868 static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
1303 869 uint64_t capacity);
1304 870
1305 871 /*
1306 872 * Driver entry point functions.
1307 873 */
1308 874 static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
1309 875 static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
1310 876 static int sd_ready_and_valid(sd_ssc_t *ssc, int part);
1311 877
1312 878 static void sdmin(struct buf *bp);
1313 879 static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
1314 880 static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
1315 881 static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1316 882 static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1317 883
1318 884 static int sdstrategy(struct buf *bp);
1319 885 static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
1320 886
1321 887 /*
1322 888 * Function prototypes for layering functions in the iostart chain.
1323 889 */
1324 890 static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
1325 891 struct buf *bp);
1326 892 static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
1327 893 struct buf *bp);
1328 894 static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
1329 895 static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
1330 896 struct buf *bp);
1331 897 static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
1332 898 static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
1333 899
1334 900 /*
1335 901 * Function prototypes for layering functions in the iodone chain.
1336 902 */
1337 903 static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
1338 904 static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
1339 905 static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
1340 906 struct buf *bp);
1341 907 static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
1342 908 struct buf *bp);
1343 909 static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
1344 910 static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
1345 911 struct buf *bp);
1346 912 static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
1347 913
1348 914 /*
1349 915 * Prototypes for functions to support buf(9S) based IO.
1350 916 */
1351 917 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
1352 918 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
1353 919 static void sd_destroypkt_for_buf(struct buf *);
1354 920 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
1355 921 struct buf *bp, int flags,
1356 922 int (*callback)(caddr_t), caddr_t callback_arg,
1357 923 diskaddr_t lba, uint32_t blockcount);
1358 924 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
1359 925 struct buf *bp, diskaddr_t lba, uint32_t blockcount);
1360 926
1361 927 /*
1362 928 * Prototypes for functions to support USCSI IO.
1363 929 */
1364 930 static int sd_uscsi_strategy(struct buf *bp);
1365 931 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
1366 932 static void sd_destroypkt_for_uscsi(struct buf *);
1367 933
1368 934 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
1369 935 uchar_t chain_type, void *pktinfop);
1370 936
1371 937 static int sd_pm_entry(struct sd_lun *un);
1372 938 static void sd_pm_exit(struct sd_lun *un);
1373 939
1374 940 static void sd_pm_idletimeout_handler(void *arg);
1375 941
1376 942 /*
1377 943 * sd_core internal functions (used at the sd_core_io layer).
1378 944 */
1379 945 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
1380 946 static void sdintr(struct scsi_pkt *pktp);
1381 947 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);
1382 948
1383 949 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1384 950 enum uio_seg dataspace, int path_flag);
1385 951
1386 952 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
1387 953 daddr_t blkno, int (*func)(struct buf *));
1388 954 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
1389 955 uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
1390 956 static void sd_bioclone_free(struct buf *bp);
1391 957 static void sd_shadow_buf_free(struct buf *bp);
1392 958
1393 959 static void sd_print_transport_rejected_message(struct sd_lun *un,
1394 960 struct sd_xbuf *xp, int code);
1395 961 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
1396 962 void *arg, int code);
1397 963 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
1398 964 void *arg, int code);
1399 965 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
1400 966 void *arg, int code);
1401 967
1402 968 static void sd_retry_command(struct sd_lun *un, struct buf *bp,
1403 969 int retry_check_flag,
1404 970 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
1405 971 int c),
1406 972 void *user_arg, int failure_code, clock_t retry_delay,
1407 973 void (*statp)(kstat_io_t *));
1408 974
1409 975 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
1410 976 clock_t retry_delay, void (*statp)(kstat_io_t *));
1411 977
1412 978 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
1413 - struct scsi_pkt *pktp);
979 + int retry_check_flag, struct scsi_pkt *pktp);
1414 980 static void sd_start_retry_command(void *arg);
1415 981 static void sd_start_direct_priority_command(void *arg);
1416 982 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
1417 983 int errcode);
1418 984 static void sd_return_failed_command_no_restart(struct sd_lun *un,
1419 985 struct buf *bp, int errcode);
1420 986 static void sd_return_command(struct sd_lun *un, struct buf *bp);
1421 987 static void sd_sync_with_callback(struct sd_lun *un);
1422 988 static int sdrunout(caddr_t arg);
1423 989
1424 990 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
1425 991 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);
1426 992
1427 993 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
1428 994 static void sd_restore_throttle(void *arg);
1429 995
1430 996 static void sd_init_cdb_limits(struct sd_lun *un);
1431 997
1432 998 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
1433 999 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1434 1000
1435 1001 /*
1436 1002 * Error handling functions
1437 1003 */
1438 1004 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
1439 1005 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1440 1006 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
1441 1007 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1442 1008 static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
1443 1009 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1444 1010 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
1445 1011 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1446 1012
1447 1013 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
1448 1014 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1449 1015 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
1450 1016 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1451 1017 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
1452 1018 struct sd_xbuf *xp, size_t actual_len);
1453 1019 static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
1454 1020 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1455 1021
1456 1022 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
1457 1023 void *arg, int code);
1458 1024
1459 1025 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
1460 1026 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1461 1027 static void sd_sense_key_recoverable_error(struct sd_lun *un,
1462 1028 uint8_t *sense_datap,
1463 1029 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1464 1030 static void sd_sense_key_not_ready(struct sd_lun *un,
1465 1031 uint8_t *sense_datap,
1466 1032 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1467 1033 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
1468 1034 uint8_t *sense_datap,
1469 1035 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1470 1036 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
1471 1037 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1472 1038 static void sd_sense_key_unit_attention(struct sd_lun *un,
1473 1039 uint8_t *sense_datap,
1474 1040 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1475 1041 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
1476 1042 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1477 1043 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
1478 1044 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1479 1045 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
1480 1046 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1481 1047 static void sd_sense_key_default(struct sd_lun *un,
1482 1048 uint8_t *sense_datap,
1483 1049 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1484 1050
1485 1051 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
1486 1052 void *arg, int flag);
1487 1053
1488 1054 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
1489 1055 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1490 1056 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
1491 1057 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1492 1058 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
1493 1059 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1494 1060 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
1495 1061 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1496 1062 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
1497 1063 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1498 1064 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
1499 1065 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1500 1066 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
1501 1067 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1502 1068 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
1503 1069 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1504 1070
1505 1071 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);
1506 1072
1507 1073 static void sd_start_stop_unit_callback(void *arg);
1508 1074 static void sd_start_stop_unit_task(void *arg);
1509 1075
1510 1076 static void sd_taskq_create(void);
1511 1077 static void sd_taskq_delete(void);
1512 1078 static void sd_target_change_task(void *arg);
1513 1079 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag);
1514 1080 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
1515 1081 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag);
1516 1082 static void sd_media_change_task(void *arg);
1517 1083
1518 1084 static int sd_handle_mchange(struct sd_lun *un);
1519 1085 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
1520 1086 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
1521 1087 uint32_t *lbap, int path_flag);
1522 1088 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
1523 1089 uint32_t *lbap, uint32_t *psp, int path_flag);
1524 1090 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag,
1525 1091 int flag, int path_flag);
1526 1092 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
1527 1093 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
1528 1094 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
1529 1095 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
1530 1096 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
1531 1097 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
1532 1098 uchar_t usr_cmd, uchar_t *usr_bufp);
1533 1099 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
1534 1100 struct dk_callback *dkc);
1535 1101 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
1102 +static int sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl,
1103 + int flag);
1536 1104 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
1537 1105 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1538 1106 uchar_t *bufaddr, uint_t buflen, int path_flag);
1539 1107 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
1540 1108 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1541 1109 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
1542 1110 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
1543 1111 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
1544 1112 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
1545 1113 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
1546 1114 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
1547 1115 size_t buflen, daddr_t start_block, int path_flag);
1548 1116 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
1549 1117 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
1550 1118 path_flag)
1551 1119 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
1552 1120 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
1553 1121 path_flag)
1554 1122
1555 1123 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
1556 1124 uint16_t buflen, uchar_t page_code, uchar_t page_control,
1557 1125 uint16_t param_ptr, int path_flag);
1558 1126 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc,
1559 1127 uchar_t *bufaddr, size_t buflen, uchar_t class_req);
1560 1128 static boolean_t sd_gesn_media_data_valid(uchar_t *data);
1561 1129
1562 1130 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
1563 1131 static void sd_free_rqs(struct sd_lun *un);
1564 1132
1565 1133 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
1566 1134 uchar_t *data, int len, int fmt);
1567 1135 static void sd_panic_for_res_conflict(struct sd_lun *un);
1568 1136
1569 1137 /*
1570 1138 * Disk Ioctl Function Prototypes
1571 1139 */
1572 1140 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
1573 1141 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag);
1574 1142 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
1575 1143 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
1576 1144
1577 1145 /*
1578 1146 * Multi-host Ioctl Prototypes
1579 1147 */
1580 1148 static int sd_check_mhd(dev_t dev, int interval);
1581 1149 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1582 1150 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
1583 1151 static char *sd_sname(uchar_t status);
1584 1152 static void sd_mhd_resvd_recover(void *arg);
1585 1153 static void sd_resv_reclaim_thread();
1586 1154 static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
1587 1155 static int sd_reserve_release(dev_t dev, int cmd);
1588 1156 static void sd_rmv_resv_reclaim_req(dev_t dev);
1589 1157 static void sd_mhd_reset_notify_cb(caddr_t arg);
1590 1158 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
1591 1159 mhioc_inkeys_t *usrp, int flag);
1592 1160 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
1593 1161 mhioc_inresvs_t *usrp, int flag);
1594 1162 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
1595 1163 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
1596 1164 static int sd_mhdioc_release(dev_t dev);
1597 1165 static int sd_mhdioc_register_devid(dev_t dev);
1598 1166 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
1599 1167 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
1600 1168
1601 1169 /*
1602 1170 * SCSI removable prototypes
1603 1171 */
1604 1172 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
1605 1173 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1606 1174 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1607 1175 static int sr_pause_resume(dev_t dev, int mode);
1608 1176 static int sr_play_msf(dev_t dev, caddr_t data, int flag);
1609 1177 static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
1610 1178 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
1611 1179 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
1612 1180 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
1613 1181 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
1614 1182 static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
1615 1183 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
1616 1184 static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
1617 1185 static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
1618 1186 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
1619 1187 static int sr_sector_mode(dev_t dev, uint32_t blksize);
1620 1188 static int sr_eject(dev_t dev);
1621 1189 static void sr_ejected(register struct sd_lun *un);
1622 1190 static int sr_check_wp(dev_t dev);
1623 1191 static opaque_t sd_watch_request_submit(struct sd_lun *un);
1624 1192 static int sd_check_media(dev_t dev, enum dkio_state state);
1625 1193 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1626 1194 static void sd_delayed_cv_broadcast(void *arg);
1627 1195 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1628 1196 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1629 1197
1198 +#ifdef notyet
1630 1199 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
1200 +#endif
1631 1201
1632 1202 /*
1633 1203 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
1634 1204 */
1635 1205 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1636 1206 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1637 1207 static void sd_wm_cache_destructor(void *wm, void *un);
1638 1208 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1639 1209 daddr_t endb, ushort_t typ);
1640 1210 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1641 1211 daddr_t endb);
1642 1212 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1643 1213 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1644 1214 static void sd_read_modify_write_task(void *arg);
1645 1215 static int
1646 1216 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1647 1217 struct buf **bpp);
1648 1218
1649 1219
1650 1220 /*
1651 1221 * Function prototypes for failfast support.
1652 1222 */
1653 -static void sd_failfast_flushq(struct sd_lun *un);
1223 +static void sd_failfast_flushq(struct sd_lun *un, boolean_t flush_all);
1654 1224 static int sd_failfast_flushq_callback(struct buf *bp);
1655 1225
1656 1226 /*
1657 1227 * Function prototypes to check for lsi devices
1658 1228 */
1659 1229 static void sd_is_lsi(struct sd_lun *un);
1660 1230
1661 1231 /*
1662 1232 * Function prototypes for partial DMA support
1663 1233 */
1664 1234 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
1665 1235 struct scsi_pkt *pkt, struct sd_xbuf *xp);
1666 1236
1667 1237
1668 1238 /* Function prototypes for cmlb */
1669 1239 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
1670 1240 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
1671 1241
1672 1242 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
1673 1243
1674 1244 /*
1675 1245 * For printing RMW warning message timely
1676 1246 */
1677 1247 static void sd_rmw_msg_print_handler(void *arg);
1678 1248
1679 1249 /*
1680 1250 * Constants for failfast support:
1681 1251 *
1682 1252 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1683 1253 * failfast processing being performed.
1684 1254 *
1685 1255 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1686 1256 * failfast processing on all bufs with B_FAILFAST set.
1687 1257 */
1688 1258
1689 1259 #define SD_FAILFAST_INACTIVE 0
1690 1260 #define SD_FAILFAST_ACTIVE 1
1691 1261
1692 1262 /*
 1263 + * Bitmask to control behavior in failfast active state:
1264 + *
1265 + * SD_FAILFAST_ENABLE_FORCE_INACTIVE: When set, allow retries without
1266 + * SD_RETRIES_FAILFAST to cause transition to failfast inactive state.
1267 + *
1268 + * SD_FAILFAST_ENABLE_FAIL_RETRIES: When set, cause retries with the flag
1269 + * SD_RETRIES_FAILFAST set (following a timeout) to fail when in failfast
1270 + * active state.
1271 + *
1272 + * SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES: When set, cause ALL retries,
1273 + * regardless of reason, to fail when in failfast active state. This takes
 1274 + * precedence over SD_FAILFAST_ENABLE_FAIL_RETRIES.
1275 + *
1276 + * SD_FAILFAST_ENABLE_FAIL_USCSI: When set, discard all commands in the USCSI
1277 + * chain (sdioctl or driver generated) when in failfast active state.
1278 + * To prevent problems with sdopen, this is limited to when there are
1279 + * multiple pending commands.
1280 + */
1281 +
1282 +#define SD_FAILFAST_ENABLE_FORCE_INACTIVE 0x01
1283 +#define SD_FAILFAST_ENABLE_FAIL_RETRIES 0x02
1284 +#define SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES 0x04
1285 +#define SD_FAILFAST_ENABLE_FAIL_USCSI 0x08
1286 +
1287 +/*
 1288 + * The default behavior is to fail timeout-driven retries and discard USCSI
 1289 + * commands when failfast is active; other retries cannot force it inactive.
1290 + */
1291 +static int sd_failfast_enable = SD_FAILFAST_ENABLE_FAIL_RETRIES |
1292 + SD_FAILFAST_ENABLE_FAIL_USCSI;
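/*
 * Editor's sketch (not part of this webrev): how the enable mask above
 * might be consulted for a retry decision while failfast is active. The
 * helper is hypothetical; the real checks live in sd_retry_command().
 * un_failfast_state and SD_RETRIES_FAILFAST are assumed from the driver.
 */
#ifdef SD_FAILFAST_SKETCH
static boolean_t
sd_failfast_should_fail_retry(struct sd_lun *un, int retry_check_flag)
{
	if (un->un_failfast_state != SD_FAILFAST_ACTIVE)
		return (B_FALSE);
	if (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES)
		return (B_TRUE);	/* fail every retry, regardless */
	if ((sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_RETRIES) &&
	    (retry_check_flag & SD_RETRIES_FAILFAST))
		return (B_TRUE);	/* fail timeout-driven retries */
	return (B_FALSE);
}
#endif	/* SD_FAILFAST_SKETCH */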
1293 +
1294 +/*
1693 1295 * Bitmask to control behavior of buf(9S) flushes when a transition to
1694 1296 * the failfast state occurs. Optional bits include:
1695 1297 *
1696 1298 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1697 1299 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1698 1300 * be flushed.
1699 1301 *
1700 1302 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1701 1303 * driver, in addition to the regular wait queue. This includes the xbuf
1702 1304 * queues. When clear, only the driver's wait queue will be flushed.
1703 1305 */
1704 1306 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1705 1307 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1706 1308
1707 1309 /*
1708 - * The default behavior is to only flush bufs that have B_FAILFAST set, but
1709 - * to flush all queues within the driver.
1310 + * The default behavior is to flush all bufs in all queues within the driver.
1710 1311 */
1711 -static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
1312 +static int sd_failfast_flushctl =
1313 + SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES;
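/*
 * Editor's sketch (not part of this webrev): the per-buf flush test that
 * sd_failfast_flushq() is expected to apply under the policy above. The
 * helper is hypothetical; B_FAILFAST is the buf(9S) flag named earlier.
 */
#ifdef SD_FAILFAST_SKETCH
static boolean_t
sd_failfast_should_flush_buf(struct buf *bp)
{
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS)
		return (B_TRUE);			/* flush every queued buf */
	return ((bp->b_flags & B_FAILFAST) != 0);	/* B_FAILFAST bufs only */
}
#endif	/* SD_FAILFAST_SKETCH */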
1712 1314
1315 +#ifdef SD_FAULT_INJECTION
1316 +static uint_t sd_fault_injection_on = 0;
1317 +#endif
1713 1318
1714 1319 /*
1715 1320 * SD Testing Fault Injection
1716 1321 */
1717 1322 #ifdef SD_FAULT_INJECTION
1718 -static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1323 +static int sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1719 1324 static void sd_faultinjection(struct scsi_pkt *pktp);
1325 +static void sd_prefaultinjection(struct scsi_pkt *pktp);
1720 1326 static void sd_injection_log(char *buf, struct sd_lun *un);
1721 1327 #endif
1722 1328
1723 1329 /*
1724 1330 * Device driver ops vector
1725 1331 */
1726 1332 static struct cb_ops sd_cb_ops = {
1727 1333 sdopen, /* open */
1728 1334 sdclose, /* close */
1729 1335 sdstrategy, /* strategy */
1730 1336 nodev, /* print */
1731 1337 sddump, /* dump */
1732 1338 sdread, /* read */
1733 1339 sdwrite, /* write */
1734 1340 sdioctl, /* ioctl */
1735 1341 nodev, /* devmap */
1736 1342 nodev, /* mmap */
1737 1343 nodev, /* segmap */
1738 1344 nochpoll, /* poll */
1739 1345 sd_prop_op, /* cb_prop_op */
1740 1346 0, /* streamtab */
1741 1347 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
1742 1348 CB_REV, /* cb_rev */
1743 1349 sdaread, /* async I/O read entry point */
1744 1350 sdawrite /* async I/O write entry point */
1745 1351 };
1746 1352
1747 1353 struct dev_ops sd_ops = {
1748 1354 DEVO_REV, /* devo_rev, */
1749 1355 0, /* refcnt */
1750 1356 sdinfo, /* info */
1751 1357 nulldev, /* identify */
1752 1358 sdprobe, /* probe */
1753 1359 sdattach, /* attach */
1754 1360 sddetach, /* detach */
1755 1361 nodev, /* reset */
1756 1362 &sd_cb_ops, /* driver operations */
1757 1363 NULL, /* bus operations */
1758 1364 sdpower, /* power */
1759 1365 ddi_quiesce_not_needed, /* quiesce */
1760 1366 };
1761 1367
1762 1368 /*
1763 1369 * This is the loadable module wrapper.
1764 1370 */
1765 1371 #include <sys/modctl.h>
1766 1372
1767 1373 static struct modldrv modldrv = {
1768 1374 &mod_driverops, /* Type of module. This one is a driver */
1769 1375 SD_MODULE_NAME, /* Module name. */
1770 1376 &sd_ops /* driver ops */
1771 1377 };
1772 1378
1773 1379 static struct modlinkage modlinkage = {
1774 1380 MODREV_1, &modldrv, NULL
1775 1381 };
1776 1382
1777 1383 static cmlb_tg_ops_t sd_tgops = {
1778 1384 TG_DK_OPS_VERSION_1,
1779 1385 sd_tg_rdwr,
1780 1386 sd_tg_getinfo
1781 1387 };
1782 1388
1783 1389 static struct scsi_asq_key_strings sd_additional_codes[] = {
1784 1390 0x81, 0, "Logical Unit is Reserved",
1785 1391 0x85, 0, "Audio Address Not Valid",
1786 1392 0xb6, 0, "Media Load Mechanism Failed",
1787 1393 0xB9, 0, "Audio Play Operation Aborted",
1788 1394 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1789 1395 0x53, 2, "Medium removal prevented",
1790 1396 0x6f, 0, "Authentication failed during key exchange",
1791 1397 0x6f, 1, "Key not present",
1792 1398 0x6f, 2, "Key not established",
1793 1399 0x6f, 3, "Read without proper authentication",
1794 1400 0x6f, 4, "Mismatched region to this logical unit",
1795 1401 0x6f, 5, "Region reset count error",
1796 1402 0xffff, 0x0, NULL
1797 1403 };
1798 1404
1799 1405
1800 1406 /*
1801 1407 * Struct for passing printing information for sense data messages
1802 1408 */
1803 1409 struct sd_sense_info {
1804 1410 int ssi_severity;
1805 1411 int ssi_pfa_flag;
1806 1412 };
1807 1413
1808 1414 /*
1809 1415 * Table of function pointers for iostart-side routines. Separate "chains"
1810 1416 * of layered function calls are formed by placing the function pointers
1811 1417 * sequentially in the desired order. Functions are called according to an
1812 1418 * incrementing table index ordering. The last function in each chain must
1813 1419 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1814 1420 * in the sd_iodone_chain[] array.
1815 1421 *
1816 1422 * Note: It may seem more natural to organize both the iostart and iodone
1817 1423 * functions together, into an array of structures (or some similar
1818 1424 * organization) with a common index, rather than two separate arrays which
1819 1425 * must be maintained in synchronization. The purpose of this division is
 1820 1426 * to achieve improved performance: individual arrays allow for more
1821 1427 * effective cache line utilization on certain platforms.
1822 1428 */
1823 1429
1824 1430 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1825 1431
1826 1432
1827 1433 static sd_chain_t sd_iostart_chain[] = {
1828 1434
1829 1435 /* Chain for buf IO for disk drive targets (PM enabled) */
1830 1436 sd_mapblockaddr_iostart, /* Index: 0 */
1831 1437 sd_pm_iostart, /* Index: 1 */
1832 1438 sd_core_iostart, /* Index: 2 */
1833 1439
1834 1440 /* Chain for buf IO for disk drive targets (PM disabled) */
1835 1441 sd_mapblockaddr_iostart, /* Index: 3 */
1836 1442 sd_core_iostart, /* Index: 4 */
1837 1443
1838 1444 /*
1839 1445 * Chain for buf IO for removable-media or large sector size
1840 1446 * disk drive targets with RMW needed (PM enabled)
1841 1447 */
1842 1448 sd_mapblockaddr_iostart, /* Index: 5 */
1843 1449 sd_mapblocksize_iostart, /* Index: 6 */
1844 1450 sd_pm_iostart, /* Index: 7 */
1845 1451 sd_core_iostart, /* Index: 8 */
1846 1452
1847 1453 /*
1848 1454 * Chain for buf IO for removable-media or large sector size
1849 1455 * disk drive targets with RMW needed (PM disabled)
1850 1456 */
1851 1457 sd_mapblockaddr_iostart, /* Index: 9 */
1852 1458 sd_mapblocksize_iostart, /* Index: 10 */
1853 1459 sd_core_iostart, /* Index: 11 */
1854 1460
1855 1461 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1856 1462 sd_mapblockaddr_iostart, /* Index: 12 */
1857 1463 sd_checksum_iostart, /* Index: 13 */
1858 1464 sd_pm_iostart, /* Index: 14 */
1859 1465 sd_core_iostart, /* Index: 15 */
1860 1466
1861 1467 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1862 1468 sd_mapblockaddr_iostart, /* Index: 16 */
1863 1469 sd_checksum_iostart, /* Index: 17 */
1864 1470 sd_core_iostart, /* Index: 18 */
1865 1471
1866 1472 /* Chain for USCSI commands (all targets) */
1867 1473 sd_pm_iostart, /* Index: 19 */
1868 1474 sd_core_iostart, /* Index: 20 */
1869 1475
1870 1476 /* Chain for checksumming USCSI commands (all targets) */
1871 1477 sd_checksum_uscsi_iostart, /* Index: 21 */
1872 1478 sd_pm_iostart, /* Index: 22 */
1873 1479 sd_core_iostart, /* Index: 23 */
1874 1480
1875 1481 /* Chain for "direct" USCSI commands (all targets) */
1876 1482 sd_core_iostart, /* Index: 24 */
1877 1483
1878 1484 /* Chain for "direct priority" USCSI commands (all targets) */
1879 1485 sd_core_iostart, /* Index: 25 */
1880 1486
1881 1487 /*
1882 1488 * Chain for buf IO for large sector size disk drive targets
1883 1489 * with RMW needed with checksumming (PM enabled)
1884 1490 */
1885 1491 sd_mapblockaddr_iostart, /* Index: 26 */
1886 1492 sd_mapblocksize_iostart, /* Index: 27 */
1887 1493 sd_checksum_iostart, /* Index: 28 */
1888 1494 sd_pm_iostart, /* Index: 29 */
1889 1495 sd_core_iostart, /* Index: 30 */
1890 1496
1891 1497 /*
1892 1498 * Chain for buf IO for large sector size disk drive targets
1893 1499 * with RMW needed with checksumming (PM disabled)
1894 1500 */
1895 1501 sd_mapblockaddr_iostart, /* Index: 31 */
1896 1502 sd_mapblocksize_iostart, /* Index: 32 */
1897 1503 sd_checksum_iostart, /* Index: 33 */
1898 1504 sd_core_iostart, /* Index: 34 */
1899 1505
1900 1506 };
1901 1507
1902 1508 /*
1903 1509 * Macros to locate the first function of each iostart chain in the
1904 1510 * sd_iostart_chain[] array. These are located by the index in the array.
1905 1511 */
1906 1512 #define SD_CHAIN_DISK_IOSTART 0
1907 1513 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1908 1514 #define SD_CHAIN_MSS_DISK_IOSTART 5
1909 1515 #define SD_CHAIN_RMMEDIA_IOSTART 5
1910 1516 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1911 1517 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1912 1518 #define SD_CHAIN_CHKSUM_IOSTART 12
1913 1519 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1914 1520 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1915 1521 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1916 1522 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1917 1523 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1918 1524 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1919 1525 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1920 1526
1921 1527
1922 1528 /*
1923 1529 * Table of function pointers for the iodone-side routines for the driver-
1924 1530 * internal layering mechanism. The calling sequence for iodone routines
1925 1531 * uses a decrementing table index, so the last routine called in a chain
1926 1532 * must be at the lowest array index location for that chain. The last
1927 1533 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1928 1534 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1929 1535 * of the functions in an iodone side chain must correspond to the ordering
1930 1536 * of the iostart routines for that chain. Note that there is no iodone
1931 1537 * side routine that corresponds to sd_core_iostart(), so there is no
1932 1538 * entry in the table for this.
1933 1539 */
1934 1540
1935 1541 static sd_chain_t sd_iodone_chain[] = {
1936 1542
1937 1543 /* Chain for buf IO for disk drive targets (PM enabled) */
1938 1544 sd_buf_iodone, /* Index: 0 */
1939 1545 sd_mapblockaddr_iodone, /* Index: 1 */
1940 1546 sd_pm_iodone, /* Index: 2 */
1941 1547
1942 1548 /* Chain for buf IO for disk drive targets (PM disabled) */
1943 1549 sd_buf_iodone, /* Index: 3 */
1944 1550 sd_mapblockaddr_iodone, /* Index: 4 */
1945 1551
1946 1552 /*
1947 1553 * Chain for buf IO for removable-media or large sector size
1948 1554 * disk drive targets with RMW needed (PM enabled)
1949 1555 */
1950 1556 sd_buf_iodone, /* Index: 5 */
1951 1557 sd_mapblockaddr_iodone, /* Index: 6 */
1952 1558 sd_mapblocksize_iodone, /* Index: 7 */
1953 1559 sd_pm_iodone, /* Index: 8 */
1954 1560
1955 1561 /*
1956 1562 * Chain for buf IO for removable-media or large sector size
1957 1563 * disk drive targets with RMW needed (PM disabled)
1958 1564 */
1959 1565 sd_buf_iodone, /* Index: 9 */
1960 1566 sd_mapblockaddr_iodone, /* Index: 10 */
1961 1567 sd_mapblocksize_iodone, /* Index: 11 */
1962 1568
1963 1569 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1964 1570 sd_buf_iodone, /* Index: 12 */
1965 1571 sd_mapblockaddr_iodone, /* Index: 13 */
1966 1572 sd_checksum_iodone, /* Index: 14 */
1967 1573 sd_pm_iodone, /* Index: 15 */
1968 1574
1969 1575 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1970 1576 sd_buf_iodone, /* Index: 16 */
1971 1577 sd_mapblockaddr_iodone, /* Index: 17 */
1972 1578 sd_checksum_iodone, /* Index: 18 */
1973 1579
1974 1580 /* Chain for USCSI commands (non-checksum targets) */
1975 1581 sd_uscsi_iodone, /* Index: 19 */
1976 1582 sd_pm_iodone, /* Index: 20 */
1977 1583
1978 1584 /* Chain for USCSI commands (checksum targets) */
1979 1585 sd_uscsi_iodone, /* Index: 21 */
1980 1586 sd_checksum_uscsi_iodone, /* Index: 22 */
 1981 1587 sd_pm_iodone, /* Index: 23 */
1982 1588
1983 1589 /* Chain for "direct" USCSI commands (all targets) */
1984 1590 sd_uscsi_iodone, /* Index: 24 */
1985 1591
1986 1592 /* Chain for "direct priority" USCSI commands (all targets) */
1987 1593 sd_uscsi_iodone, /* Index: 25 */
1988 1594
1989 1595 /*
1990 1596 * Chain for buf IO for large sector size disk drive targets
1991 1597 * with checksumming (PM enabled)
1992 1598 */
1993 1599 sd_buf_iodone, /* Index: 26 */
1994 1600 sd_mapblockaddr_iodone, /* Index: 27 */
1995 1601 sd_mapblocksize_iodone, /* Index: 28 */
1996 1602 sd_checksum_iodone, /* Index: 29 */
1997 1603 sd_pm_iodone, /* Index: 30 */
1998 1604
1999 1605 /*
2000 1606 * Chain for buf IO for large sector size disk drive targets
2001 1607 * with checksumming (PM disabled)
2002 1608 */
2003 1609 sd_buf_iodone, /* Index: 31 */
2004 1610 sd_mapblockaddr_iodone, /* Index: 32 */
2005 1611 sd_mapblocksize_iodone, /* Index: 33 */
2006 1612 sd_checksum_iodone, /* Index: 34 */
2007 1613 };
2008 1614
2009 1615
2010 1616 /*
2011 1617 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2012 1618 * each iodone-side chain. These are located by the array index, but as the
2013 1619 * iodone side functions are called in a decrementing-index order, the
2014 1620 * highest index number in each chain must be specified (as these correspond
2015 1621 * to the first function in the iodone chain that will be called by the core
2016 1622 * at IO completion time).
2017 1623 */
2018 1624
2019 1625 #define SD_CHAIN_DISK_IODONE 2
2020 1626 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2021 1627 #define SD_CHAIN_RMMEDIA_IODONE 8
2022 1628 #define SD_CHAIN_MSS_DISK_IODONE 8
2023 1629 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2024 1630 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2025 1631 #define SD_CHAIN_CHKSUM_IODONE 15
2026 1632 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2027 1633 #define SD_CHAIN_USCSI_CMD_IODONE 20
2028 1634 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22
2029 1635 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2030 1636 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2031 1637 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2032 1638 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2033 1639
2034 1640
2035 1641
2036 1642 /*
2037 1643 * Array to map a layering chain index to the appropriate initpkt routine.
2038 1644 * The redundant entries are present so that the index used for accessing
2039 1645 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2040 1646 * with this table as well.
2041 1647 */
2042 1648 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2043 1649
2044 1650 static sd_initpkt_t sd_initpkt_map[] = {
2045 1651
2046 1652 /* Chain for buf IO for disk drive targets (PM enabled) */
2047 1653 sd_initpkt_for_buf, /* Index: 0 */
2048 1654 sd_initpkt_for_buf, /* Index: 1 */
2049 1655 sd_initpkt_for_buf, /* Index: 2 */
2050 1656
2051 1657 /* Chain for buf IO for disk drive targets (PM disabled) */
2052 1658 sd_initpkt_for_buf, /* Index: 3 */
2053 1659 sd_initpkt_for_buf, /* Index: 4 */
2054 1660
2055 1661 /*
2056 1662 * Chain for buf IO for removable-media or large sector size
2057 1663 * disk drive targets (PM enabled)
2058 1664 */
2059 1665 sd_initpkt_for_buf, /* Index: 5 */
2060 1666 sd_initpkt_for_buf, /* Index: 6 */
2061 1667 sd_initpkt_for_buf, /* Index: 7 */
2062 1668 sd_initpkt_for_buf, /* Index: 8 */
2063 1669
2064 1670 /*
2065 1671 * Chain for buf IO for removable-media or large sector size
2066 1672 * disk drive targets (PM disabled)
2067 1673 */
2068 1674 sd_initpkt_for_buf, /* Index: 9 */
2069 1675 sd_initpkt_for_buf, /* Index: 10 */
2070 1676 sd_initpkt_for_buf, /* Index: 11 */
2071 1677
2072 1678 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2073 1679 sd_initpkt_for_buf, /* Index: 12 */
2074 1680 sd_initpkt_for_buf, /* Index: 13 */
2075 1681 sd_initpkt_for_buf, /* Index: 14 */
2076 1682 sd_initpkt_for_buf, /* Index: 15 */
2077 1683
2078 1684 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2079 1685 sd_initpkt_for_buf, /* Index: 16 */
2080 1686 sd_initpkt_for_buf, /* Index: 17 */
2081 1687 sd_initpkt_for_buf, /* Index: 18 */
2082 1688
2083 1689 /* Chain for USCSI commands (non-checksum targets) */
2084 1690 sd_initpkt_for_uscsi, /* Index: 19 */
2085 1691 sd_initpkt_for_uscsi, /* Index: 20 */
2086 1692
2087 1693 /* Chain for USCSI commands (checksum targets) */
2088 1694 sd_initpkt_for_uscsi, /* Index: 21 */
2089 1695 sd_initpkt_for_uscsi, /* Index: 22 */
 2090 1696 sd_initpkt_for_uscsi, /* Index: 23 */
2091 1697
2092 1698 /* Chain for "direct" USCSI commands (all targets) */
2093 1699 sd_initpkt_for_uscsi, /* Index: 24 */
2094 1700
2095 1701 /* Chain for "direct priority" USCSI commands (all targets) */
2096 1702 sd_initpkt_for_uscsi, /* Index: 25 */
2097 1703
2098 1704 /*
2099 1705 * Chain for buf IO for large sector size disk drive targets
2100 1706 * with checksumming (PM enabled)
2101 1707 */
2102 1708 sd_initpkt_for_buf, /* Index: 26 */
2103 1709 sd_initpkt_for_buf, /* Index: 27 */
2104 1710 sd_initpkt_for_buf, /* Index: 28 */
2105 1711 sd_initpkt_for_buf, /* Index: 29 */
2106 1712 sd_initpkt_for_buf, /* Index: 30 */
2107 1713
2108 1714 /*
2109 1715 * Chain for buf IO for large sector size disk drive targets
2110 1716 * with checksumming (PM disabled)
2111 1717 */
2112 1718 sd_initpkt_for_buf, /* Index: 31 */
2113 1719 sd_initpkt_for_buf, /* Index: 32 */
2114 1720 sd_initpkt_for_buf, /* Index: 33 */
2115 1721 sd_initpkt_for_buf, /* Index: 34 */
2116 1722 };
2117 1723
2118 1724
2119 1725 /*
 2120 1726 * Array to map a layering chain index to the appropriate destroypkt routine.
2121 1727 * The redundant entries are present so that the index used for accessing
2122 1728 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2123 1729 * with this table as well.
2124 1730 */
2125 1731 typedef void (*sd_destroypkt_t)(struct buf *);
2126 1732
2127 1733 static sd_destroypkt_t sd_destroypkt_map[] = {
2128 1734
2129 1735 /* Chain for buf IO for disk drive targets (PM enabled) */
2130 1736 sd_destroypkt_for_buf, /* Index: 0 */
2131 1737 sd_destroypkt_for_buf, /* Index: 1 */
2132 1738 sd_destroypkt_for_buf, /* Index: 2 */
2133 1739
2134 1740 /* Chain for buf IO for disk drive targets (PM disabled) */
2135 1741 sd_destroypkt_for_buf, /* Index: 3 */
2136 1742 sd_destroypkt_for_buf, /* Index: 4 */
2137 1743
2138 1744 /*
2139 1745 * Chain for buf IO for removable-media or large sector size
2140 1746 * disk drive targets (PM enabled)
2141 1747 */
2142 1748 sd_destroypkt_for_buf, /* Index: 5 */
2143 1749 sd_destroypkt_for_buf, /* Index: 6 */
2144 1750 sd_destroypkt_for_buf, /* Index: 7 */
2145 1751 sd_destroypkt_for_buf, /* Index: 8 */
2146 1752
2147 1753 /*
2148 1754 * Chain for buf IO for removable-media or large sector size
2149 1755 * disk drive targets (PM disabled)
2150 1756 */
2151 1757 sd_destroypkt_for_buf, /* Index: 9 */
2152 1758 sd_destroypkt_for_buf, /* Index: 10 */
2153 1759 sd_destroypkt_for_buf, /* Index: 11 */
2154 1760
2155 1761 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2156 1762 sd_destroypkt_for_buf, /* Index: 12 */
2157 1763 sd_destroypkt_for_buf, /* Index: 13 */
2158 1764 sd_destroypkt_for_buf, /* Index: 14 */
2159 1765 sd_destroypkt_for_buf, /* Index: 15 */
2160 1766
2161 1767 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2162 1768 sd_destroypkt_for_buf, /* Index: 16 */
2163 1769 sd_destroypkt_for_buf, /* Index: 17 */
2164 1770 sd_destroypkt_for_buf, /* Index: 18 */
2165 1771
2166 1772 /* Chain for USCSI commands (non-checksum targets) */
2167 1773 sd_destroypkt_for_uscsi, /* Index: 19 */
2168 1774 sd_destroypkt_for_uscsi, /* Index: 20 */
2169 1775
2170 1776 /* Chain for USCSI commands (checksum targets) */
2171 1777 sd_destroypkt_for_uscsi, /* Index: 21 */
2172 1778 sd_destroypkt_for_uscsi, /* Index: 22 */
 2173 1779 sd_destroypkt_for_uscsi, /* Index: 23 */
2174 1780
2175 1781 /* Chain for "direct" USCSI commands (all targets) */
2176 1782 sd_destroypkt_for_uscsi, /* Index: 24 */
2177 1783
2178 1784 /* Chain for "direct priority" USCSI commands (all targets) */
2179 1785 sd_destroypkt_for_uscsi, /* Index: 25 */
2180 1786
2181 1787 /*
2182 1788 * Chain for buf IO for large sector size disk drive targets
 2183 1789 * with checksumming (PM enabled)
2184 1790 */
2185 1791 sd_destroypkt_for_buf, /* Index: 26 */
2186 1792 sd_destroypkt_for_buf, /* Index: 27 */
2187 1793 sd_destroypkt_for_buf, /* Index: 28 */
2188 1794 sd_destroypkt_for_buf, /* Index: 29 */
2189 1795 sd_destroypkt_for_buf, /* Index: 30 */
2190 1796
2191 1797 /*
2192 1798 * Chain for buf IO for large sector size disk drive targets
 2193 1799 * with checksumming (PM disabled)
2194 1800 */
2195 1801 sd_destroypkt_for_buf, /* Index: 31 */
2196 1802 sd_destroypkt_for_buf, /* Index: 32 */
2197 1803 sd_destroypkt_for_buf, /* Index: 33 */
2198 1804 sd_destroypkt_for_buf, /* Index: 34 */
2199 1805 };
2200 1806
2201 1807
2202 1808
2203 1809 /*
2204 1810 * Array to map a layering chain index to the appropriate chain "type".
2205 1811 * The chain type indicates a specific property/usage of the chain.
2206 1812 * The redundant entries are present so that the index used for accessing
2207 1813 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2208 1814 * with this table as well.
2209 1815 */
2210 1816
2211 1817 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2212 1818 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2213 1819 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2214 1820 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2215 1821 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2216 1822 /* (for error recovery) */
2217 1823
2218 1824 static int sd_chain_type_map[] = {
2219 1825
2220 1826 /* Chain for buf IO for disk drive targets (PM enabled) */
2221 1827 SD_CHAIN_BUFIO, /* Index: 0 */
2222 1828 SD_CHAIN_BUFIO, /* Index: 1 */
2223 1829 SD_CHAIN_BUFIO, /* Index: 2 */
2224 1830
2225 1831 /* Chain for buf IO for disk drive targets (PM disabled) */
2226 1832 SD_CHAIN_BUFIO, /* Index: 3 */
2227 1833 SD_CHAIN_BUFIO, /* Index: 4 */
2228 1834
2229 1835 /*
2230 1836 * Chain for buf IO for removable-media or large sector size
2231 1837 * disk drive targets (PM enabled)
2232 1838 */
2233 1839 SD_CHAIN_BUFIO, /* Index: 5 */
2234 1840 SD_CHAIN_BUFIO, /* Index: 6 */
2235 1841 SD_CHAIN_BUFIO, /* Index: 7 */
2236 1842 SD_CHAIN_BUFIO, /* Index: 8 */
2237 1843
2238 1844 /*
2239 1845 * Chain for buf IO for removable-media or large sector size
2240 1846 * disk drive targets (PM disabled)
2241 1847 */
2242 1848 SD_CHAIN_BUFIO, /* Index: 9 */
2243 1849 SD_CHAIN_BUFIO, /* Index: 10 */
2244 1850 SD_CHAIN_BUFIO, /* Index: 11 */
2245 1851
2246 1852 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2247 1853 SD_CHAIN_BUFIO, /* Index: 12 */
2248 1854 SD_CHAIN_BUFIO, /* Index: 13 */
2249 1855 SD_CHAIN_BUFIO, /* Index: 14 */
2250 1856 SD_CHAIN_BUFIO, /* Index: 15 */
2251 1857
2252 1858 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2253 1859 SD_CHAIN_BUFIO, /* Index: 16 */
2254 1860 SD_CHAIN_BUFIO, /* Index: 17 */
2255 1861 SD_CHAIN_BUFIO, /* Index: 18 */
2256 1862
2257 1863 /* Chain for USCSI commands (non-checksum targets) */
2258 1864 SD_CHAIN_USCSI, /* Index: 19 */
2259 1865 SD_CHAIN_USCSI, /* Index: 20 */
2260 1866
2261 1867 /* Chain for USCSI commands (checksum targets) */
2262 1868 SD_CHAIN_USCSI, /* Index: 21 */
2263 1869 SD_CHAIN_USCSI, /* Index: 22 */
2264 1870 SD_CHAIN_USCSI, /* Index: 23 */
2265 1871
2266 1872 /* Chain for "direct" USCSI commands (all targets) */
2267 1873 SD_CHAIN_DIRECT, /* Index: 24 */
2268 1874
2269 1875 /* Chain for "direct priority" USCSI commands (all targets) */
2270 1876 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2271 1877
2272 1878 /*
2273 1879 * Chain for buf IO for large sector size disk drive targets
2274 1880 * with checksumming (PM enabled)
2275 1881 */
2276 1882 SD_CHAIN_BUFIO, /* Index: 26 */
2277 1883 SD_CHAIN_BUFIO, /* Index: 27 */
2278 1884 SD_CHAIN_BUFIO, /* Index: 28 */
2279 1885 SD_CHAIN_BUFIO, /* Index: 29 */
2280 1886 SD_CHAIN_BUFIO, /* Index: 30 */
2281 1887
2282 1888 /*
2283 1889 * Chain for buf IO for large sector size disk drive targets
2284 1890 * with checksumming (PM disabled)
2285 1891 */
2286 1892 SD_CHAIN_BUFIO, /* Index: 31 */
2287 1893 SD_CHAIN_BUFIO, /* Index: 32 */
2288 1894 SD_CHAIN_BUFIO, /* Index: 33 */
2289 1895 SD_CHAIN_BUFIO, /* Index: 34 */
2290 1896 };
2291 1897
2292 1898
2293 1899 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2294 1900 #define SD_IS_BUFIO(xp) \
2295 1901 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2296 1902
2297 1903 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2298 1904 #define SD_IS_DIRECT_PRIORITY(xp) \
2299 1905 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2300 1906
2301 1907
2302 1908
2303 1909 /*
2304 1910 * Struct, array, and macros to map a specific chain to the appropriate
2305 1911 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2306 1912 *
2307 1913 * The sd_chain_index_map[] array is used at attach time to set the various
2308 1914 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2309 1915 * chain to be used with the instance. This allows different instances to use
2310 1916 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart
 2311 1917 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2312 1918 * values at sd_xbuf init time, this allows (1) layering chains may be changed
 2313 1919 * values at sd_xbuf init time, this allows (1) layering chains to be changed
 2314 1920 * dynamically and without locking; and (2) a layer may update the
2315 1921 * to allow for deferred processing of an IO within the same chain from a
2316 1922 * different execution context.
2317 1923 */
2318 1924
2319 1925 struct sd_chain_index {
2320 1926 int sci_iostart_index;
2321 1927 int sci_iodone_index;
2322 1928 };
2323 1929
2324 1930 static struct sd_chain_index sd_chain_index_map[] = {
2325 1931 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2326 1932 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2327 1933 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2328 1934 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2329 1935 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2330 1936 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2331 1937 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2332 1938 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2333 1939 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2334 1940 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2335 1941 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
2336 1942 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2337 1943
2338 1944 };
2339 1945
2340 1946
2341 1947 /*
2342 1948 * The following are indexes into the sd_chain_index_map[] array.
2343 1949 */
2344 1950
2345 1951 /* un->un_buf_chain_type must be set to one of these */
2346 1952 #define SD_CHAIN_INFO_DISK 0
2347 1953 #define SD_CHAIN_INFO_DISK_NO_PM 1
2348 1954 #define SD_CHAIN_INFO_RMMEDIA 2
2349 1955 #define SD_CHAIN_INFO_MSS_DISK 2
2350 1956 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2351 1957 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2352 1958 #define SD_CHAIN_INFO_CHKSUM 4
2353 1959 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2354 1960 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2355 1961 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2356 1962
2357 1963 /* un->un_uscsi_chain_type must be set to one of these */
2358 1964 #define SD_CHAIN_INFO_USCSI_CMD 6
2359 1965 /* USCSI with PM disabled is the same as DIRECT */
2360 1966 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2361 1967 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2362 1968
2363 1969 /* un->un_direct_chain_type must be set to one of these */
2364 1970 #define SD_CHAIN_INFO_DIRECT_CMD 8
2365 1971
2366 1972 /* un->un_priority_chain_type must be set to one of these */
2367 1973 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2368 1974
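/*
 * Editor's sketch (not part of this webrev): how a chain-info index is
 * expected to resolve, via sd_chain_index_map[], into the iostart and
 * iodone indexes recorded in an sd_xbuf. The helper is hypothetical; the
 * real assignments are made at attach and sd_xbuf init time.
 */
#ifdef SD_CHAIN_SKETCH
static void
sd_chain_sketch(struct sd_lun *un, struct sd_xbuf *xp)
{
	int i = un->un_buf_chain_type;		/* e.g. SD_CHAIN_INFO_DISK */

	xp->xb_chain_iostart = sd_chain_index_map[i].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[i].sci_iodone_index;
}
#endif	/* SD_CHAIN_SKETCH */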
2369 1975 /* size for devid inquiries */
2370 1976 #define MAX_INQUIRY_SIZE 0xF0
2371 1977
2372 1978 /*
2373 1979 * Macros used by functions to pass a given buf(9S) struct along to the
2374 1980 * next function in the layering chain for further processing.
2375 - *
2376 - * In the following macros, passing more than three arguments to the called
2377 - * routines causes the optimizer for the SPARC compiler to stop doing tail
2378 - * call elimination which results in significant performance degradation.
2379 1981 */
2380 1982 #define SD_BEGIN_IOSTART(index, un, bp) \
2381 1983 ((*(sd_iostart_chain[index]))(index, un, bp))
2382 1984
2383 1985 #define SD_BEGIN_IODONE(index, un, bp) \
2384 1986 ((*(sd_iodone_chain[index]))(index, un, bp))
2385 1987
2386 1988 #define SD_NEXT_IOSTART(index, un, bp) \
2387 1989 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2388 1990
2389 1991 #define SD_NEXT_IODONE(index, un, bp) \
2390 1992 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2391 1993
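/*
 * Editor's sketch (not part of this webrev): the shape of a pass-through
 * layer using the macros above. The function is hypothetical; real layers
 * such as sd_checksum_iostart() follow this pattern.
 */
#ifdef SD_CHAIN_SKETCH
static void
sd_passthru_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	/* ... layer-specific processing of bp would go here ... */

	/* Hand the buf to the next function in the iostart chain. */
	SD_NEXT_IOSTART(index, un, bp);
}
#endif	/* SD_CHAIN_SKETCH */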
2392 1994 /*
2393 1995 * Function: _init
2394 1996 *
2395 1997 * Description: This is the driver _init(9E) entry point.
2396 1998 *
2397 1999 * Return Code: Returns the value from mod_install(9F) or
2398 2000 * ddi_soft_state_init(9F) as appropriate.
2399 2001 *
2400 2002 * Context: Called when driver module loaded.
2401 2003 */
2402 2004
2403 2005 int
2404 2006 _init(void)
2405 2007 {
2406 2008 int err;
2407 2009
2408 2010 /* establish driver name from module name */
2409 2011 sd_label = (char *)mod_modname(&modlinkage);
2410 2012
2411 2013 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2412 2014 SD_MAXUNIT);
2413 2015 if (err != 0) {
2414 2016 return (err);
2415 2017 }
2416 2018
2417 2019 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2418 2020 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2419 2021 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2420 2022
2421 2023 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2422 2024 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2423 2025 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2424 2026
2425 2027 /*
2426 2028 * It's OK to init here even for fibre devices.
2427 2029 */
2428 2030 sd_scsi_probe_cache_init();
2429 2031
2430 2032 sd_scsi_target_lun_init();
2431 2033
2432 2034 /*
2433 2035 * Creating taskq before mod_install ensures that all callers (threads)
2434 2036 * that enter the module after a successful mod_install encounter
2435 2037 * a valid taskq.
2436 2038 */
2437 2039 sd_taskq_create();
2438 2040
2439 2041 err = mod_install(&modlinkage);
2440 2042 if (err != 0) {
2441 2043 /* delete taskq if install fails */
2442 2044 sd_taskq_delete();
2443 2045
2444 2046 mutex_destroy(&sd_detach_mutex);
2445 2047 mutex_destroy(&sd_log_mutex);
2446 2048 mutex_destroy(&sd_label_mutex);
2447 2049
2448 2050 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2449 2051 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2450 2052 cv_destroy(&sd_tr.srq_inprocess_cv);
2451 2053
2452 2054 sd_scsi_probe_cache_fini();
2453 2055
2454 2056 sd_scsi_target_lun_fini();
2455 2057
2456 2058 ddi_soft_state_fini(&sd_state);
2457 2059
2458 2060 return (err);
2459 2061 }
2460 2062
2461 2063 return (err);
2462 2064 }
2463 2065
2464 2066
2465 2067 /*
2466 2068 * Function: _fini
2467 2069 *
2468 2070 * Description: This is the driver _fini(9E) entry point.
2469 2071 *
2470 2072 * Return Code: Returns the value from mod_remove(9F)
2471 2073 *
2472 2074 * Context: Called when driver module is unloaded.
2473 2075 */
2474 2076
2475 2077 int
2476 2078 _fini(void)
2477 2079 {
2478 2080 int err;
2479 2081
2480 2082 if ((err = mod_remove(&modlinkage)) != 0) {
2481 2083 return (err);
2482 2084 }
2483 2085
2484 2086 sd_taskq_delete();
2485 2087
2486 2088 mutex_destroy(&sd_detach_mutex);
2487 2089 mutex_destroy(&sd_log_mutex);
2488 2090 mutex_destroy(&sd_label_mutex);
2489 2091 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2490 2092
2491 2093 sd_scsi_probe_cache_fini();
2492 2094
2493 2095 sd_scsi_target_lun_fini();
2494 2096
2495 2097 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2496 2098 cv_destroy(&sd_tr.srq_inprocess_cv);
2497 2099
2498 2100 ddi_soft_state_fini(&sd_state);
2499 2101
2500 2102 return (err);
2501 2103 }
2502 2104
2503 2105
2504 2106 /*
2505 2107 * Function: _info
2506 2108 *
2507 2109 * Description: This is the driver _info(9E) entry point.
2508 2110 *
2509 2111 * Arguments: modinfop - pointer to the driver modinfo structure
2510 2112 *
2511 2113 * Return Code: Returns the value from mod_info(9F).
2512 2114 *
2513 2115 * Context: Kernel thread context
2514 2116 */
2515 2117
2516 2118 int
2517 2119 _info(struct modinfo *modinfop)
2518 2120 {
2519 2121 return (mod_info(&modlinkage, modinfop));
2520 2122 }
2521 2123
2522 2124
2523 2125 /*
2524 2126 * The following routines implement the driver message logging facility.
2525 2127 * They provide component- and level-based debug output filtering.
2526 2128 * Output may also be restricted to messages for a single instance by
2527 2129 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2528 2130 * to NULL, then messages for all instances are printed.
2529 2131 *
2530 2132 * These routines have been cloned from each other due to the language
2531 2133 * constraints of macros and variable argument list processing.
2532 2134 */
2533 2135
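/*
 * Example call site (this pattern appears throughout the driver):
 *
 *	SD_ERROR(SD_LOG_COMMON, un,
 *	    "sd_enable_descr_sense: mode sense ctrl page failed\n");
 *
 * As an assumed tuning mechanism (not shipped configuration), the
 * sd_component_mask, sd_level_mask and sd_debug_un globals that gate
 * this output can be preset at boot via /etc/system, e.g.
 * "set sd:sd_level_mask = 0x1", using the driver's SD_LOGMASK_* and
 * SD_LOG_* bit definitions.
 */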
2534 2136
2535 2137 /*
2536 2138 * Function: sd_log_err
2537 2139 *
2538 2140 * Description: This routine is called by the SD_ERROR macro for debug
2539 2141 * logging of error conditions.
2540 2142 *
2541 2143 * Arguments: comp - driver component being logged
2542 2144 * un - pointer to driver soft state (unit) structure
2543 2145 * fmt - error string and format to be logged
2544 2146 */
2545 2147
2546 2148 static void
2547 2149 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2548 2150 {
2549 2151 va_list ap;
2550 2152 dev_info_t *dev;
2551 2153
2552 2154 ASSERT(un != NULL);
2553 2155 dev = SD_DEVINFO(un);
2554 2156 ASSERT(dev != NULL);
2555 2157
2556 2158 /*
2557 2159 * Filter messages based on the global component and level masks.
2558 2160 * Also print if un matches the value of sd_debug_un, or if
2559 2161 * sd_debug_un is set to NULL.
2560 2162 */
2561 2163 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2562 2164 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2563 2165 mutex_enter(&sd_log_mutex);
2564 2166 va_start(ap, fmt);
2565 2167 (void) vsprintf(sd_log_buf, fmt, ap);
2566 2168 va_end(ap);
2567 2169 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2568 2170 mutex_exit(&sd_log_mutex);
2569 2171 }
2570 2172 #ifdef SD_FAULT_INJECTION
2571 - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2572 2173 if (un->sd_injection_mask & comp) {
2573 2174 mutex_enter(&sd_log_mutex);
2574 2175 va_start(ap, fmt);
2575 2176 (void) vsprintf(sd_log_buf, fmt, ap);
2576 2177 va_end(ap);
2577 2178 sd_injection_log(sd_log_buf, un);
2578 2179 mutex_exit(&sd_log_mutex);
2579 2180 }
2580 2181 #endif
2581 2182 }
2582 2183
2583 2184
2584 2185 /*
2585 2186 * Function: sd_log_info
2586 2187 *
2587 2188 * Description: This routine is called by the SD_INFO macro for debug
2588 2189 * logging of general purpose informational conditions.
2589 2190 *
2590 2191 * Arguments: component - driver component being logged
2591 2192 * un - pointer to driver soft state (unit) structure
2592 2193 * fmt - info string and format to be logged
2593 2194 */
2594 2195
2595 2196 static void
2596 2197 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2597 2198 {
2598 2199 va_list ap;
2599 2200 dev_info_t *dev;
2600 2201
2601 2202 ASSERT(un != NULL);
2602 2203 dev = SD_DEVINFO(un);
2603 2204 ASSERT(dev != NULL);
2604 2205
2605 2206 /*
2606 2207 * Filter messages based on the global component and level masks.
2607 2208 * Also print if un matches the value of sd_debug_un, or if
2608 2209 * sd_debug_un is set to NULL.
2609 2210 */
2610 2211 if ((sd_component_mask & component) &&
2611 2212 (sd_level_mask & SD_LOGMASK_INFO) &&
2612 2213 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2613 2214 mutex_enter(&sd_log_mutex);
2614 2215 va_start(ap, fmt);
2615 2216 (void) vsprintf(sd_log_buf, fmt, ap);
2616 2217 va_end(ap);
2617 2218 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2618 2219 mutex_exit(&sd_log_mutex);
2619 2220 }
2620 2221 #ifdef SD_FAULT_INJECTION
2621 - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2622 2222 if (un->sd_injection_mask & component) {
2623 2223 mutex_enter(&sd_log_mutex);
2624 2224 va_start(ap, fmt);
2625 2225 (void) vsprintf(sd_log_buf, fmt, ap);
2626 2226 va_end(ap);
2627 2227 sd_injection_log(sd_log_buf, un);
2628 2228 mutex_exit(&sd_log_mutex);
2629 2229 }
2630 2230 #endif
2631 2231 }
2632 2232
2633 2233
2634 2234 /*
2635 2235 * Function: sd_log_trace
2636 2236 *
2637 2237 * Description: This routine is called by the SD_TRACE macro for debug
2638 2238 * logging of trace conditions (i.e. function entry/exit).
2639 2239 *
2640 2240 * Arguments: component - driver component being logged
2641 2241 * un - pointer to driver soft state (unit) structure
2642 2242 * fmt - trace string and format to be logged
2643 2243 */
2644 2244
2645 2245 static void
2646 2246 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2647 2247 {
2648 2248 va_list ap;
2649 2249 dev_info_t *dev;
2650 2250
2651 2251 ASSERT(un != NULL);
2652 2252 dev = SD_DEVINFO(un);
2653 2253 ASSERT(dev != NULL);
2654 2254
2655 2255 /*
2656 2256 * Filter messages based on the global component and level masks.
2657 2257 * Also print if un matches the value of sd_debug_un, or if
2658 2258 * sd_debug_un is set to NULL.
2659 2259 */
2660 2260 if ((sd_component_mask & component) &&
2661 2261 (sd_level_mask & SD_LOGMASK_TRACE) &&
2662 2262 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2663 2263 mutex_enter(&sd_log_mutex);
2664 2264 va_start(ap, fmt);
2665 2265 (void) vsprintf(sd_log_buf, fmt, ap);
2666 2266 va_end(ap);
2667 2267 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2668 2268 mutex_exit(&sd_log_mutex);
2669 2269 }
2670 2270 #ifdef SD_FAULT_INJECTION
2671 - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2672 2271 if (un->sd_injection_mask & component) {
2673 2272 mutex_enter(&sd_log_mutex);
2674 2273 va_start(ap, fmt);
2675 2274 (void) vsprintf(sd_log_buf, fmt, ap);
2676 2275 va_end(ap);
2677 2276 sd_injection_log(sd_log_buf, un);
2678 2277 mutex_exit(&sd_log_mutex);
2679 2278 }
2680 2279 #endif
2681 2280 }
2682 2281
2683 2282
2684 2283 /*
2685 2284 * Function: sdprobe
2686 2285 *
2687 2286 * Description: This is the driver probe(9e) entry point function.
2688 2287 *
2689 2288 * Arguments: devi - opaque device info handle
2690 2289 *
2691 2290 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2692 2291 * DDI_PROBE_FAILURE: If the probe failed.
2693 2292 * DDI_PROBE_PARTIAL: If the instance is not present now,
2694 2293 * but may be present in the future.
2695 2294 */
2696 2295
2697 2296 static int
2698 2297 sdprobe(dev_info_t *devi)
2699 2298 {
2700 2299 struct scsi_device *devp;
2701 2300 int rval;
2702 2301 int instance = ddi_get_instance(devi);
2703 2302
2704 - /*
2705 - * if it wasn't for pln, sdprobe could actually be nulldev
2706 - * in the "__fibre" case.
2707 - */
2708 2303 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2709 2304 return (DDI_PROBE_DONTCARE);
2710 2305 }
2711 2306
2712 2307 devp = ddi_get_driver_private(devi);
2713 2308
2714 2309 if (devp == NULL) {
2715 2310 /* Oops... the nexus driver is misconfigured. */
2716 2311 return (DDI_PROBE_FAILURE);
2717 2312 }
2718 2313
2719 2314 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2720 2315 return (DDI_PROBE_PARTIAL);
2721 2316 }
2722 2317
2723 2318 /*
2724 2319 * Call the SCSA utility probe routine to see if we actually
2725 2320 * have a target at this SCSI nexus.
2726 2321 */
2727 2322 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2728 2323 case SCSIPROBE_EXISTS:
2729 2324 switch (devp->sd_inq->inq_dtype) {
2730 2325 case DTYPE_DIRECT:
2731 2326 rval = DDI_PROBE_SUCCESS;
2732 2327 break;
2733 2328 case DTYPE_RODIRECT:
2734 - /* CDs etc. Can be removable media */
2329 + /* CDs etc. Can be removable media. */
2735 2330 rval = DDI_PROBE_SUCCESS;
2736 2331 break;
2737 2332 case DTYPE_OPTICAL:
2738 2333 /*
2739 - * Rewritable optical driver HP115AA
2740 - * Can also be removable media
2334 + * Rewritable optical driver HP115AA.
2335 + * Can also be removable media.
2741 2336 */
2742 -
2743 - /*
2744 - * Do not attempt to bind to DTYPE_OPTICAL if
2745 - * pre solaris 9 sparc sd behavior is required
2746 - *
2747 - * If first time through and sd_dtype_optical_bind
2748 - * has not been set in /etc/system check properties
2749 - */
2750 -
2751 - if (sd_dtype_optical_bind < 0) {
2752 - sd_dtype_optical_bind = ddi_prop_get_int
2753 - (DDI_DEV_T_ANY, devi, 0,
2754 - "optical-device-bind", 1);
2755 - }
2756 -
2757 - if (sd_dtype_optical_bind == 0) {
2758 - rval = DDI_PROBE_FAILURE;
2759 - } else {
2760 - rval = DDI_PROBE_SUCCESS;
2761 - }
2337 + rval = DDI_PROBE_SUCCESS;
2762 2338 break;
2763 -
2764 2339 case DTYPE_NOTPRESENT:
2765 2340 default:
2766 2341 rval = DDI_PROBE_FAILURE;
2767 2342 break;
2768 2343 }
2769 2344 break;
2770 2345 default:
2771 2346 rval = DDI_PROBE_PARTIAL;
2772 2347 break;
2773 2348 }
2774 2349
2775 2350 /*
2776 2351 * This routine checks for resource allocation prior to freeing,
2777 2352 * so it will take care of the "smart probing" case where a
2778 2353 * scsi_probe() may or may not have been issued and will *not*
2779 2354 * free previously-freed resources.
2780 2355 */
2781 2356 scsi_unprobe(devp);
2782 2357 return (rval);
2783 2358 }
2784 2359
2785 2360
2786 2361 /*
2787 2362 * Function: sdinfo
2788 2363 *
2789 2364 * Description: This is the driver getinfo(9e) entry point function.
2790 2365 * Given the device number, return the devinfo pointer from
2791 2366 * the scsi_device structure or the instance number
2792 2367 * associated with the dev_t.
2793 2368 *
2794 2369 * Arguments: dip - pointer to device info structure
2795 2370 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2796 2371 * DDI_INFO_DEVT2INSTANCE)
2797 2372 * arg - driver dev_t
2798 2373 * resultp - user buffer for request response
2799 2374 *
2800 2375 * Return Code: DDI_SUCCESS
2801 2376 * DDI_FAILURE
2802 2377 */
2803 2378 /* ARGSUSED */
2804 2379 static int
2805 2380 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2806 2381 {
2807 2382 struct sd_lun *un;
2808 2383 dev_t dev;
2809 2384 int instance;
2810 2385 int error;
2811 2386
2812 2387 switch (infocmd) {
2813 2388 case DDI_INFO_DEVT2DEVINFO:
2814 2389 dev = (dev_t)arg;
2815 2390 instance = SDUNIT(dev);
2816 2391 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2817 2392 return (DDI_FAILURE);
2818 2393 }
2819 2394 *result = (void *) SD_DEVINFO(un);
2820 2395 error = DDI_SUCCESS;
2821 2396 break;
2822 2397 case DDI_INFO_DEVT2INSTANCE:
2823 2398 dev = (dev_t)arg;
2824 2399 instance = SDUNIT(dev);
2825 2400 *result = (void *)(uintptr_t)instance;
2826 2401 error = DDI_SUCCESS;
2827 2402 break;
2828 2403 default:
2829 2404 error = DDI_FAILURE;
2830 2405 }
2831 2406 return (error);
2832 2407 }
2833 2408
2834 2409 /*
2835 2410 * Function: sd_prop_op
2836 2411 *
2837 2412 * Description: This is the driver prop_op(9e) entry point function.
2838 2413 * Return the number of blocks for the partition in question
2839 2414 * or forward the request to the property facilities.
2840 2415 *
2841 2416 * Arguments: dev - device number
2842 2417 * dip - pointer to device info structure
2843 2418 * prop_op - property operator
2844 2419 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2845 2420 * name - pointer to property name
2846 2421 * valuep - pointer or address of the user buffer
2847 2422 * lengthp - property length
2848 2423 *
2849 2424 * Return Code: DDI_PROP_SUCCESS
2850 2425 * DDI_PROP_NOT_FOUND
2851 2426 * DDI_PROP_UNDEFINED
2852 2427 * DDI_PROP_NO_MEMORY
2853 2428 * DDI_PROP_BUF_TOO_SMALL
2854 2429 */
2855 2430
2856 2431 static int
2857 2432 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2858 2433 char *name, caddr_t valuep, int *lengthp)
2859 2434 {
2860 2435 struct sd_lun *un;
2861 2436
2862 2437 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
2863 - return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2864 - name, valuep, lengthp));
2438 + goto fallback;
2865 2439
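	/*
	 * If an attach is in progress, wait for it to complete before
	 * consulting cmlb; if the attach failed, fall back to the
	 * generic DDI property handling below.
	 */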
2440 + mutex_enter(SD_MUTEX(un));
2441 + while (un->un_state == SD_STATE_ATTACHING)
2442 + cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
2443 +
2444 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
2445 + mutex_exit(SD_MUTEX(un));
2446 + goto fallback;
2447 + }
2448 + mutex_exit(SD_MUTEX(un));
2449 +
2866 2450 return (cmlb_prop_op(un->un_cmlbhandle,
2867 2451 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
2868 2452 SDPART(dev), (void *)SD_PATH_DIRECT));
2453 +
2454 +fallback:
2455 + return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, valuep,
2456 + lengthp));
2869 2457 }
2870 2458
2871 2459 /*
2872 2460 * The following functions are for smart probing:
2873 2461 * sd_scsi_probe_cache_init()
2874 2462 * sd_scsi_probe_cache_fini()
2875 2463 * sd_scsi_clear_probe_cache()
2876 2464 * sd_scsi_probe_with_cache()
2877 2465 */
2878 2466
2879 2467 /*
2880 2468 * Function: sd_scsi_probe_cache_init
2881 2469 *
2882 2470 * Description: Initializes the probe response cache mutex and head pointer.
2883 2471 *
2884 2472 * Context: Kernel thread context
2885 2473 */
2886 2474
2887 2475 static void
2888 2476 sd_scsi_probe_cache_init(void)
2889 2477 {
2890 2478 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2891 2479 sd_scsi_probe_cache_head = NULL;
2892 2480 }
2893 2481
2894 2482
2895 2483 /*
2896 2484 * Function: sd_scsi_probe_cache_fini
2897 2485 *
2898 2486 * Description: Frees all resources associated with the probe response cache.
2899 2487 *
2900 2488 * Context: Kernel thread context
2901 2489 */
2902 2490
2903 2491 static void
2904 2492 sd_scsi_probe_cache_fini(void)
2905 2493 {
2906 2494 struct sd_scsi_probe_cache *cp;
2907 2495 struct sd_scsi_probe_cache *ncp;
2908 2496
2909 2497 /* Clean up our smart probing linked list */
2910 2498 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2911 2499 ncp = cp->next;
2912 2500 kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2913 2501 }
2914 2502 sd_scsi_probe_cache_head = NULL;
2915 2503 mutex_destroy(&sd_scsi_probe_cache_mutex);
2916 2504 }
2917 2505
2918 2506
2919 2507 /*
2920 2508 * Function: sd_scsi_clear_probe_cache
2921 2509 *
2922 2510 * Description: This routine clears the probe response cache. This is
2923 2511 * done when open() returns ENXIO so that when deferred
2924 2512 * attach is attempted (possibly after a device has been
2925 2513 * turned on) we will retry the probe. Since we don't know
2926 2514 * which target we failed to open, we just clear the
2927 2515 * entire cache.
2928 2516 *
2929 2517 * Context: Kernel thread context
2930 2518 */
2931 2519
2932 2520 static void
2933 2521 sd_scsi_clear_probe_cache(void)
2934 2522 {
2935 2523 struct sd_scsi_probe_cache *cp;
2936 2524 int i;
2937 2525
2938 2526 mutex_enter(&sd_scsi_probe_cache_mutex);
2939 2527 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2940 2528 /*
2941 2529 * Reset all entries to SCSIPROBE_EXISTS. This will
2942 2530 * force probing to be performed the next time
2943 2531 * sd_scsi_probe_with_cache is called.
2944 2532 */
2945 2533 for (i = 0; i < NTARGETS_WIDE; i++) {
2946 2534 cp->cache[i] = SCSIPROBE_EXISTS;
2947 2535 }
2948 2536 }
2949 2537 mutex_exit(&sd_scsi_probe_cache_mutex);
2950 2538 }
2951 2539
2952 2540
2953 2541 /*
2954 2542 * Function: sd_scsi_probe_with_cache
2955 2543 *
2956 2544 * Description: This routine implements support for a scsi device probe
2957 2545 * with cache. The driver maintains a cache of the target
2958 2546 * responses to scsi probes. If we get no response from a
2959 2547 * target during a probe inquiry, we remember that, and we
2960 2548 * avoid additional calls to scsi_probe on non-zero LUNs
2961 2549 * on the same target until the cache is cleared. By doing
2962 2550 * so we avoid the 1/4 sec selection timeout for non-zero
2963 2551 * LUNs. LUN 0 of a target is always probed.
2964 2552 *
2965 2553 * Arguments: devp - Pointer to a scsi_device(9S) structure
2966 2554 * waitfunc - indicates what the allocator routines should
2967 2555 * do when resources are not available. This value
2968 2556 * is passed on to scsi_probe() when that routine
2969 2557 * is called.
2970 2558 *
2971 2559 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
2972 2560 * otherwise the value returned by scsi_probe(9F).
2973 2561 *
2974 2562 * Context: Kernel thread context
2975 2563 */
2976 2564
2977 2565 static int
2978 2566 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2979 2567 {
2980 2568 struct sd_scsi_probe_cache *cp;
2981 2569 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2982 2570 int lun, tgt;
2983 2571
2984 2572 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2985 2573 SCSI_ADDR_PROP_LUN, 0);
2986 2574 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2987 2575 SCSI_ADDR_PROP_TARGET, -1);
2988 2576
2989 2577 /* Make sure caching enabled and target in range */
2990 2578 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2991 2579 /* do it the old way (no cache) */
2992 2580 return (scsi_probe(devp, waitfn));
2993 2581 }
2994 2582
2995 2583 mutex_enter(&sd_scsi_probe_cache_mutex);
2996 2584
2997 2585 /* Find the cache for this scsi bus instance */
2998 2586 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2999 2587 if (cp->pdip == pdip) {
3000 2588 break;
3001 2589 }
3002 2590 }
3003 2591
3004 2592 /* If we can't find a cache for this pdip, create one */
3005 2593 if (cp == NULL) {
3006 2594 int i;
3007 2595
3008 2596 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
3009 2597 KM_SLEEP);
3010 2598 cp->pdip = pdip;
3011 2599 cp->next = sd_scsi_probe_cache_head;
3012 2600 sd_scsi_probe_cache_head = cp;
3013 2601 for (i = 0; i < NTARGETS_WIDE; i++) {
3014 2602 cp->cache[i] = SCSIPROBE_EXISTS;
3015 2603 }
3016 2604 }
3017 2605
3018 2606 mutex_exit(&sd_scsi_probe_cache_mutex);
3019 2607
3020 2608 /* Recompute the cache for this target if LUN zero */
3021 2609 if (lun == 0) {
3022 2610 cp->cache[tgt] = SCSIPROBE_EXISTS;
3023 2611 }
3024 2612
3025 2613 /* Don't probe if cache remembers a NORESP from a previous LUN. */
3026 2614 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
3027 2615 return (SCSIPROBE_NORESP);
3028 2616 }
3029 2617
3030 2618 /* Do the actual probe; save & return the result */
3031 2619 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
3032 2620 }
3033 2621
3034 2622
3035 2623 /*
3036 2624 * Function: sd_scsi_target_lun_init
3037 2625 *
3038 2626 * Description: Initializes the attached lun chain mutex and head pointer.
3039 2627 *
3040 2628 * Context: Kernel thread context
3041 2629 */
3042 2630
3043 2631 static void
3044 2632 sd_scsi_target_lun_init(void)
3045 2633 {
3046 2634 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3047 2635 sd_scsi_target_lun_head = NULL;
3048 2636 }
3049 2637
3050 2638
3051 2639 /*
3052 2640 * Function: sd_scsi_target_lun_fini
3053 2641 *
3054 2642 * Description: Frees all resources associated with the attached lun
3055 2643 * chain.
3056 2644 *
3057 2645 * Context: Kernel thread context
3058 2646 */
3059 2647
3060 2648 static void
3061 2649 sd_scsi_target_lun_fini(void)
3062 2650 {
3063 2651 struct sd_scsi_hba_tgt_lun *cp;
3064 2652 struct sd_scsi_hba_tgt_lun *ncp;
3065 2653
3066 2654 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3067 2655 ncp = cp->next;
3068 2656 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3069 2657 }
3070 2658 sd_scsi_target_lun_head = NULL;
3071 2659 mutex_destroy(&sd_scsi_target_lun_mutex);
3072 2660 }
3073 2661
3074 2662
3075 2663 /*
3076 2664 * Function: sd_scsi_get_target_lun_count
3077 2665 *
3078 2666 * Description: This routine will check in the attached lun chain to see
3079 2667 * how many luns are attached on the required SCSI controller
3080 2668 * and target. Currently, some capabilities such as tagged
3081 2669 * queueing are supported by the HBA on a per-target basis,
3082 2670 * so all luns in a target have the same capabilities. Based
3083 2671 * on this assumption, sd should only set these capabilities
3084 2672 * once per target. This function is called when sd needs to
3085 2673 * decide how many luns are already attached on a target.
3086 2674 *
3087 2675 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3088 2676 * controller device.
3089 2677 * target - The target ID on the controller's SCSI bus.
3090 2678 *
3091 2679 * Return Code: The number of luns attached on the required target and
3092 2680 * controller.
3093 2681 * -1 if target ID is not in parallel SCSI scope or the given
3094 2682 * dip is not in the chain.
3095 2683 *
3096 2684 * Context: Kernel thread context
3097 2685 */
3098 2686
3099 2687 static int
3100 2688 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3101 2689 {
3102 2690 struct sd_scsi_hba_tgt_lun *cp;
3103 2691
3104 2692 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3105 2693 return (-1);
3106 2694 }
3107 2695
3108 2696 mutex_enter(&sd_scsi_target_lun_mutex);
3109 2697
3110 2698 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3111 2699 if (cp->pdip == dip) {
3112 2700 break;
3113 2701 }
3114 2702 }
3115 2703
3116 2704 mutex_exit(&sd_scsi_target_lun_mutex);
3117 2705
3118 2706 if (cp == NULL) {
3119 2707 return (-1);
3120 2708 }
3121 2709
3122 2710 return (cp->nlun[target]);
3123 2711 }
3124 2712
3125 2713
3126 2714 /*
3127 2715 * Function: sd_scsi_update_lun_on_target
3128 2716 *
3129 2717 * Description: This routine is used to update the attached lun chain when a
3130 2718 * lun is attached or detached on a target.
3131 2719 *
3132 2720 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3133 2721 * controller device.
3134 2722 * target - The target ID on the controller's SCSI bus.
3135 2723 * flag - Indicate the lun is attached or detached.
3136 2724 *
3137 2725 * Context: Kernel thread context
3138 2726 */
3139 2727
3140 2728 static void
3141 2729 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
3142 2730 {
3143 2731 struct sd_scsi_hba_tgt_lun *cp;
3144 2732
3145 2733 mutex_enter(&sd_scsi_target_lun_mutex);
3146 2734
3147 2735 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3148 2736 if (cp->pdip == dip) {
3149 2737 break;
3150 2738 }
3151 2739 }
3152 2740
3153 2741 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
3154 2742 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
3155 2743 KM_SLEEP);
3156 2744 cp->pdip = dip;
3157 2745 cp->next = sd_scsi_target_lun_head;
3158 2746 sd_scsi_target_lun_head = cp;
3159 2747 }
3160 2748
3161 2749 mutex_exit(&sd_scsi_target_lun_mutex);
3162 2750
3163 2751 if (cp != NULL) {
3164 2752 if (flag == SD_SCSI_LUN_ATTACH) {
3165 2753 cp->nlun[target]++;
3166 2754 } else {
3167 2755 cp->nlun[target]--;
3168 2756 }
3169 2757 }
3170 2758 }
3171 2759
3172 2760
3173 2761 /*
3174 2762 * Function: sd_spin_up_unit
3175 2763 *
3176 2764 * Description: Issues the following commands to spin up the device:
3177 2765 * START STOP UNIT and INQUIRY.
3178 2766 *
3179 2767 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3180 2768 * structure for this target.
3181 2769 *
3182 2770 * Return Code: 0 - success
3183 2771 * EIO - failure
3184 2772 * EACCES - reservation conflict
3185 2773 *
3186 2774 * Context: Kernel thread context
3187 2775 */
3188 2776
3189 2777 static int
3190 2778 sd_spin_up_unit(sd_ssc_t *ssc)
3191 2779 {
3192 2780 size_t resid = 0;
3193 2781 int has_conflict = FALSE;
3194 2782 uchar_t *bufaddr;
3195 2783 int status;
3196 2784 struct sd_lun *un;
3197 2785
3198 2786 ASSERT(ssc != NULL);
3199 2787 un = ssc->ssc_un;
3200 2788 ASSERT(un != NULL);
3201 2789
3202 2790 /*
3203 2791 * Send a throwaway START UNIT command.
3204 2792 *
3205 2793 * If we fail on this, we don't care presently what precisely
3206 2794 * is wrong. EMC's arrays will also fail this with a check
3207 2795 * condition (0x2/0x4/0x3) if the device is "inactive," but
3208 2796 * we don't want to fail the attach because it may become
3209 2797 * "active" later.
3210 2798 * We don't know whether power condition is supported at
3211 2799 * this stage, so use the START STOP bit.
3212 2800 */
3213 2801 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3214 2802 SD_TARGET_START, SD_PATH_DIRECT);
3215 2803
3216 - if (status != 0) {
3217 - if (status == EACCES)
3218 - has_conflict = TRUE;
2804 + switch (status) {
2805 + case EIO:
2806 + sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
2807 + return (status);
2808 + case EACCES:
2809 + has_conflict = TRUE; /* FALLTHROUGH */
2810 + default:
3219 2811 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3220 2812 }
3221 2813
3222 2814 /*
3223 2815 * Send another INQUIRY command to the target. This is necessary for
3224 2816 * non-removable media direct access devices because their INQUIRY data
3225 2817 * may not be fully qualified until they are spun up (perhaps via the
3226 2818 * START command above). (Note: This seems to be needed for some
3227 2819 * legacy devices only.) The INQUIRY command should succeed even if a
3228 2820 * Reservation Conflict is present.
3229 2821 */
3230 2822 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3231 2823
3232 2824 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3233 2825 != 0) {
3234 2826 kmem_free(bufaddr, SUN_INQSIZE);
3235 2827 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3236 2828 return (EIO);
3237 2829 }
3238 2830
3239 2831 /*
3240 2832 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3241 2833 * Note that this routine does not return a failure here even if the
3242 2834 * INQUIRY command did not return any data. This is a legacy behavior.
3243 2835 */
3244 2836 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3245 2837 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3246 2838 }
3247 2839
3248 2840 kmem_free(bufaddr, SUN_INQSIZE);
3249 2841
3250 2842 /* If we hit a reservation conflict above, tell the caller. */
3251 2843 if (has_conflict == TRUE) {
3252 2844 return (EACCES);
3253 2845 }
3254 2846
3255 2847 return (0);
3256 2848 }
3257 2849
3258 2850 #ifdef _LP64
3259 2851 /*
3260 2852 * Function: sd_enable_descr_sense
3261 2853 *
3262 2854 * Description: This routine attempts to select descriptor sense format
3263 2855 * using the Control mode page. Devices that support 64 bit
3264 2856 * LBAs (for >2TB luns) should also implement descriptor
3265 2857 * sense data so we will call this function whenever we see
3266 2858 * sense data, so we call this function whenever we see
3267 2859 * a lun larger than 2TB. If for some reason the device
3268 2860 * supports 64 bit LBAs but doesn't support descriptor sense,
3269 2861 * presumably the mode select will fail. Everything will
3270 2862 * complete sense data for commands that fail with an LBA
3271 2863 * larger than 32 bits.
3272 2864 *
3273 2865 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3274 2866 * structure for this target.
3275 2867 *
3276 2868 * Context: Kernel thread context only
3277 2869 */
3278 2870
3279 2871 static void
3280 2872 sd_enable_descr_sense(sd_ssc_t *ssc)
3281 2873 {
3282 2874 uchar_t *header;
3283 2875 struct mode_control_scsi3 *ctrl_bufp;
3284 2876 size_t buflen;
3285 2877 size_t bd_len;
3286 2878 int status;
3287 2879 struct sd_lun *un;
3288 2880
3289 2881 ASSERT(ssc != NULL);
3290 2882 un = ssc->ssc_un;
3291 2883 ASSERT(un != NULL);
3292 2884
3293 2885 /*
3294 2886 * Read MODE SENSE page 0xA, Control Mode Page
3295 2887 */
3296 2888 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3297 2889 sizeof (struct mode_control_scsi3);
3298 2890 header = kmem_zalloc(buflen, KM_SLEEP);
3299 2891
3300 2892 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
3301 2893 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
3302 2894
3303 2895 if (status != 0) {
3304 2896 SD_ERROR(SD_LOG_COMMON, un,
3305 2897 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3306 2898 goto eds_exit;
3307 2899 }
3308 2900
3309 2901 /*
3310 2902 * Determine size of Block Descriptors in order to locate
3311 2903 * the mode page data. ATAPI devices return 0, SCSI devices
3312 2904 * should return MODE_BLK_DESC_LENGTH.
3313 2905 */
3314 2906 bd_len = ((struct mode_header *)header)->bdesc_length;
3315 2907
3316 2908 /* Clear the mode data length field for MODE SELECT */
3317 2909 ((struct mode_header *)header)->length = 0;
3318 2910
3319 2911 ctrl_bufp = (struct mode_control_scsi3 *)
3320 2912 (header + MODE_HEADER_LENGTH + bd_len);
3321 2913
3322 2914 /*
3323 2915 * If the page length is smaller than the expected value,
3324 2916 * the target device doesn't support D_SENSE. Bail out here.
3325 2917 */
3326 2918 if (ctrl_bufp->mode_page.length <
3327 2919 sizeof (struct mode_control_scsi3) - 2) {
3328 2920 SD_ERROR(SD_LOG_COMMON, un,
3329 2921 "sd_enable_descr_sense: enable D_SENSE failed\n");
3330 2922 goto eds_exit;
3331 2923 }
3332 2924
3333 2925 /*
3334 2926 * Clear PS bit for MODE SELECT
3335 2927 */
3336 2928 ctrl_bufp->mode_page.ps = 0;
3337 2929
3338 2930 /*
3339 2931 * Set D_SENSE to enable descriptor sense format.
3340 2932 */
3341 2933 ctrl_bufp->d_sense = 1;
3342 2934
3343 2935 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3344 2936
3345 2937 /*
3346 2938 * Use MODE SELECT to commit the change to the D_SENSE bit
3347 2939 */
3348 2940 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
3349 2941 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);
3350 2942
3351 2943 if (status != 0) {
3352 2944 SD_INFO(SD_LOG_COMMON, un,
3353 2945 "sd_enable_descr_sense: mode select ctrl page failed\n");
3354 2946 } else {
3355 2947 kmem_free(header, buflen);
3356 2948 return;
3357 2949 }
3358 2950
3359 2951 eds_exit:
3360 2952 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3361 2953 kmem_free(header, buflen);
3362 2954 }
3363 2955
3364 2956 /*
3365 2957 * Function: sd_reenable_dsense_task
3366 2958 *
3367 2959 * Description: Re-enable descriptor sense after device or bus reset
3368 2960 *
3369 2961 * Context: Executes in a taskq() thread context
3370 2962 */
3371 2963 static void
3372 2964 sd_reenable_dsense_task(void *arg)
3373 2965 {
3374 2966 struct sd_lun *un = arg;
3375 2967 sd_ssc_t *ssc;
3376 2968
3377 2969 ASSERT(un != NULL);
3378 2970
3379 2971 ssc = sd_ssc_init(un);
3380 2972 sd_enable_descr_sense(ssc);
3381 2973 sd_ssc_fini(ssc);
3382 2974 }
3383 2975 #endif /* _LP64 */
3384 2976
3385 2977 /*
3386 2978 * Function: sd_set_mmc_caps
3387 2979 *
3388 2980 * Description: This routine determines if the device is MMC compliant and if
3389 2981 * the device supports CDDA via a mode sense of the CDVD
3390 2982 * capabilities mode page. Also checks if the device is a
3391 2983 * dvdram writable device.
3392 2984 *
3393 2985 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3394 2986 * structure for this target.
3395 2987 *
3396 2988 * Context: Kernel thread context only
3397 2989 */
3398 2990
3399 2991 static void
3400 2992 sd_set_mmc_caps(sd_ssc_t *ssc)
3401 2993 {
3402 2994 struct mode_header_grp2 *sense_mhp;
3403 2995 uchar_t *sense_page;
3404 2996 caddr_t buf;
3405 2997 int bd_len;
3406 2998 int status;
3407 2999 struct uscsi_cmd com;
3408 3000 int rtn;
3409 3001 uchar_t *out_data_rw, *out_data_hd;
3410 3002 uchar_t *rqbuf_rw, *rqbuf_hd;
3411 3003 uchar_t *out_data_gesn;
3412 3004 int gesn_len;
3413 3005 struct sd_lun *un;
3414 3006
3415 3007 ASSERT(ssc != NULL);
3416 3008 un = ssc->ssc_un;
3417 3009 ASSERT(un != NULL);
3418 3010
3419 3011 /*
3420 3012 * The flags which will be set in this function are - mmc compliant,
3421 3013 * dvdram writable device, cdda support. Initialize them to FALSE
3422 3014 * and if a capability is detected - it will be set to TRUE.
3423 3015 */
3424 3016 un->un_f_mmc_cap = FALSE;
3425 3017 un->un_f_dvdram_writable_device = FALSE;
3426 3018 un->un_f_cfg_cdda = FALSE;
3427 3019
3428 3020 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3429 3021 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3430 3022 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3431 3023
3432 3024 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3433 3025
3434 3026 if (status != 0) {
3435 3027 /* command failed; just return */
3436 3028 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3437 3029 return;
3438 3030 }
3439 3031 /*
3440 3032 * If the mode sense request for the CDROM CAPABILITIES
3441 3033 * page (0x2A) succeeds the device is assumed to be MMC.
3442 3034 */
3443 3035 un->un_f_mmc_cap = TRUE;
3444 3036
3445 3037 /* See if GET STATUS EVENT NOTIFICATION is supported */
3446 3038 if (un->un_f_mmc_gesn_polling) {
3447 3039 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN;
3448 3040 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP);
3449 3041
3450 3042 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc,
3451 3043 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS);
3452 3044
3453 3045 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3454 3046
3455 3047 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) {
3456 3048 un->un_f_mmc_gesn_polling = FALSE;
3457 3049 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3458 3050 "sd_set_mmc_caps: gesn not supported "
3459 3051 "%d %x %x %x %x\n", rtn,
3460 3052 out_data_gesn[0], out_data_gesn[1],
3461 3053 out_data_gesn[2], out_data_gesn[3]);
3462 3054 }
3463 3055
3464 3056 kmem_free(out_data_gesn, gesn_len);
3465 3057 }
3466 3058
3467 3059 /* Get to the page data */
3468 3060 sense_mhp = (struct mode_header_grp2 *)buf;
3469 3061 bd_len = (sense_mhp->bdesc_length_hi << 8) |
3470 3062 sense_mhp->bdesc_length_lo;
3471 3063 if (bd_len > MODE_BLK_DESC_LENGTH) {
3472 3064 /*
3473 3065 * We did not get back the expected block descriptor
3474 3066 * length so we cannot determine if the device supports
3475 3067 * CDDA. However, we still indicate the device is MMC
3476 3068 * according to the successful response to the page
3477 3069 * 0x2A mode sense request.
3478 3070 */
3479 3071 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3480 3072 "sd_set_mmc_caps: Mode Sense returned "
3481 3073 "invalid block descriptor length\n");
3482 3074 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3483 3075 return;
3484 3076 }
3485 3077
3486 3078 /* See if read CDDA is supported */
3487 3079 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3488 3080 bd_len);
3489 3081 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3490 3082
3491 3083 /* See if writing DVD RAM is supported. */
3492 3084 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
3493 3085 if (un->un_f_dvdram_writable_device == TRUE) {
3494 3086 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3495 3087 return;
3496 3088 }
3497 3089
3498 3090 /*
3499 3091 * If the device presents DVD or CD capabilities in the mode
3500 3092 * page, we can return here since a RRD will not have
3501 3093 * these capabilities.
3502 3094 */
3503 3095 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3504 3096 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3505 3097 return;
3506 3098 }
3507 3099 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3508 3100
3509 3101 /*
3510 3102 * If un->un_f_dvdram_writable_device is still FALSE,
3511 3103 * check for a Removable Rigid Disk (RRD). A RRD
3512 3104 * device is identified by the features RANDOM_WRITABLE and
3513 3105 * HARDWARE_DEFECT_MANAGEMENT.
3514 3106 */
3515 3107 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3516 3108 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3517 3109
3518 3110 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3519 3111 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3520 3112 RANDOM_WRITABLE, SD_PATH_STANDARD);
3521 3113
3522 3114 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3523 3115
3524 3116 if (rtn != 0) {
3525 3117 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3526 3118 kmem_free(rqbuf_rw, SENSE_LENGTH);
3527 3119 return;
3528 3120 }
3529 3121
3530 3122 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3531 3123 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3532 3124
3533 3125 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3534 3126 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3535 3127 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3536 3128
3537 3129 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3538 3130
3539 3131 if (rtn == 0) {
3540 3132 /*
3541 3133 * We have good information, check for random writable
3542 3134 * and hardware defect features.
3543 3135 */
3544 3136 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3545 3137 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3546 3138 un->un_f_dvdram_writable_device = TRUE;
3547 3139 }
3548 3140 }
3549 3141
3550 3142 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3551 3143 kmem_free(rqbuf_rw, SENSE_LENGTH);
3552 3144 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3553 3145 kmem_free(rqbuf_hd, SENSE_LENGTH);
3554 3146 }
3555 3147
3556 3148 /*
3557 3149 * Function: sd_check_for_writable_cd
3558 3150 *
3559 3151 * Description: This routine determines whether the media in the
3560 3152 * device is writable. It uses the GET CONFIGURATION command
3561 3153 * (0x46) to make this determination.
3562 3154 *
3563 3155 * Arguments: un - driver soft state (unit) structure
3564 3156 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3565 3157 * chain and the normal command waitq, or
3566 3158 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3567 3159 * "direct" chain and bypass the normal command
3568 3160 * waitq.
3569 3161 *
3570 3162 * Context: Never called at interrupt context.
3571 3163 */
3572 3164
3573 3165 static void
3574 3166 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3575 3167 {
3576 3168 struct uscsi_cmd com;
3577 3169 uchar_t *out_data;
3578 3170 uchar_t *rqbuf;
3579 3171 int rtn;
3580 3172 uchar_t *out_data_rw, *out_data_hd;
3581 3173 uchar_t *rqbuf_rw, *rqbuf_hd;
3582 3174 struct mode_header_grp2 *sense_mhp;
3583 3175 uchar_t *sense_page;
3584 3176 caddr_t buf;
3585 3177 int bd_len;
3586 3178 int status;
3587 3179 struct sd_lun *un;
3588 3180
3589 3181 ASSERT(ssc != NULL);
3590 3182 un = ssc->ssc_un;
3591 3183 ASSERT(un != NULL);
3592 3184 ASSERT(mutex_owned(SD_MUTEX(un)));
3593 3185
3594 3186 /*
3595 3187 * Initialize writable media to FALSE. It is set to TRUE only if
3596 3188 * the configuration info tells us the media is writable.
3597 3189 */
3598 3190 un->un_f_mmc_writable_media = FALSE;
3599 3191 mutex_exit(SD_MUTEX(un));
3600 3192
3601 3193 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3602 3194 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3603 3195
3604 3196 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3605 3197 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3606 3198
3607 3199 if (rtn != 0)
3608 3200 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3609 3201
3610 3202 mutex_enter(SD_MUTEX(un));
3611 3203 if (rtn == 0) {
3612 3204 /*
3613 3205 * We have good information, check for writable DVD.
3614 3206 */
3615 3207 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3616 3208 un->un_f_mmc_writable_media = TRUE;
3617 3209 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3618 3210 kmem_free(rqbuf, SENSE_LENGTH);
3619 3211 return;
3620 3212 }
3621 3213 }
3622 3214
3623 3215 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3624 3216 kmem_free(rqbuf, SENSE_LENGTH);
3625 3217
3626 3218 /*
3627 3219 * Determine if this is a RRD type device.
3628 3220 */
3629 3221 mutex_exit(SD_MUTEX(un));
3630 3222 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3631 3223 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3632 3224 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3633 3225
3634 3226 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3635 3227
3636 3228 mutex_enter(SD_MUTEX(un));
3637 3229 if (status != 0) {
3638 3230 /* command failed; just return */
3639 3231 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3640 3232 return;
3641 3233 }
3642 3234
3643 3235 /* Get to the page data */
3644 3236 sense_mhp = (struct mode_header_grp2 *)buf;
3645 3237 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3646 3238 if (bd_len > MODE_BLK_DESC_LENGTH) {
3647 3239 /*
3648 3240 * We did not get back the expected block descriptor length so
3649 3241 * we cannot check the mode page.
3650 3242 */
3651 3243 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3652 3244 "sd_check_for_writable_cd: Mode Sense returned "
3653 3245 "invalid block descriptor length\n");
3654 3246 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3655 3247 return;
3656 3248 }
3657 3249
3658 3250 /*
3659 3251 * If the device presents DVD or CD capabilities in the mode
3660 3252 * page, we can return here since a RRD device will not have
3661 3253 * these capabilities.
3662 3254 */
3663 3255 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3664 3256 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3665 3257 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3666 3258 return;
3667 3259 }
3668 3260 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3669 3261
3670 3262 /*
3671 3263 * If un->un_f_mmc_writable_media is still FALSE,
3672 3264 * check for RRD type media. A RRD device is identified
3673 3265 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3674 3266 */
3675 3267 mutex_exit(SD_MUTEX(un));
3676 3268 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3677 3269 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3678 3270
3679 3271 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3680 3272 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3681 3273 RANDOM_WRITABLE, path_flag);
3682 3274
3683 3275 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3684 3276 if (rtn != 0) {
3685 3277 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3686 3278 kmem_free(rqbuf_rw, SENSE_LENGTH);
3687 3279 mutex_enter(SD_MUTEX(un));
3688 3280 return;
3689 3281 }
3690 3282
3691 3283 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3692 3284 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3693 3285
3694 3286 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3695 3287 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3696 3288 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3697 3289
3698 3290 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3699 3291 mutex_enter(SD_MUTEX(un));
3700 3292 if (rtn == 0) {
3701 3293 /*
3702 3294 * We have good information, check for random writable
3703 3295 * and hardware defect features as current.
3704 3296 */
3705 3297 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3706 3298 (out_data_rw[10] & 0x1) &&
3707 3299 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3708 3300 (out_data_hd[10] & 0x1)) {
3709 3301 un->un_f_mmc_writable_media = TRUE;
3710 3302 }
3711 3303 }
3712 3304
3713 3305 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3714 3306 kmem_free(rqbuf_rw, SENSE_LENGTH);
3715 3307 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3716 3308 kmem_free(rqbuf_hd, SENSE_LENGTH);
3717 3309 }
3718 3310
3719 3311 /*
3720 3312 * Function: sd_read_unit_properties
3721 3313 *
3722 3314 * Description: The following implements a property lookup mechanism.
3723 3315 * Properties for particular disks (keyed on vendor, model
3724 3316 * and rev numbers) are sought in the sd.conf file via
3725 3317 * sd_process_sdconf_file(), and if not found there, are
3726 3318 * looked for in a list hardcoded in this driver via
3727 3319 * sd_process_sdconf_table(). Once located, the properties
3728 3320 * are used to update the driver unit structure.
3729 3321 *
3730 3322 * Arguments: un - driver soft state (unit) structure
3731 3323 */
3732 3324
3733 3325 static void
3734 3326 sd_read_unit_properties(struct sd_lun *un)
3735 3327 {
3736 3328 /*
3737 3329 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3738 3330 * the "sd-config-list" property (from the sd.conf file) or if
3739 3331 * there was not a match for the inquiry vid/pid. If this event
3740 3332 * occurs the static driver configuration table is searched for
3741 3333 * a match.
3742 3334 */
3743 3335 ASSERT(un != NULL);
3744 3336 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3745 3337 sd_process_sdconf_table(un);
3746 3338 }
3747 3339
3748 3340 /* check for LSI device */
3749 3341 sd_is_lsi(un);
3750 3342
3751 3343
3752 3344 }
3753 3345
3754 3346
3755 3347 /*
3756 3348 * Function: sd_process_sdconf_file
3757 3349 *
3758 3350 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3759 3351 * driver's config file (ie, sd.conf) and update the driver
3760 3352 * soft state structure accordingly.
3761 3353 *
3762 3354 * Arguments: un - driver soft state (unit) structure
3763 3355 *
3764 3356 * Return Code: SD_SUCCESS - The properties were successfully set according
3765 3357 * to the driver configuration file.
3766 3358 * SD_FAILURE - The driver config list was not obtained or
3767 3359 * there was no vid/pid match. This indicates that
3768 3360 * the static config table should be used.
3769 3361 *
3770 3362 * The config file has a property, "sd-config-list". Currently we support
3771 3363 * two kinds of formats. For both formats, the value of this property
3772 3364 * is a list of duplets:
3773 3365 *
3774 3366 * sd-config-list=
3775 3367 * <duplet>,
3776 3368 * [,<duplet>]*;
3777 3369 *
3778 3370 * For the improved format, where
3779 3371 *
3780 3372 * <duplet>:= "<vid+pid>","<tunable-list>"
3781 3373 *
3782 3374 * and
3783 3375 *
3784 3376 * <tunable-list>:= <tunable> [, <tunable> ]*;
3785 3377 * <tunable> = <name> : <value>
3786 3378 *
3787 3379 * The <vid+pid> is the string that is returned by the target device on a
3788 3380 * SCSI inquiry command, the <tunable-list> contains one or more tunables
3789 3381 * to apply to all target devices with the specified <vid+pid>.
3790 3382 *
3791 3383 * Each <tunable> is a "<name> : <value>" pair.
3792 3384 *
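 * For example (the vid/pid here is hypothetical), an improved-format
 * entry in sd.conf might read:
 *
 *	sd-config-list =
 *	    "ACME    SuperDisk", "disksort:false, cache-nonvolatile:true";
 *
 * where "disksort" and "cache-nonvolatile" are tunables recognized by
 * sd_set_properties().
 *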
3793 3385 * For the old format, the structure of each duplet is as follows:
3794 3386 *
3795 3387 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3796 3388 *
3797 3389 * The first entry of the duplet is the device ID string (the concatenated
3798 3390 * vid & pid; not to be confused with a device_id). This is defined in
3799 3391 * the same way as in the sd_disk_table.
3800 3392 *
3801 3393 * The second part of the duplet is a string that identifies a
3802 3394 * data-property-name-list. The data-property-name-list is defined as
3803 3395 * follows:
3804 3396 *
3805 3397 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3806 3398 *
3807 3399 * The syntax of <data-property-name> depends on the <version> field.
3808 3400 *
3809 3401 * If version = SD_CONF_VERSION_1 we have the following syntax:
3810 3402 *
3811 3403 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3812 3404 *
3813 3405 * where the prop0 value will be used to set prop0 if bit0 set in the
3814 3406 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
3815 3407 *
3816 3408 */
3817 3409
3818 3410 static int
3819 3411 sd_process_sdconf_file(struct sd_lun *un)
3820 3412 {
3821 3413 char **config_list = NULL;
3822 3414 uint_t nelements;
3823 3415 char *vidptr;
3824 3416 int vidlen;
3825 3417 char *dnlist_ptr;
3826 3418 char *dataname_ptr;
3827 3419 char *dataname_lasts;
3828 3420 int *data_list = NULL;
3829 3421 uint_t data_list_len;
3830 3422 int rval = SD_FAILURE;
3831 3423 int i;
3832 3424
3833 3425 ASSERT(un != NULL);
3834 3426
3835 3427 /* Obtain the configuration list associated with the .conf file */
3836 3428 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3837 - DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
3429 + DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sd-config-list",
3838 3430 &config_list, &nelements) != DDI_PROP_SUCCESS) {
3839 3431 return (SD_FAILURE);
3840 3432 }
3841 3433
3842 3434 /*
3843 3435 * Compare vids in each duplet to the inquiry vid - if a match is
3844 3436 * made, get the data value and update the soft state structure
3845 3437 * accordingly.
3846 3438 *
3847 3439 * Each duplet should show as a pair of strings, return SD_FAILURE
3848 3440 * otherwise.
3849 3441 */
3850 3442 if (nelements & 1) {
3851 3443 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3852 3444 "sd-config-list should show as pairs of strings.\n");
3853 3445 if (config_list)
3854 3446 ddi_prop_free(config_list);
3855 3447 return (SD_FAILURE);
3856 3448 }
3857 3449
3858 3450 for (i = 0; i < nelements; i += 2) {
3859 3451 /*
3860 3452 * Note: The assumption here is that each vid entry is on
3861 3453 * a unique line from its associated duplet.
3862 3454 */
3863 3455 vidptr = config_list[i];
3864 3456 vidlen = (int)strlen(vidptr);
3865 3457 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) {
3866 3458 continue;
3867 3459 }
3868 3460
3869 3461 /*
3870 3462 * dnlist contains 1 or more blank separated
3871 3463 * data-property-name entries
3872 3464 */
3873 3465 dnlist_ptr = config_list[i + 1];
3874 3466
3875 3467 if (strchr(dnlist_ptr, ':') != NULL) {
3876 3468 /*
3877 3469 * Decode the improved format sd-config-list.
3878 3470 */
3879 3471 sd_nvpair_str_decode(un, dnlist_ptr);
3880 3472 } else {
3881 3473 /*
3882 3474 * The old format sd-config-list, loop through all
3883 3475 * data-property-name entries in the
3884 3476 * data-property-name-list
3885 3477 * setting the properties for each.
3886 3478 */
3887 3479 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
3888 3480 &dataname_lasts); dataname_ptr != NULL;
3889 3481 dataname_ptr = sd_strtok_r(NULL, " \t",
3890 3482 &dataname_lasts)) {
3891 3483 int version;
3892 3484
3893 3485 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3894 3486 "sd_process_sdconf_file: disk:%s, "
3895 3487 "data:%s\n", vidptr, dataname_ptr);
3896 3488
3897 3489 /* Get the data list */
3898 3490 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
3899 3491 SD_DEVINFO(un), 0, dataname_ptr, &data_list,
3900 3492 &data_list_len) != DDI_PROP_SUCCESS) {
3901 3493 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3902 3494 "sd_process_sdconf_file: data "
3903 3495 "property (%s) has no value\n",
3904 3496 dataname_ptr);
3905 3497 continue;
3906 3498 }
3907 3499
3908 3500 version = data_list[0];
3909 3501
3910 3502 if (version == SD_CONF_VERSION_1) {
3911 3503 sd_tunables values;
3912 3504
3913 3505 /* Set the properties */
3914 3506 if (sd_chk_vers1_data(un, data_list[1],
3915 3507 &data_list[2], data_list_len,
3916 3508 dataname_ptr) == SD_SUCCESS) {
3917 3509 sd_get_tunables_from_conf(un,
3918 3510 data_list[1], &data_list[2],
3919 3511 &values);
3920 3512 sd_set_vers1_properties(un,
3921 3513 data_list[1], &values);
3922 3514 rval = SD_SUCCESS;
3923 3515 } else {
3924 3516 rval = SD_FAILURE;
3925 3517 }
3926 3518 } else {
3927 3519 scsi_log(SD_DEVINFO(un), sd_label,
3928 3520 CE_WARN, "data property %s version "
3929 3521 "0x%x is invalid.",
3930 3522 dataname_ptr, version);
3931 3523 rval = SD_FAILURE;
3932 3524 }
3933 3525 if (data_list)
3934 3526 ddi_prop_free(data_list);
3935 3527 }
3936 3528 }
3937 3529 }
3938 3530
3939 3531 /* free up the memory allocated by ddi_prop_lookup_string_array(). */
3940 3532 if (config_list) {
3941 3533 ddi_prop_free(config_list);
3942 3534 }
3943 3535
3944 3536 return (rval);
3945 3537 }
3946 3538
3947 3539 /*
3948 3540 * Function: sd_nvpair_str_decode()
3949 3541 *
3950 3542 * Description: Parse the improved format sd-config-list to get
3951 3543 * each tunable entry, which is a name-value pair.
3952 3544 * Then call sd_set_properties() to set each property.
3953 3545 *
3954 3546 * Arguments: un - driver soft state (unit) structure
3955 3547 * nvpair_str - the tunable list
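 *	(e.g. "disksort:false, cache-nonvolatile:true")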
3956 3548 */
3957 3549 static void
3958 3550 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3959 3551 {
3960 3552 char *nv, *name, *value, *token;
3961 3553 char *nv_lasts, *v_lasts, *x_lasts;
3962 3554
3963 3555 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3964 3556 nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3965 3557 token = sd_strtok_r(nv, ":", &v_lasts);
3966 3558 name = sd_strtok_r(token, " \t", &x_lasts);
3967 3559 token = sd_strtok_r(NULL, ":", &v_lasts);
3968 3560 value = sd_strtok_r(token, " \t", &x_lasts);
3969 3561 if (name == NULL || value == NULL) {
3970 3562 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3971 3563 "sd_nvpair_str_decode: "
3972 3564 "name or value is not valid!\n");
3973 3565 } else {
3974 3566 sd_set_properties(un, name, value);
3975 3567 }
3976 3568 }
3977 3569 }
3978 3570
3979 3571 /*
3980 3572 * Function: sd_strtok_r()
3981 3573 *
3982 3574 * Description: This function uses strpbrk and strspn to break a
3983 3575 * string into tokens across successive calls. It returns NULL
3984 3576 * when no non-separator characters remain. Pass NULL as the
3985 3577 * first argument on subsequent calls.
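 *
 * A minimal usage sketch (the buffer contents are illustrative):
 *
 *	char buf[] = "a , b";
 *	char *last, *tok;
 *	tok = sd_strtok_r(buf, " ,", &last);	returns "a"
 *	tok = sd_strtok_r(NULL, " ,", &last);	returns "b"
 *	tok = sd_strtok_r(NULL, " ,", &last);	returns NULL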
3986 3578 */
3987 3579 static char *
3988 3580 sd_strtok_r(char *string, const char *sepset, char **lasts)
3989 3581 {
3990 3582 char *q, *r;
3991 3583
3992 3584 /* First or subsequent call */
3993 3585 if (string == NULL)
3994 3586 string = *lasts;
3995 3587
3996 3588 if (string == NULL)
3997 3589 return (NULL);
3998 3590
3999 3591 /* Skip leading separators */
4000 3592 q = string + strspn(string, sepset);
4001 3593
4002 3594 if (*q == '\0')
4003 3595 return (NULL);
4004 3596
4005 3597 if ((r = strpbrk(q, sepset)) == NULL)
4006 3598 *lasts = NULL;
4007 3599 else {
4008 3600 *r = '\0';
4009 3601 *lasts = r + 1;
4010 3602 }
4011 3603 return (q);
4012 3604 }
4013 3605
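/*
 * Usage sketch (editorial): this is how sd_nvpair_str_decode() above
 * splits one "name:value" pair. The buffer is modified in place and
 * *lasts carries the scan position between calls:
 *
 *	char buf[] = "retries-busy:6";
 *	char *lasts;
 *	char *name = sd_strtok_r(buf, ":", &lasts);	yields "retries-busy"
 *	char *value = sd_strtok_r(NULL, ":", &lasts);	yields "6"
 */
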
4014 3606 /*
4015 3607 * Function: sd_set_properties()
4016 3608 *
4017 3609 * Description: Set device properties based on the improved
4018 3610 * format sd-config-list.
4019 3611 *
4020 3612 * Arguments: un - driver soft state (unit) structure
4021 3613 * name - supported tunable name
4022 3614 * value - tunable value
4023 3615 */
 */
4024 3616 static void
4025 3617 sd_set_properties(struct sd_lun *un, char *name, char *value)
4026 3618 {
4027 3619 char *endptr = NULL;
4028 3620 long val = 0;
4029 3621
4030 3622 if (strcasecmp(name, "cache-nonvolatile") == 0) {
4031 3623 if (strcasecmp(value, "true") == 0) {
4032 3624 un->un_f_suppress_cache_flush = TRUE;
4033 3625 } else if (strcasecmp(value, "false") == 0) {
4034 3626 un->un_f_suppress_cache_flush = FALSE;
4035 3627 } else {
4036 3628 goto value_invalid;
4037 3629 }
4038 3630 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4039 3631 "suppress_cache_flush flag set to %d\n",
4040 3632 un->un_f_suppress_cache_flush);
4041 3633 return;
4042 3634 }
4043 3635
4044 3636 if (strcasecmp(name, "controller-type") == 0) {
4045 3637 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4046 3638 un->un_ctype = val;
4047 3639 } else {
4048 3640 goto value_invalid;
4049 3641 }
4050 3642 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4051 3643 "ctype set to %d\n", un->un_ctype);
4052 3644 return;
4053 3645 }
4054 3646
4055 3647 if (strcasecmp(name, "delay-busy") == 0) {
4056 3648 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4057 3649 un->un_busy_timeout = drv_usectohz(val / 1000);
4058 3650 } else {
4059 3651 goto value_invalid;
4060 3652 }
4061 3653 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4062 3654 "busy_timeout set to %d\n", un->un_busy_timeout);
4063 3655 return;
4064 3656 }
4065 3657
4066 3658 if (strcasecmp(name, "disksort") == 0) {
4067 3659 if (strcasecmp(value, "true") == 0) {
4068 3660 un->un_f_disksort_disabled = FALSE;
4069 3661 } else if (strcasecmp(value, "false") == 0) {
4070 3662 un->un_f_disksort_disabled = TRUE;
4071 3663 } else {
4072 3664 goto value_invalid;
4073 3665 }
4074 3666 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4075 3667 "disksort disabled flag set to %d\n",
4076 3668 un->un_f_disksort_disabled);
4077 3669 return;
4078 3670 }
4079 3671
4080 3672 if (strcasecmp(name, "power-condition") == 0) {
4081 3673 if (strcasecmp(value, "true") == 0) {
4082 3674 un->un_f_power_condition_disabled = FALSE;
4083 3675 } else if (strcasecmp(value, "false") == 0) {
4084 3676 un->un_f_power_condition_disabled = TRUE;
4085 3677 } else {
4086 3678 goto value_invalid;
4087 3679 }
4088 3680 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4089 3681 "power condition disabled flag set to %d\n",
4090 3682 un->un_f_power_condition_disabled);
4091 3683 return;
4092 3684 }
4093 3685
4094 3686 if (strcasecmp(name, "timeout-releasereservation") == 0) {
4095 3687 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4096 3688 un->un_reserve_release_time = val;
4097 3689 } else {
4098 3690 goto value_invalid;
4099 3691 }
4100 3692 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4101 3693 "reservation release timeout set to %d\n",
4102 3694 un->un_reserve_release_time);
4103 3695 return;
4104 3696 }
4105 3697
4106 3698 if (strcasecmp(name, "reset-lun") == 0) {
4107 3699 if (strcasecmp(value, "true") == 0) {
4108 3700 un->un_f_lun_reset_enabled = TRUE;
4109 3701 } else if (strcasecmp(value, "false") == 0) {
4110 3702 un->un_f_lun_reset_enabled = FALSE;
4111 3703 } else {
4112 3704 goto value_invalid;
4113 3705 }
4114 3706 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4115 3707 "lun reset enabled flag set to %d\n",
4116 3708 un->un_f_lun_reset_enabled);
4117 3709 return;
4118 3710 }
4119 3711
4120 3712 if (strcasecmp(name, "retries-busy") == 0) {
4121 3713 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4122 3714 un->un_busy_retry_count = val;
4123 3715 } else {
4124 3716 goto value_invalid;
4125 3717 }
4126 3718 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4127 3719 "busy retry count set to %d\n", un->un_busy_retry_count);
4128 3720 return;
4129 3721 }
4130 3722
4131 3723 if (strcasecmp(name, "retries-timeout") == 0) {
4132 3724 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4133 3725 un->un_retry_count = val;
4134 3726 } else {
4135 3727 goto value_invalid;
4136 3728 }
4137 3729 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4138 3730 "timeout retry count set to %d\n", un->un_retry_count);
4139 3731 return;
4140 3732 }
4141 3733
4142 3734 if (strcasecmp(name, "retries-notready") == 0) {
4143 3735 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4144 3736 un->un_notready_retry_count = val;
4145 3737 } else {
4146 3738 goto value_invalid;
4147 3739 }
4148 3740 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4149 3741 "notready retry count set to %d\n",
4150 3742 un->un_notready_retry_count);
4151 3743 return;
4152 3744 }
4153 3745
4154 3746 if (strcasecmp(name, "retries-reset") == 0) {
4155 3747 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4156 3748 un->un_reset_retry_count = val;
4157 3749 } else {
4158 3750 goto value_invalid;
4159 3751 }
4160 3752 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4161 3753 "reset retry count set to %d\n",
4162 3754 un->un_reset_retry_count);
4163 3755 return;
4164 3756 }
4165 3757
4166 3758 if (strcasecmp(name, "throttle-max") == 0) {
4167 3759 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4168 3760 un->un_saved_throttle = un->un_throttle = val;
4169 3761 } else {
4170 3762 goto value_invalid;
4171 3763 }
4172 3764 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4173 3765 "throttle set to %d\n", un->un_throttle);
4174 3766 }
4175 3767
4176 3768 if (strcasecmp(name, "throttle-min") == 0) {
4177 3769 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4178 3770 un->un_min_throttle = val;
4179 3771 } else {
4180 3772 goto value_invalid;
4181 3773 }
4182 3774 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4183 3775 "min throttle set to %d\n", un->un_min_throttle);
4184 3776 }
4185 3777
4186 3778 if (strcasecmp(name, "rmw-type") == 0) {
4187 3779 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4188 3780 un->un_f_rmw_type = val;
4189 3781 } else {
4190 3782 goto value_invalid;
4191 3783 }
4192 3784 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4193 3785 "RMW type set to %d\n", un->un_f_rmw_type);
4194 3786 }
4195 3787
4196 3788 if (strcasecmp(name, "physical-block-size") == 0) {
4197 3789 if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
4198 3790 ISP2(val) && val >= un->un_tgt_blocksize &&
4199 3791 val >= un->un_sys_blocksize) {
4200 3792 un->un_phy_blocksize = val;
3793 + un->un_f_sdconf_phy_blocksize = TRUE;
4201 3794 } else {
4202 3795 goto value_invalid;
4203 3796 }
4204 3797 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4205 3798 "physical block size set to %d\n", un->un_phy_blocksize);
4206 3799 }
4207 3800
3801 + if (strcasecmp(name, "slow-io-threshold") == 0) {
3802 + if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3803 + un->un_slow_io_threshold = (hrtime_t)val * NANOSEC;
3804 + } else {
3805 + un->un_slow_io_threshold =
3806 + (hrtime_t)sd_slow_io_threshold;
3807 + goto value_invalid;
3808 + }
3809 + SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3810 + "slow IO threshold set to %llu\n",
3811 + un->un_slow_io_threshold);
3812 + }
3813 +
3814 + if (strcasecmp(name, "io-time") == 0) {
3815 + if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3816 + un->un_io_time = val;
3817 + } else {
3818 + un->un_io_time = sd_io_time;
3819 + goto value_invalid;
3820 + }
3821 + SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3822 + "IO time set to %llu\n", un->un_io_time);
3823 + }
3824 +
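	/*
	 * Editorial note (an assumption, based on the conversions above
	 * and the sd_io_time default): both of the new tunables appear
	 * to be given in seconds, so an improved-format entry could
	 * read "slow-io-threshold:30, io-time:60".
	 */
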
4208 3825 if (strcasecmp(name, "retries-victim") == 0) {
4209 3826 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4210 3827 un->un_victim_retry_count = val;
4211 3828 } else {
4212 3829 goto value_invalid;
4213 3830 }
4214 3831 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4215 3832 "victim retry count set to %d\n",
4216 3833 un->un_victim_retry_count);
4217 3834 return;
4218 3835 }
4219 3836
4220 3837 /*
4221 3838 * Validate the throttle values.
4222 3839 * If any of the numbers are invalid, set everything to defaults.
4223 3840 */
4224 3841 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4225 3842 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4226 3843 (un->un_min_throttle > un->un_throttle)) {
4227 3844 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4228 3845 un->un_min_throttle = sd_min_throttle;
4229 3846 }
4230 3847
4231 3848 if (strcasecmp(name, "mmc-gesn-polling") == 0) {
4232 3849 if (strcasecmp(value, "true") == 0) {
4233 3850 un->un_f_mmc_gesn_polling = TRUE;
4234 3851 } else if (strcasecmp(value, "false") == 0) {
4235 3852 un->un_f_mmc_gesn_polling = FALSE;
4236 3853 } else {
4237 3854 goto value_invalid;
4238 3855 }
4239 3856 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4240 3857 "mmc-gesn-polling set to %d\n",
4241 3858 un->un_f_mmc_gesn_polling);
4242 3859 }
4243 3860
4244 3861 return;
4245 3862
4246 3863 value_invalid:
4247 3864 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4248 3865 "value of prop %s is invalid\n", name);
4249 3866 }
4250 3867
4251 3868 /*
4252 3869 * Function: sd_get_tunables_from_conf()
4253 3870 *
4254 3871 *
4255 3872 * Description: This function reads the data list from the sd.conf file
4256 3873 * and pulls the values that can have numeric values as arguments,
4257 3874 * placing the values in the appropriate sd_tunables member.
4258 3875 * Since the order of the data list members varies across platforms,
4259 3876 * this function reads them from the data list in a platform-specific
4260 3877 * order and places them into the correct sd_tunables member that is
4261 3878 * consistent across all platforms.
4262 3879 */
 */
4263 3880 static void
4264 3881 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4265 3882 sd_tunables *values)
4266 3883 {
4267 3884 int i;
4268 3885 int mask;
4269 3886
4270 3887 bzero(values, sizeof (sd_tunables));
4271 3888
4272 3889 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4273 3890
4274 3891 mask = 1 << i;
4275 3892 if (mask > flags) {
4276 3893 break;
4277 3894 }
4278 3895
4279 3896 switch (mask & flags) {
4280 3897 case 0: /* This mask bit not set in flags */
4281 3898 continue;
4282 3899 case SD_CONF_BSET_THROTTLE:
4283 3900 values->sdt_throttle = data_list[i];
4284 3901 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4285 3902 "sd_get_tunables_from_conf: throttle = %d\n",
4286 3903 values->sdt_throttle);
4287 3904 break;
4288 3905 case SD_CONF_BSET_CTYPE:
4289 3906 values->sdt_ctype = data_list[i];
4290 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4291 3908 "sd_get_tunables_from_conf: ctype = %d\n",
4292 3909 values->sdt_ctype);
4293 3910 break;
4294 3911 case SD_CONF_BSET_NRR_COUNT:
4295 3912 values->sdt_not_rdy_retries = data_list[i];
4296 3913 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4297 3914 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4298 3915 values->sdt_not_rdy_retries);
4299 3916 break;
4300 3917 case SD_CONF_BSET_BSY_RETRY_COUNT:
4301 3918 values->sdt_busy_retries = data_list[i];
4302 3919 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4303 3920 "sd_get_tunables_from_conf: busy_retries = %d\n",
4304 3921 values->sdt_busy_retries);
4305 3922 break;
4306 3923 case SD_CONF_BSET_RST_RETRIES:
4307 3924 values->sdt_reset_retries = data_list[i];
4308 3925 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4309 3926 "sd_get_tunables_from_conf: reset_retries = %d\n",
4310 3927 values->sdt_reset_retries);
4311 3928 break;
4312 3929 case SD_CONF_BSET_RSV_REL_TIME:
4313 3930 values->sdt_reserv_rel_time = data_list[i];
4314 3931 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4315 3932 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4316 3933 values->sdt_reserv_rel_time);
4317 3934 break;
4318 3935 case SD_CONF_BSET_MIN_THROTTLE:
4319 3936 values->sdt_min_throttle = data_list[i];
4320 3937 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4321 3938 "sd_get_tunables_from_conf: min_throttle = %d\n",
4322 3939 values->sdt_min_throttle);
4323 3940 break;
4324 3941 case SD_CONF_BSET_DISKSORT_DISABLED:
4325 3942 values->sdt_disk_sort_dis = data_list[i];
4326 3943 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4327 3944 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4328 3945 values->sdt_disk_sort_dis);
4329 3946 break;
4330 3947 case SD_CONF_BSET_LUN_RESET_ENABLED:
4331 3948 values->sdt_lun_reset_enable = data_list[i];
4332 3949 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4333 3950 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4334 3951 "\n", values->sdt_lun_reset_enable);
4335 3952 break;
4336 3953 case SD_CONF_BSET_CACHE_IS_NV:
4337 3954 values->sdt_suppress_cache_flush = data_list[i];
4338 3955 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4339 3956 "sd_get_tunables_from_conf: \
4340 3957 suppress_cache_flush = %d"
4341 3958 "\n", values->sdt_suppress_cache_flush);
4342 3959 break;
4343 3960 case SD_CONF_BSET_PC_DISABLED:
4344 3961 values->sdt_power_condition_dis = data_list[i];
4345 3962 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4346 3963 "sd_get_tunables_from_conf: power_condition_dis = "
4347 3964 "%d\n", values->sdt_power_condition_dis);
4348 3965 break;
4349 3966 }
4350 3967 }
4351 3968 }
4352 3969
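/*
 * Editorial note on the loop above: the property values are not packed;
 * the value for flag bit i is always read from data_list[i]. In the
 * static-table example shown below ("SEAGATE ST42400N",1,0x40000,
 * 0,...,0,1;), the flag word 0x40000 has only bit 18 set, so the single
 * meaningful value is the 1 in slot 18 and the other slots are
 * placeholders.
 */
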
4353 3970 /*
4354 3971 * Function: sd_process_sdconf_table
4355 3972 *
4356 3973 * Description: Search the static configuration table for a match on the
4357 3974 * inquiry vid/pid and update the driver soft state structure
4358 3975 * according to the table property values for the device.
4359 3976 *
4360 3977 * The form of a configuration table entry is:
4361 3978 * <vid+pid>,<flags>,<property-data>
4362 3979 * "SEAGATE ST42400N",1,0x40000,
4363 3980 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4364 3981 *
4365 3982 * Arguments: un - driver soft state (unit) structure
4366 3983 */
4367 3984
4368 3985 static void
4369 3986 sd_process_sdconf_table(struct sd_lun *un)
4370 3987 {
4371 3988 char *id = NULL;
4372 3989 int table_index;
4373 3990 int idlen;
4374 3991
4375 3992 ASSERT(un != NULL);
4376 3993 for (table_index = 0; table_index < sd_disk_table_size;
4377 3994 table_index++) {
4378 3995 id = sd_disk_table[table_index].device_id;
4379 3996 idlen = strlen(id);
4380 3997
4381 3998 /*
4382 3999 * The static configuration table currently does not
4383 4000 * implement version 10 properties. Additionally,
4384 4001 * multiple data-property-name entries are not
4385 4002 * implemented in the static configuration table.
4386 4003 */
4387 4004 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4388 4005 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4389 4006 "sd_process_sdconf_table: disk %s\n", id);
4390 4007 sd_set_vers1_properties(un,
4391 4008 sd_disk_table[table_index].flags,
4392 4009 sd_disk_table[table_index].properties);
4393 4010 break;
4394 4011 }
4395 4012 }
4396 4013 }
4397 4014
4398 4015
4399 4016 /*
4400 4017 * Function: sd_sdconf_id_match
4401 4018 *
4402 4019 * Description: This local function implements a case sensitive vid/pid
4403 4020 * comparison as well as the boundary cases of wild card and
4404 4021 * multiple blanks.
4405 4022 *
4406 4023 * Note: An implicit assumption made here is that the scsi
4407 4024 * inquiry structure will always keep the vid, pid and
4408 4025 * revision strings in consecutive sequence, so they can be
4409 4026 * read as a single string. If this assumption is not the
4410 4027 * case, a separate string, to be used for the check, needs
4411 4028 * to be built with these strings concatenated.
4412 4029 *
4413 4030 * Arguments: un - driver soft state (unit) structure
4414 4031 * id - table or config file vid/pid
4415 4032 * idlen - length of the vid/pid (bytes)
4416 4033 *
4417 4034 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4418 4035 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4419 4036 */
4420 4037
4421 4038 static int
4422 4039 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4423 4040 {
4424 4041 struct scsi_inquiry *sd_inq;
4425 4042 int rval = SD_SUCCESS;
4426 4043
4427 4044 ASSERT(un != NULL);
4428 4045 sd_inq = un->un_sd->sd_inq;
4429 4046 ASSERT(id != NULL);
4430 4047
4431 4048 /*
4432 4049 * We use the inq_vid as a pointer to a buffer containing the
4433 4050 * vid and pid and use the entire vid/pid length of the table
4434 4051 * entry for the comparison. This works because the inq_pid
4435 4052 * data member follows inq_vid in the scsi_inquiry structure.
4436 4053 */
4437 4054 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4438 4055 /*
4439 4056 * The user id string is compared to the inquiry vid/pid
4440 4057 * using a case insensitive comparison and ignoring
4441 4058 * multiple spaces.
4442 4059 */
4443 4060 rval = sd_blank_cmp(un, id, idlen);
4444 4061 if (rval != SD_SUCCESS) {
4445 4062 /*
4446 4063 * User id strings that start and end with a "*"
4447 4064 * are a special case. These do not have a
4448 4065 * specific vendor, and the product string can
4449 4066 * appear anywhere in the 16 byte PID portion of
4450 4067 * the inquiry data. This is a simple strstr()
4451 4068 * type search for the user id in the inquiry data.
4452 4069 */
4453 4070 if ((id[0] == '*') && (id[idlen - 1] == '*')) {
4454 4071 char *pidptr = &id[1];
4455 4072 int i;
4456 4073 int j;
4457 4074 int pidstrlen = idlen - 2;
4458 4075 j = sizeof (SD_INQUIRY(un)->inq_pid) -
4459 4076 pidstrlen;
4460 4077
4461 4078 if (j < 0) {
4462 4079 return (SD_FAILURE);
4463 4080 }
4464 4081 for (i = 0; i < j; i++) {
4465 4082 if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
4466 4083 pidptr, pidstrlen) == 0) {
4467 4084 rval = SD_SUCCESS;
4468 4085 break;
4469 4086 }
4470 4087 }
4471 4088 }
4472 4089 }
4473 4090 }
4474 4091 return (rval);
4475 4092 }
4476 4093
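/*
 * Editorial illustration: a table or sd.conf id such as the
 * hypothetical "*ST42400N*" (leading and trailing '*') matches any
 * device whose 16-byte inquiry PID contains "ST42400N", regardless of
 * vendor; the loop above slides the pattern across inq_pid one byte
 * at a time.
 */
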
4477 4094
4478 4095 /*
4479 4096 * Function: sd_blank_cmp
4480 4097 *
4481 4098 * Description: If the id string starts and ends with a space, treat
4482 4099 * multiple consecutive spaces as equivalent to a single
4483 4100 * space. For example, this causes a sd_disk_table entry
4484 4101 * of " NEC CDROM " to match a device's id string of
4485 4102 * "NEC CDROM".
4486 4103 *
4487 4104 * Note: The success exit condition for this routine is that
4488 4105 * the pointer into the table entry reaches '\0' and the cnt of
4489 4106 * the inquiry length is zero. This will happen if the inquiry
4490 4107 * string returned by the device is padded with spaces to be
4491 4108 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The
4492 4109 * SCSI spec states that the inquiry string is to be padded with
4493 4110 * spaces.
4494 4111 *
4495 4112 * Arguments: un - driver soft state (unit) structure
4496 4113 * id - table or config file vid/pid
4497 4114 * idlen - length of the vid/pid (bytes)
4498 4115 *
4499 4116 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4500 4117 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4501 4118 */
4502 4119
4503 4120 static int
4504 4121 sd_blank_cmp(struct sd_lun *un, char *id, int idlen)
4505 4122 {
4506 4123 char *p1;
4507 4124 char *p2;
4508 4125 int cnt;
4509 4126 cnt = sizeof (SD_INQUIRY(un)->inq_vid) +
4510 4127 sizeof (SD_INQUIRY(un)->inq_pid);
4511 4128
4512 4129 ASSERT(un != NULL);
4513 4130 p2 = un->un_sd->sd_inq->inq_vid;
4514 4131 ASSERT(id != NULL);
4515 4132 p1 = id;
4516 4133
4517 4134 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) {
4518 4135 /*
4519 4136 * Note: string p1 is terminated by a NUL but string p2
4520 4137 * isn't. The end of p2 is determined by cnt.
4521 4138 */
4522 4139 for (;;) {
4523 4140 /* skip over any extra blanks in both strings */
4524 4141 while ((*p1 != '\0') && (*p1 == ' ')) {
4525 4142 p1++;
4526 4143 }
4527 4144 while ((cnt != 0) && (*p2 == ' ')) {
4528 4145 p2++;
4529 4146 cnt--;
4530 4147 }
4531 4148
4532 4149 /* compare the two strings */
4533 4150 if ((cnt == 0) ||
4534 4151 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) {
4535 4152 break;
4536 4153 }
4537 4154 while ((cnt > 0) &&
4538 4155 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) {
4539 4156 p1++;
4540 4157 p2++;
4541 4158 cnt--;
4542 4159 }
4543 4160 }
4544 4161 }
4545 4162
4546 4163 /* return SD_SUCCESS if both strings match */
4547 4164 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE);
4548 4165 }
4549 4166
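/*
 * Editorial trace: for the table entry " NEC CDROM " quoted above and
 * an inquiry string beginning "NEC     CDROM", each run of blanks is
 * collapsed on both sides, the characters compare equal case
 * insensitively, and the routine succeeds once p1 reaches its NUL and
 * the trailing inquiry padding has consumed cnt.
 */
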
4550 4167
4551 4168 /*
4552 4169 * Function: sd_chk_vers1_data
4553 4170 *
4554 4171 * Description: Verify the version 1 device properties provided by the
4555 4172 * user via the configuration file
4556 4173 *
4557 4174 * Arguments: un - driver soft state (unit) structure
4558 4175 * flags - integer mask indicating properties to be set
4559 4176 * prop_list - integer list of property values
4560 4177 * list_len - number of the elements
4561 4178 *
4562 4179 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4563 4180 * SD_FAILURE - Indicates the user provided data is invalid
4564 4181 */
4565 4182
4566 4183 static int
4567 4184 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4568 4185 int list_len, char *dataname_ptr)
4569 4186 {
4570 4187 int i;
4571 4188 int mask = 1;
4572 4189 int index = 0;
4573 4190
4574 4191 ASSERT(un != NULL);
4575 4192
4576 4193 /* Check for a NULL property name and list */
4577 4194 if (dataname_ptr == NULL) {
4578 4195 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4579 4196 "sd_chk_vers1_data: NULL data property name.");
4580 4197 return (SD_FAILURE);
4581 4198 }
4582 4199 if (prop_list == NULL) {
4583 4200 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4584 4201 "sd_chk_vers1_data: %s NULL data property list.",
4585 4202 dataname_ptr);
4586 4203 return (SD_FAILURE);
4587 4204 }
4588 4205
4589 4206 /* Display a warning if undefined bits are set in the flags */
4590 4207 if (flags & ~SD_CONF_BIT_MASK) {
4591 4208 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4592 4209 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4593 4210 "Properties not set.",
4594 4211 (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4595 4212 return (SD_FAILURE);
4596 4213 }
4597 4214
4598 4215 /*
4599 4216 * Verify the length of the list by identifying the highest bit set
4600 4217 * in the flags and validating that the property list has a length
4601 4218 * up to the index of this bit.
4602 4219 */
4603 4220 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4604 4221 mask = 1 << i;
4605 4222 if (flags & mask) {
4606 4223 index++;
4607 4224 }
4608 4225 }
4609 4226 if (list_len < (index + 2)) {
4610 4227 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4611 4228 "sd_chk_vers1_data: "
4612 4229 "Data property list %s size is incorrect. "
4613 4230 "Properties not set.", dataname_ptr);
4614 4231 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4615 4232 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4616 4233 return (SD_FAILURE);
4617 4234 }
4618 4235 return (SD_SUCCESS);
4619 4236 }
4620 4237
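/*
 * Worked example (editorial): for a flag word with two bits set, say
 * SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, index ends up as 2,
 * so list_len must be at least index + 2 = 4 integers: the version
 * word, the flag word, and two property values.
 */
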
4621 4238
4622 4239 /*
4623 4240 * Function: sd_set_vers1_properties
4624 4241 *
4625 4242 * Description: Set version 1 device properties based on a property list
4626 4243 * retrieved from the driver configuration file or static
4627 4244 * configuration table. Version 1 properties have the format:
4628 4245 *
4629 4246 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4630 4247 *
4631 4248 * where the prop0 value will be used to set prop0 if bit0
4632 4249 * is set in the flags
4633 4250 *
4634 4251 * Arguments: un - driver soft state (unit) structure
4635 4252 * flags - integer mask indicating properties to be set
4636 4253 * prop_list - integer list of property values
4637 4254 */
4638 4255
4639 4256 static void
4640 4257 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4641 4258 {
4642 4259 ASSERT(un != NULL);
4643 4260
4644 4261 /*
4645 4262 * Set the flag to indicate cache is to be disabled. An attempt
4646 4263 * to disable the cache via sd_cache_control() will be made
4647 4264 * later during attach once the basic initialization is complete.
4648 4265 */
4649 4266 if (flags & SD_CONF_BSET_NOCACHE) {
4650 4267 un->un_f_opt_disable_cache = TRUE;
4651 4268 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4652 4269 "sd_set_vers1_properties: caching disabled flag set\n");
4653 4270 }
4654 4271
4655 4272 /* CD-specific configuration parameters */
4656 4273 if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
4657 4274 un->un_f_cfg_playmsf_bcd = TRUE;
4658 4275 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4659 4276 "sd_set_vers1_properties: playmsf_bcd set\n");
4660 4277 }
4661 4278 if (flags & SD_CONF_BSET_READSUB_BCD) {
4662 4279 un->un_f_cfg_readsub_bcd = TRUE;
4663 4280 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4664 4281 "sd_set_vers1_properties: readsub_bcd set\n");
4665 4282 }
4666 4283 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
4667 4284 un->un_f_cfg_read_toc_trk_bcd = TRUE;
4668 4285 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4669 4286 "sd_set_vers1_properties: read_toc_trk_bcd set\n");
4670 4287 }
4671 4288 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
4672 4289 un->un_f_cfg_read_toc_addr_bcd = TRUE;
4673 4290 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4674 4291 "sd_set_vers1_properties: read_toc_addr_bcd set\n");
4675 4292 }
4676 4293 if (flags & SD_CONF_BSET_NO_READ_HEADER) {
4677 4294 un->un_f_cfg_no_read_header = TRUE;
4678 4295 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4679 4296 "sd_set_vers1_properties: no_read_header set\n");
4680 4297 }
4681 4298 if (flags & SD_CONF_BSET_READ_CD_XD4) {
4682 4299 un->un_f_cfg_read_cd_xd4 = TRUE;
4683 4300 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4684 4301 "sd_set_vers1_properties: read_cd_xd4 set\n");
4685 4302 }
4686 4303
4687 4304 /* Support for devices which do not have valid/unique serial numbers */
4688 4305 if (flags & SD_CONF_BSET_FAB_DEVID) {
4689 4306 un->un_f_opt_fab_devid = TRUE;
4690 4307 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4691 4308 "sd_set_vers1_properties: fab_devid bit set\n");
4692 4309 }
4693 4310
4694 4311 /* Support for user throttle configuration */
4695 4312 if (flags & SD_CONF_BSET_THROTTLE) {
4696 4313 ASSERT(prop_list != NULL);
4697 4314 un->un_saved_throttle = un->un_throttle =
4698 4315 prop_list->sdt_throttle;
4699 4316 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4700 4317 "sd_set_vers1_properties: throttle set to %d\n",
4701 4318 prop_list->sdt_throttle);
4702 4319 }
4703 4320
4704 4321 /* Set the per disk retry count according to the conf file or table. */
4705 4322 if (flags & SD_CONF_BSET_NRR_COUNT) {
4706 4323 ASSERT(prop_list != NULL);
4707 4324 if (prop_list->sdt_not_rdy_retries) {
4708 4325 un->un_notready_retry_count =
4709 4326 prop_list->sdt_not_rdy_retries;
4710 4327 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4711 4328 "sd_set_vers1_properties: not ready retry count"
4712 4329 " set to %d\n", un->un_notready_retry_count);
4713 4330 }
4714 4331 }
4715 4332
4716 4333 /* The controller type is reported for generic disk driver ioctls */
4717 4334 if (flags & SD_CONF_BSET_CTYPE) {
4718 4335 ASSERT(prop_list != NULL);
4719 4336 switch (prop_list->sdt_ctype) {
4720 4337 case CTYPE_CDROM:
4721 4338 un->un_ctype = prop_list->sdt_ctype;
4722 4339 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4723 4340 "sd_set_vers1_properties: ctype set to "
4724 4341 "CTYPE_CDROM\n");
4725 4342 break;
4726 4343 case CTYPE_CCS:
4727 4344 un->un_ctype = prop_list->sdt_ctype;
4728 4345 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4729 4346 "sd_set_vers1_properties: ctype set to "
4730 4347 "CTYPE_CCS\n");
4731 4348 break;
4732 4349 case CTYPE_ROD: /* RW optical */
4733 4350 un->un_ctype = prop_list->sdt_ctype;
4734 4351 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4735 4352 "sd_set_vers1_properties: ctype set to "
4736 4353 "CTYPE_ROD\n");
4737 4354 break;
4738 4355 default:
4739 4356 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4740 4357 "sd_set_vers1_properties: Could not set "
4741 4358 "invalid ctype value (%d)",
4742 4359 prop_list->sdt_ctype);
4743 4360 }
4744 4361 }
4745 4362
4746 4363 /* Purple failover timeout */
4747 4364 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) {
4748 4365 ASSERT(prop_list != NULL);
4749 4366 un->un_busy_retry_count =
4750 4367 prop_list->sdt_busy_retries;
4751 4368 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4752 4369 "sd_set_vers1_properties: "
4753 4370 "busy retry count set to %d\n",
4754 4371 un->un_busy_retry_count);
4755 4372 }
4756 4373
4757 4374 /* Purple reset retry count */
4758 4375 if (flags & SD_CONF_BSET_RST_RETRIES) {
4759 4376 ASSERT(prop_list != NULL);
4760 4377 un->un_reset_retry_count =
4761 4378 prop_list->sdt_reset_retries;
4762 4379 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4763 4380 "sd_set_vers1_properties: "
4764 4381 "reset retry count set to %d\n",
4765 4382 un->un_reset_retry_count);
4766 4383 }
4767 4384
4768 4385 /* Purple reservation release timeout */
4769 4386 if (flags & SD_CONF_BSET_RSV_REL_TIME) {
4770 4387 ASSERT(prop_list != NULL);
4771 4388 un->un_reserve_release_time =
4772 4389 prop_list->sdt_reserv_rel_time;
4773 4390 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4774 4391 "sd_set_vers1_properties: "
4775 4392 "reservation release timeout set to %d\n",
4776 4393 un->un_reserve_release_time);
4777 4394 }
4778 4395
4779 4396 /*
4780 4397 * Driver flag telling the driver to verify that no commands are pending
4781 4398 * for a device before issuing a Test Unit Ready. This is a workaround
4782 4399 * for a firmware bug in some Seagate eliteI drives.
4783 4400 */
4784 4401 if (flags & SD_CONF_BSET_TUR_CHECK) {
4785 4402 un->un_f_cfg_tur_check = TRUE;
4786 4403 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4787 4404 "sd_set_vers1_properties: tur queue check set\n");
4788 4405 }
4789 4406
4790 4407 if (flags & SD_CONF_BSET_MIN_THROTTLE) {
4791 4408 un->un_min_throttle = prop_list->sdt_min_throttle;
4792 4409 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4793 4410 "sd_set_vers1_properties: min throttle set to %d\n",
4794 4411 un->un_min_throttle);
4795 4412 }
4796 4413
4797 4414 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) {
4798 4415 un->un_f_disksort_disabled =
4799 4416 (prop_list->sdt_disk_sort_dis != 0) ?
4800 4417 TRUE : FALSE;
4801 4418 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4802 4419 "sd_set_vers1_properties: disksort disabled "
4803 4420 "flag set to %d\n",
4804 4421 prop_list->sdt_disk_sort_dis);
4805 4422 }
4806 4423
4807 4424 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) {
4808 4425 un->un_f_lun_reset_enabled =
4809 4426 (prop_list->sdt_lun_reset_enable != 0) ?
4810 4427 TRUE : FALSE;
4811 4428 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4812 4429 "sd_set_vers1_properties: lun reset enabled "
4813 4430 "flag set to %d\n",
4814 4431 prop_list->sdt_lun_reset_enable);
4815 4432 }
4816 4433
4817 4434 if (flags & SD_CONF_BSET_CACHE_IS_NV) {
4818 4435 un->un_f_suppress_cache_flush =
4819 4436 (prop_list->sdt_suppress_cache_flush != 0) ?
4820 4437 TRUE : FALSE;
4821 4438 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4822 4439 "sd_set_vers1_properties: suppress_cache_flush "
4823 4440 "flag set to %d\n",
4824 4441 prop_list->sdt_suppress_cache_flush);
4825 4442 }
4826 4443
4827 4444 if (flags & SD_CONF_BSET_PC_DISABLED) {
4828 4445 un->un_f_power_condition_disabled =
4829 4446 (prop_list->sdt_power_condition_dis != 0) ?
4830 4447 TRUE : FALSE;
4831 4448 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4832 4449 "sd_set_vers1_properties: power_condition_disabled "
4833 4450 "flag set to %d\n",
4834 4451 prop_list->sdt_power_condition_dis);
4835 4452 }
4836 4453
4837 4454 /*
4838 4455 * Validate the throttle values.
4839 4456 * If any of the numbers are invalid, set everything to defaults.
4840 4457 */
4841 4458 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4842 4459 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4843 4460 (un->un_min_throttle > un->un_throttle)) {
4844 4461 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4845 4462 un->un_min_throttle = sd_min_throttle;
4846 4463 }
4847 4464 }
4848 4465
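/*
 * Editorial illustration (hypothetical names, hedged): a version 1
 * entry in sd.conf pairing a vid/pid with a named data property might
 * look like
 *
 *	sd-config-list = "SEAGATE ST42400N", "seagate-data";
 *	seagate-data = 1,0x1,10;
 *
 * where 1 is the version, 0x1 sets SD_CONF_BSET_THROTTLE (assuming,
 * per the switch ordering in sd_get_tunables_from_conf(), that it is
 * bit 0), and 10 is the throttle value.
 */
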
4849 4466 /*
4850 4467 * Function: sd_is_lsi()
4851 4468 *
4852 4469 * Description: Check for lsi devices, step through the static device
4853 4470 * table to match vid/pid.
4854 4471 *
4855 4472 * Args: un - ptr to sd_lun
4856 4473 *
4857 4474 * Notes: When creating new LSI property, need to add the new LSI property
4858 4475 * to this function.
4859 4476 */
 */
4860 4477 static void
4861 4478 sd_is_lsi(struct sd_lun *un)
4862 4479 {
4863 4480 char *id = NULL;
4864 4481 int table_index;
4865 4482 int idlen;
4866 4483 void *prop;
4867 4484
4868 4485 ASSERT(un != NULL);
4869 4486 for (table_index = 0; table_index < sd_disk_table_size;
4870 4487 table_index++) {
4871 4488 id = sd_disk_table[table_index].device_id;
4872 4489 idlen = strlen(id);
4873 4490 if (idlen == 0) {
4874 4491 continue;
4875 4492 }
4876 4493
4877 4494 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4878 4495 prop = sd_disk_table[table_index].properties;
4879 4496 if (prop == &lsi_properties ||
4880 4497 prop == &lsi_oem_properties ||
4881 4498 prop == &lsi_properties_scsi ||
4882 4499 prop == &symbios_properties) {
4883 4500 un->un_f_cfg_is_lsi = TRUE;
4884 4501 }
4885 4502 break;
4886 4503 }
4887 4504 }
4888 4505 }
4889 4506
4890 4507 /*
4891 4508 * Function: sd_get_physical_geometry
4892 4509 *
4893 4510 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4894 4511 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4895 4512 * target, and use this information to initialize the physical
4896 4513 * geometry cache specified by pgeom_p.
4897 4514 *
4898 4515 * MODE SENSE is an optional command, so failure in this case
4899 4516 * does not necessarily denote an error. We want to use the
4900 4517 * MODE SENSE commands to derive the physical geometry of the
4901 4518 * device, but if either command fails, the logical geometry is
4902 4519 * used as the fallback for disk label geometry in cmlb.
4903 4520 *
4904 4521 * This requires that un->un_blockcount and un->un_tgt_blocksize
4905 4522 * have already been initialized for the current target and
4906 4523 * that the current values be passed as args so that we don't
4907 4524 * end up ever trying to use -1 as a valid value. This could
4908 4525 * happen if either value is reset while we're not holding
4909 4526 * the mutex.
4910 4527 *
4911 4528 * Arguments: un - driver soft state (unit) structure
4912 4529 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4913 4530 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4914 4531 * to use the USCSI "direct" chain and bypass the normal
4915 4532 * command waitq.
4916 4533 *
4917 4534 * Context: Kernel thread only (can sleep).
4918 4535  */
4919 4536
4920 4537 static int
4921 4538 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p,
4922 4539 diskaddr_t capacity, int lbasize, int path_flag)
4923 4540 {
4924 4541 struct mode_format *page3p;
4925 4542 struct mode_geometry *page4p;
4926 4543 struct mode_header *headerp;
4927 4544 int sector_size;
4928 4545 int nsect;
4929 4546 int nhead;
4930 4547 int ncyl;
4931 4548 int intrlv;
4932 4549 int spc;
4933 4550 diskaddr_t modesense_capacity;
4934 4551 int rpm;
4935 4552 int bd_len;
4936 4553 int mode_header_length;
4937 4554 uchar_t *p3bufp;
4938 4555 uchar_t *p4bufp;
4939 4556 int cdbsize;
4940 4557 int ret = EIO;
4941 4558 sd_ssc_t *ssc;
4942 4559 int status;
4943 4560
4944 4561 ASSERT(un != NULL);
4945 4562
4946 4563 if (lbasize == 0) {
4947 4564 if (ISCD(un)) {
4948 4565 lbasize = 2048;
4949 4566 } else {
4950 4567 lbasize = un->un_sys_blocksize;
4951 4568 }
4952 4569 }
4953 4570 pgeom_p->g_secsize = (unsigned short)lbasize;
4954 4571
4955 4572 /*
4956 4573 * If the unit is a CD/DVD drive, MODE SENSE page three
4957 4574 * and MODE SENSE page four are reserved (see SBC spec
4958 4575 * and MMC spec). To prevent soft errors, just return
4959 4576 * using the default LBA size.
4960 4577 *
4961 4578 * Since SATA MODE SENSE function (sata_txlt_mode_sense()) does not
4962 4579 * implement support for mode pages 3 and 4 return here to prevent
4963 4580 * illegal requests on SATA drives.
4964 4581 *
4965 4582 * These pages are also reserved in SBC-2 and later. We assume SBC-2
4966 4583 * or later for a direct-attached block device if the SCSI version is
4967 4584 * at least SPC-3.
4968 4585 */
4969 -
4970 4586 if (ISCD(un) ||
4971 4587 un->un_interconnect_type == SD_INTERCONNECT_SATA ||
4972 4588 (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5))
4973 4589 return (ret);
4974 4590
4975 4591 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;
4976 4592
4977 4593 /*
4978 4594 * Retrieve MODE SENSE page 3 - Format Device Page
4979 4595 */
4980 4596 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP);
4981 4597 ssc = sd_ssc_init(un);
4982 4598 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp,
4983 4599 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag);
4984 4600 if (status != 0) {
4985 4601 SD_ERROR(SD_LOG_COMMON, un,
4986 4602 "sd_get_physical_geometry: mode sense page 3 failed\n");
4987 4603 goto page3_exit;
4988 4604 }
4989 4605
4990 4606 /*
4991 4607 * Determine size of Block Descriptors in order to locate the mode
4992 4608 * page data. ATAPI devices return 0, SCSI devices should return
4993 4609 * MODE_BLK_DESC_LENGTH.
4994 4610 */
4995 4611 headerp = (struct mode_header *)p3bufp;
4996 4612 if (un->un_f_cfg_is_atapi == TRUE) {
4997 4613 struct mode_header_grp2 *mhp =
4998 4614 (struct mode_header_grp2 *)headerp;
4999 4615 mode_header_length = MODE_HEADER_LENGTH_GRP2;
5000 4616 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5001 4617 } else {
5002 4618 mode_header_length = MODE_HEADER_LENGTH;
5003 4619 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5004 4620 }
5005 4621
5006 4622 if (bd_len > MODE_BLK_DESC_LENGTH) {
5007 4623 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5008 4624 "sd_get_physical_geometry: received unexpected bd_len "
5009 4625 "of %d, page3\n", bd_len);
5010 4626 status = EIO;
5011 4627 goto page3_exit;
5012 4628 }
5013 4629
5014 4630 page3p = (struct mode_format *)
5015 4631 ((caddr_t)headerp + mode_header_length + bd_len);
5016 4632
5017 4633 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) {
5018 4634 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5019 4635 "sd_get_physical_geometry: mode sense pg3 code mismatch "
5020 4636 "%d\n", page3p->mode_page.code);
5021 4637 status = EIO;
5022 4638 goto page3_exit;
5023 4639 }
5024 4640
5025 4641 /*
5026 4642 * Use this physical geometry data only if BOTH MODE SENSE commands
5027 4643 * complete successfully; otherwise, revert to the logical geometry.
5028 4644 * So, we need to save everything in temporary variables.
5029 4645 */
5030 4646 sector_size = BE_16(page3p->data_bytes_sect);
5031 4647
5032 4648 /*
5033 4649 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size
5034 4650 */
5035 4651 if (sector_size == 0) {
5036 4652 sector_size = un->un_sys_blocksize;
5037 4653 } else {
5038 4654 sector_size &= ~(un->un_sys_blocksize - 1);
5039 4655 }
5040 4656
5041 4657 nsect = BE_16(page3p->sect_track);
5042 4658 intrlv = BE_16(page3p->interleave);
5043 4659
5044 4660 SD_INFO(SD_LOG_COMMON, un,
5045 4661 "sd_get_physical_geometry: Format Parameters (page 3)\n");
5046 4662 SD_INFO(SD_LOG_COMMON, un,
5047 4663 " mode page: %d; nsect: %d; sector size: %d;\n",
5048 4664 page3p->mode_page.code, nsect, sector_size);
5049 4665 SD_INFO(SD_LOG_COMMON, un,
5050 4666 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv,
5051 4667 BE_16(page3p->track_skew),
5052 4668 BE_16(page3p->cylinder_skew));
5053 4669
5054 4670 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5055 4671
5056 4672 /*
5057 4673 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
5058 4674 */
5059 4675 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP);
5060 4676 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp,
5061 4677 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag);
5062 4678 if (status != 0) {
5063 4679 SD_ERROR(SD_LOG_COMMON, un,
5064 4680 "sd_get_physical_geometry: mode sense page 4 failed\n");
5065 4681 goto page4_exit;
5066 4682 }
5067 4683
5068 4684 /*
5069 4685 * Determine size of Block Descriptors in order to locate the mode
5070 4686 * page data. ATAPI devices return 0, SCSI devices should return
5071 4687 * MODE_BLK_DESC_LENGTH.
5072 4688 */
5073 4689 headerp = (struct mode_header *)p4bufp;
5074 4690 if (un->un_f_cfg_is_atapi == TRUE) {
5075 4691 struct mode_header_grp2 *mhp =
5076 4692 (struct mode_header_grp2 *)headerp;
5077 4693 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5078 4694 } else {
5079 4695 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5080 4696 }
5081 4697
5082 4698 if (bd_len > MODE_BLK_DESC_LENGTH) {
5083 4699 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5084 4700 "sd_get_physical_geometry: received unexpected bd_len of "
5085 4701 "%d, page4\n", bd_len);
5086 4702 status = EIO;
5087 4703 goto page4_exit;
5088 4704 }
5089 4705
5090 4706 page4p = (struct mode_geometry *)
5091 4707 ((caddr_t)headerp + mode_header_length + bd_len);
5092 4708
5093 4709 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) {
5094 4710 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5095 4711 "sd_get_physical_geometry: mode sense pg4 code mismatch "
5096 4712 "%d\n", page4p->mode_page.code);
5097 4713 status = EIO;
5098 4714 goto page4_exit;
5099 4715 }
5100 4716
5101 4717 /*
5102 4718 * Stash the data now, after we know that both commands completed.
5103 4719 */
5104 4720
5105 4721
5106 4722 nhead = (int)page4p->heads; /* uchar, so no conversion needed */
5107 4723 spc = nhead * nsect;
5108 4724 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb;
5109 4725 rpm = BE_16(page4p->rpm);
5110 4726
5111 4727 modesense_capacity = spc * ncyl;
5112 4728
5113 4729 SD_INFO(SD_LOG_COMMON, un,
5114 4730 "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
5115 4731 SD_INFO(SD_LOG_COMMON, un,
5116 4732 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm);
5117 4733 SD_INFO(SD_LOG_COMMON, un,
5118 4734 " computed capacity(h*s*c): %d;\n", modesense_capacity);
5119 4735 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n",
5120 4736 (void *)pgeom_p, capacity);
5121 4737
5122 4738 /*
5123 4739 * Compensate if the drive's geometry is not rectangular, i.e.,
5124 4740 * the product of C * H * S returned by MODE SENSE >= that returned
5125 4741 * by read capacity. This is an idiosyncrasy of the original x86
5126 4742 * disk subsystem.
5127 4743 */
5128 4744 if (modesense_capacity >= capacity) {
5129 4745 SD_INFO(SD_LOG_COMMON, un,
5130 4746 "sd_get_physical_geometry: adjusting acyl; "
5131 4747 "old: %d; new: %d\n", pgeom_p->g_acyl,
5132 4748 (modesense_capacity - capacity + spc - 1) / spc);
5133 4749 if (sector_size != 0) {
5134 4750 /* 1243403: NEC D38x7 drives don't support sec size */
5135 4751 pgeom_p->g_secsize = (unsigned short)sector_size;
5136 4752 }
5137 4753 pgeom_p->g_nsect = (unsigned short)nsect;
5138 4754 pgeom_p->g_nhead = (unsigned short)nhead;
5139 4755 pgeom_p->g_capacity = capacity;
5140 4756 pgeom_p->g_acyl =
5141 4757 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
5142 4758 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
5143 4759 }
5144 4760
5145 4761 pgeom_p->g_rpm = (unsigned short)rpm;
5146 4762 pgeom_p->g_intrlv = (unsigned short)intrlv;
5147 4763 ret = 0;
5148 4764
5149 4765 SD_INFO(SD_LOG_COMMON, un,
5150 4766 "sd_get_physical_geometry: mode sense geometry:\n");
5151 4767 SD_INFO(SD_LOG_COMMON, un,
5152 4768 " nsect: %d; sector size: %d; interlv: %d\n",
5153 4769 nsect, sector_size, intrlv);
5154 4770 SD_INFO(SD_LOG_COMMON, un,
5155 4771 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
5156 4772 nhead, ncyl, rpm, modesense_capacity);
5157 4773 SD_INFO(SD_LOG_COMMON, un,
5158 4774 "sd_get_physical_geometry: (cached)\n");
5159 4775 SD_INFO(SD_LOG_COMMON, un,
5160 4776 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
5161 4777 pgeom_p->g_ncyl, pgeom_p->g_acyl,
5162 4778 pgeom_p->g_nhead, pgeom_p->g_nsect);
5163 4779 SD_INFO(SD_LOG_COMMON, un,
5164 4780 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
5165 4781 pgeom_p->g_secsize, pgeom_p->g_capacity,
5166 4782 pgeom_p->g_intrlv, pgeom_p->g_rpm);
5167 4783 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5168 4784
5169 4785 page4_exit:
5170 4786 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);
5171 4787
5172 4788 page3_exit:
5173 4789 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);
5174 4790
5175 4791 if (status != 0) {
5176 4792 if (status == EIO) {
5177 4793 /*
5178 4794 * Some disks do not support MODE SENSE(6); we
5179 4795 * should ignore this kind of error (sense key is
5180 4796 * 0x5 - illegal request).
5181 4797 */
5182 4798 uint8_t *sensep;
5183 4799 int senlen;
5184 4800
5185 4801 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
5186 4802 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
5187 4803 ssc->ssc_uscsi_cmd->uscsi_rqresid);
5188 4804
5189 4805 if (senlen > 0 &&
5190 4806 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
5191 4807 sd_ssc_assessment(ssc,
5192 4808 SD_FMT_IGNORE_COMPROMISE);
5193 4809 } else {
5194 4810 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
5195 4811 }
5196 4812 } else {
5197 4813 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5198 4814 }
5199 4815 }
5200 4816 sd_ssc_fini(ssc);
5201 4817 return (ret);
5202 4818 }
5203 4819
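/*
 * Worked example (editorial, made-up numbers): with nhead = 16,
 * nsect = 63 and ncyl = 1000 from MODE SENSE, spc = 1008 and
 * modesense_capacity = 1008000. If READ CAPACITY reported
 * capacity = 1000000, the adjustment above yields
 * g_acyl = (1008000 - 1000000 + 1007) / 1008 = 8 and
 * g_ncyl = 1000 - 8 = 992.
 */
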
5204 4820 /*
5205 4821 * Function: sd_get_virtual_geometry
5206 4822 *
5207 4823 * Description: Ask the controller to tell us about the target device.
5208 4824 *
5209 4825 * Arguments: un - pointer to softstate
5210 4826 * capacity - disk capacity in #blocks
5211 4827 * lbasize - disk block size in bytes
5212 4828 *
5213 4829 * Context: Kernel thread only
5214 4830  */
5215 4831
5216 4832 static int
5217 4833 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
5218 4834 diskaddr_t capacity, int lbasize)
5219 4835 {
5220 4836 uint_t geombuf;
5221 4837 int spc;
5222 4838
5223 4839 ASSERT(un != NULL);
5224 4840
5225 4841 /* Set sector size, and total number of sectors */
5226 4842 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
5227 4843 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);
5228 4844
5229 4845 /* Let the HBA tell us its geometry */
5230 4846 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);
5231 4847
5232 4848 /* A value of -1 indicates an undefined "geometry" property */
5233 4849 if (geombuf == (-1)) {
5234 4850 return (EINVAL);
5235 4851 }
5236 4852
5237 4853 /* Initialize the logical geometry cache. */
5238 4854 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5239 4855 lgeom_p->g_nsect = geombuf & 0xffff;
5240 4856 lgeom_p->g_secsize = un->un_sys_blocksize;
5241 4857
5242 4858 spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5243 4859
5244 4860 /*
5245 4861 * Note: The driver originally converted the capacity value from
5246 4862 * target blocks to system blocks. However, the capacity value passed
5247 4863 * to this routine is already in terms of system blocks (this scaling
5248 4864 * is done when the READ CAPACITY command is issued and processed).
5249 4865 * This 'error' may have gone undetected because the usage of g_ncyl
5250 4866 * (which is based upon g_capacity) is very limited within the driver.
5251 4867 */
5252 4868 lgeom_p->g_capacity = capacity;
5253 4869
5254 4870 /*
5255 4871 * Set ncyl to zero if the HBA returned a zero nhead or nsect value.
5256 4872 * The HBA may return zero values if the device has been removed.
5257 4873 */
5258 4874 if (spc == 0) {
5259 4875 lgeom_p->g_ncyl = 0;
5260 4876 } else {
5261 4877 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5262 4878 }
5263 4879 lgeom_p->g_acyl = 0;
5264 4880
5265 4881 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5266 4882 return (0);
5267 4883
5268 4884 }
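
/*
 * Worked example (editorial): a "geometry" capability value of
 * 0x00ff003f decodes, per the shifts above, to g_nhead = 0xff = 255
 * and g_nsect = 0x3f = 63, giving spc = 16065 sectors per cylinder.
 */
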
5269 4885 /*
5270 4886 * Function: sd_update_block_info
5271 4887 *
5272 4888 * Description: Record the new target block size and capacity in the
5273 4889 * soft state and update the capacity errstat.
5274 4890 *
5275 4891 * Arguments: un: unit struct.
5276 4892 * lbasize: new target sector size
5277 4893 * capacity: new target capacity, i.e. block count
5278 4894 *
5279 4895 * Context: Kernel thread context
5280 4896  */
5281 4897
5282 4898 static void
5283 4899 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5284 4900 {
5285 4901 if (lbasize != 0) {
5286 4902 un->un_tgt_blocksize = lbasize;
5287 4903 un->un_f_tgt_blocksize_is_valid = TRUE;
5288 4904 if (!un->un_f_has_removable_media) {
5289 4905 un->un_sys_blocksize = lbasize;
5290 4906 }
5291 4907 }
5292 4908
5293 4909 if (capacity != 0) {
5294 4910 un->un_blockcount = capacity;
5295 4911 un->un_f_blockcount_is_valid = TRUE;
5296 4912
5297 4913 /*
5298 4914 * The capacity has changed so update the errstats.
5299 4915 */
5300 4916 if (un->un_errstats != NULL) {
5301 4917 struct sd_errstats *stp;
5302 4918
5303 4919 capacity *= un->un_sys_blocksize;
5304 4920 stp = (struct sd_errstats *)un->un_errstats->ks_data;
5305 4921 if (stp->sd_capacity.value.ui64 < capacity)
5306 4922 stp->sd_capacity.value.ui64 = capacity;
5307 4923 }
5308 4924 }
5309 4925 }
5310 4926
4927 +/*
4928 + * Parses the SCSI Block Limits VPD page (0xB0). It's legal to pass NULL for
4929 + * vpd_pg, in which case all the block limits will be reset to the defaults.
4930 + */
4931 +static void
4932 +sd_parse_blk_limits_vpd(struct sd_lun *un, uchar_t *vpd_pg)
4933 +{
4934 + sd_blk_limits_t *lim = &un->un_blk_lim;
4935 + unsigned pg_len;
5311 4936
4937 + if (vpd_pg != NULL)
4938 + pg_len = BE_IN16(&vpd_pg[2]);
4939 + else
4940 + pg_len = 0;
4941 +
4942 + /* Block Limits VPD can be 16 bytes or 64 bytes long - support both */
4943 + if (pg_len >= 0x10) {
4944 + lim->lim_opt_xfer_len_gran = BE_IN16(&vpd_pg[6]);
4945 + lim->lim_max_xfer_len = BE_IN32(&vpd_pg[8]);
4946 + lim->lim_opt_xfer_len = BE_IN32(&vpd_pg[12]);
4947 + } else {
4948 + lim->lim_opt_xfer_len_gran = 0;
4949 + lim->lim_max_xfer_len = UINT32_MAX;
4950 + lim->lim_opt_xfer_len = UINT32_MAX;
4951 + }
4952 + if (pg_len >= 0x3c) {
4953 + lim->lim_max_pfetch_len = BE_IN32(&vpd_pg[16]);
4954 + /*
4955 + * A zero in either of the following two fields indicates lack
4956 + * of UNMAP support.
4957 + */
4958 + lim->lim_max_unmap_lba_cnt = BE_IN32(&vpd_pg[20]);
4959 + lim->lim_max_unmap_descr_cnt = BE_IN32(&vpd_pg[24]);
4960 + lim->lim_opt_unmap_gran = BE_IN32(&vpd_pg[28]);
4961 + if ((vpd_pg[32] >> 7) == 1) {
4962 + /* left-most bit on each byte is a flag */
4963 + lim->lim_unmap_gran_align =
4964 + ((vpd_pg[32] & 0x7f) << 24) | (vpd_pg[33] << 16) |
4965 + (vpd_pg[34] << 8) | vpd_pg[35];
4966 + } else {
4967 + lim->lim_unmap_gran_align = 0;
4968 + }
4969 + lim->lim_max_write_same_len = BE_IN64(&vpd_pg[36]);
4970 + } else {
4971 + lim->lim_max_pfetch_len = UINT32_MAX;
4972 + lim->lim_max_unmap_lba_cnt = UINT32_MAX;
4973 + lim->lim_max_unmap_descr_cnt = SD_UNMAP_MAX_DESCR;
4974 + lim->lim_opt_unmap_gran = 0;
4975 + lim->lim_unmap_gran_align = 0;
4976 + lim->lim_max_write_same_len = UINT64_MAX;
4977 + }
4978 +}
4979 +
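/*
 * Editorial reference for the offsets decoded above (Block Limits VPD
 * page, SBC-3), exactly as used by sd_parse_blk_limits_vpd():
 *
 *	bytes  2-3	page length
 *	bytes  6-7	optimal transfer length granularity
 *	bytes  8-11	maximum transfer length
 *	bytes 12-15	optimal transfer length
 *	bytes 16-19	maximum prefetch length
 *	bytes 20-23	maximum unmap LBA count
 *	bytes 24-27	maximum unmap block descriptor count
 *	bytes 28-31	optimal unmap granularity
 *	bytes 32-35	unmap granularity alignment (bit 7 of byte 32
 *			flags the field as valid)
 *	bytes 36-43	maximum WRITE SAME length
 */
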
5312 4980 /*
4981 + * Collects VPD page B0 data if available (block limits). If the data is
4982 + * not available or querying the device failed, we revert to the defaults.
4983 + */
4984 +static void
4985 +sd_setup_blk_limits(sd_ssc_t *ssc)
4986 +{
4987 + struct sd_lun *un = ssc->ssc_un;
4988 + uchar_t *inqB0 = NULL;
4989 + size_t inqB0_resid = 0;
4990 + int rval;
4991 +
4992 + if (un->un_vpd_page_mask & SD_VPD_BLK_LIMITS_PG) {
4993 + inqB0 = kmem_zalloc(MAX_INQUIRY_SIZE, KM_SLEEP);
4994 + rval = sd_send_scsi_INQUIRY(ssc, inqB0, MAX_INQUIRY_SIZE, 0x01,
4995 + 0xB0, &inqB0_resid);
4996 + if (rval != 0) {
4997 + sd_ssc_assessment(ssc, SD_FMT_IGNORE);
4998 + kmem_free(inqB0, MAX_INQUIRY_SIZE);
4999 + inqB0 = NULL;
5000 + }
5001 + }
5002 + /* passing NULL inqB0 will reset to defaults */
5003 + sd_parse_blk_limits_vpd(ssc->ssc_un, inqB0);
5004 + if (inqB0)
5005 + kmem_free(inqB0, MAX_INQUIRY_SIZE);
5006 +}
5007 +
5008 +#define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
5009 +
5010 +/*
5313 5011 * Function: sd_register_devid
5314 5012 *
5315 5013 * Description: This routine will obtain the device id information from the
5316 5014 * target, obtain the serial number, and register the device
5317 5015 * id with the ddi framework.
5318 5016 *
5319 5017 * Arguments: devi - the system's dev_info_t for the device.
5320 5018 * un - driver soft state (unit) structure
5321 5019 * reservation_flag - indicates if a reservation conflict
5322 5020 * occurred during attach
5323 5021 *
5324 5022 * Context: Kernel Thread
5325 5023  */
5326 5024 static void
5327 5025 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5328 5026 {
5329 5027 int rval = 0;
5330 5028 uchar_t *inq80 = NULL;
5331 5029 size_t inq80_len = MAX_INQUIRY_SIZE;
5332 5030 size_t inq80_resid = 0;
5333 5031 uchar_t *inq83 = NULL;
5334 5032 size_t inq83_len = MAX_INQUIRY_SIZE;
5335 5033 size_t inq83_resid = 0;
5336 5034 int dlen, len;
5337 5035 char *sn;
5338 5036 struct sd_lun *un;
5339 5037
5340 5038 ASSERT(ssc != NULL);
5341 5039 un = ssc->ssc_un;
5342 5040 ASSERT(un != NULL);
5343 5041 ASSERT(mutex_owned(SD_MUTEX(un)));
5344 5042 ASSERT((SD_DEVINFO(un)) == devi);
5345 5043
5346 5044
5347 5045 /*
5348 5046 * We check the availability of the World Wide Name (0x83) and Unit
5349 5047 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5350 5048 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5351 5049 * 0x83 is available, that is the best choice. Our next choice is
5352 5050 * 0x80. If neither are available, we munge the devid from the device
5353 5051 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5354 5052 * to fabricate a devid for non-Sun qualified disks.
5355 5053 */
5356 5054 if (sd_check_vpd_page_support(ssc) == 0) {
5357 5055 /* collect page 80 data if available */
5358 5056 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
5359 5057
5360 5058 mutex_exit(SD_MUTEX(un));
5361 5059 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
5362 5060
5363 5061 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
5364 5062 0x01, 0x80, &inq80_resid);
5365 5063
5366 5064 if (rval != 0) {
5367 5065 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5368 5066 kmem_free(inq80, inq80_len);
5369 5067 inq80 = NULL;
5370 5068 inq80_len = 0;
5371 5069 } else if (ddi_prop_exists(
5372 5070 DDI_DEV_T_NONE, SD_DEVINFO(un),
5373 5071 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
5374 5072 INQUIRY_SERIAL_NO) == 0) {
5375 5073 /*
5376 5074 * If we don't already have a serial number
5377 5075 * property, do quick verify of data returned
5378 5076 * and define property.
5379 5077 */
5380 5078 dlen = inq80_len - inq80_resid;
5381 5079 len = (size_t)inq80[3];
5382 5080 if ((dlen >= 4) && ((len + 4) <= dlen)) {
5383 5081 /*
5384 5082 * Ensure sn termination, skip leading
5385 5083 * blanks, and create property
5386 5084 * 'inquiry-serial-no'.
5387 5085 */
5388 5086 sn = (char *)&inq80[4];
5389 5087 sn[len] = 0;
5390 5088 while (*sn && (*sn == ' '))
5391 5089 sn++;
5392 5090 if (*sn) {
5393 5091 (void) ddi_prop_update_string(
5394 5092 DDI_DEV_T_NONE,
5395 5093 SD_DEVINFO(un),
5396 5094 INQUIRY_SERIAL_NO, sn);
5397 5095 }
5398 5096 }
5399 5097 }
5400 5098 mutex_enter(SD_MUTEX(un));
5401 5099 }
5402 5100
5403 5101 /* collect page 83 data if available */
5404 5102 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
5405 5103 mutex_exit(SD_MUTEX(un));
5406 5104 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
5407 5105
5408 5106 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
5409 5107 0x01, 0x83, &inq83_resid);
5410 5108
5411 5109 if (rval != 0) {
5412 5110 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5413 5111 kmem_free(inq83, inq83_len);
5414 5112 inq83 = NULL;
5415 5113 inq83_len = 0;
5416 5114 }
5417 5115 mutex_enter(SD_MUTEX(un));
5418 5116 }
5419 5117 }
5420 5118
5421 5119 /*
5422 5120 * If transport has already registered a devid for this target
5423 5121 * then that takes precedence over the driver's determination
5424 5122 * of the devid.
5425 5123 *
5426 5124 * NOTE: The reason this check is done here instead of at the beginning
5427 5125 * of the function is to allow the code above to create the
5428 5126 * 'inquiry-serial-no' property.
5429 5127 */
5430 5128 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
5431 5129 ASSERT(un->un_devid);
5432 5130 un->un_f_devid_transport_defined = TRUE;
5433 5131 goto cleanup; /* use devid registered by the transport */
5434 5132 }
5435 5133
5436 5134 /*
5437 5135 * This is the case of antiquated Sun disk drives that have the
5438 5136 * FAB_DEVID property set in the disk_table. These drives
5439 5137 * manage the devid's by storing them in last 2 available sectors
5440 5138 * on the drive and have them fabricated by the ddi layer by calling
5441 5139 * ddi_devid_init and passing the DEVID_FAB flag.
5442 5140 */
5443 - if (un->un_f_opt_fab_devid == TRUE) {
5444 - /*
5445 - * Depending on EINVAL isn't reliable, since a reserved disk
5446 - * may result in invalid geometry, so check to make sure a
5447 - * reservation conflict did not occur during attach.
5448 - */
5449 - if ((sd_get_devid(ssc) == EINVAL) &&
5450 - (reservation_flag != SD_TARGET_IS_RESERVED)) {
5141 + if (un->un_f_opt_fab_devid == TRUE &&
5142 + reservation_flag != SD_TARGET_IS_RESERVED) {
5143 + if (sd_get_devid(ssc) == EINVAL)
5451 5144 /*
5452 5145 * The devid is invalid AND there is no reservation
5453 5146 * conflict. Fabricate a new devid.
5454 5147 */
5455 5148 (void) sd_create_devid(ssc);
5456 - }
5457 5149
5458 5150 /* Register the devid if it exists */
5459 5151 if (un->un_devid != NULL) {
5460 5152 (void) ddi_devid_register(SD_DEVINFO(un),
5461 5153 un->un_devid);
5462 5154 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5463 5155 "sd_register_devid: Devid Fabricated\n");
5464 5156 }
5465 5157 goto cleanup;
5466 5158 }
5467 5159
5468 5160 /* encode best devid possible based on data available */
5469 5161 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5470 5162 (char *)ddi_driver_name(SD_DEVINFO(un)),
5471 5163 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5472 5164 inq80, inq80_len - inq80_resid, inq83, inq83_len -
5473 5165 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5474 5166
5475 5167 /* devid successfully encoded, register devid */
5476 5168 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5477 5169
5478 - } else {
5170 + } else if (reservation_flag != SD_TARGET_IS_RESERVED) {
5479 5171 /*
5480 5172 * Unable to encode a devid based on data available.
5481 5173 * This is not a Sun qualified disk. Older Sun disk
5482 5174 * drives that have the SD_FAB_DEVID property
5483 5175 * set in the disk_table and non Sun qualified
5484 5176 * disks are treated in the same manner. These
5485 5177 * drives manage the devid's by storing them in
5486 5178 * last 2 available sectors on the drive and
5487 5179 * have them fabricated by the ddi layer by
5488 5180 * calling ddi_devid_init and passing the
5489 5181 * DEVID_FAB flag.
5490 5182 		 * Create a fabricated devid only if no fabricated
5491 5183 		 * devid already exists.
5492 5184 */
5493 5185 if (sd_get_devid(ssc) == EINVAL) {
5494 5186 (void) sd_create_devid(ssc);
5495 5187 }
5496 5188 un->un_f_opt_fab_devid = TRUE;
5497 5189
5498 5190 /* Register the devid if it exists */
5499 5191 if (un->un_devid != NULL) {
5500 5192 (void) ddi_devid_register(SD_DEVINFO(un),
5501 5193 un->un_devid);
5502 5194 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5503 5195 "sd_register_devid: devid fabricated using "
5504 5196 "ddi framework\n");
5505 5197 }
5506 5198 }
5507 5199
5508 5200 cleanup:
5509 5201 /* clean up resources */
5510 5202 if (inq80 != NULL) {
5511 5203 kmem_free(inq80, inq80_len);
5512 5204 }
5513 5205 if (inq83 != NULL) {
5514 5206 kmem_free(inq83, inq83_len);
5515 5207 }
5516 5208 }
5517 5209
5518 5210
5519 5211
5520 5212 /*
5521 5213 * Function: sd_get_devid
5522 5214 *
5523 5215 * Description: This routine will return 0 if a valid device id has been
5524 5216 * obtained from the target and stored in the soft state. If a
5525 5217 * valid device id has not been previously read and stored, a
5526 5218 * read attempt will be made.
5527 5219 *
5528 5220 	 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
5529 5221 *
5530 5222 * Return Code: 0 if we successfully get the device id
5531 5223 *
5532 5224 * Context: Kernel Thread
5533 5225 */
5534 5226
5535 5227 static int
5536 5228 sd_get_devid(sd_ssc_t *ssc)
5537 5229 {
5538 5230 struct dk_devid *dkdevid;
5539 5231 ddi_devid_t tmpid;
5540 5232 uint_t *ip;
5541 5233 size_t sz;
5542 5234 diskaddr_t blk;
5543 5235 int status;
5544 5236 int chksum;
5545 5237 int i;
5546 5238 size_t buffer_size;
5547 5239 struct sd_lun *un;
5548 5240
5549 5241 ASSERT(ssc != NULL);
5550 5242 un = ssc->ssc_un;
5551 5243 ASSERT(un != NULL);
5552 5244 ASSERT(mutex_owned(SD_MUTEX(un)));
5553 5245
5554 5246 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5555 5247 un);
5556 5248
5557 5249 if (un->un_devid != NULL) {
5558 5250 return (0);
5559 5251 }
5560 5252
5561 5253 mutex_exit(SD_MUTEX(un));
5562 5254 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5563 5255 (void *)SD_PATH_DIRECT) != 0) {
5564 5256 mutex_enter(SD_MUTEX(un));
5565 5257 return (EINVAL);
5566 5258 }
5567 5259
5568 5260 /*
5569 5261 * Read and verify device id, stored in the reserved cylinders at the
5570 5262 * end of the disk. Backup label is on the odd sectors of the last
5571 5263 	 * end of the disk. The backup label is on the odd sectors of the
5572 5264 	 * last track of the last cylinder. The device id will be on a track
5573 5265 	 * of the next-to-last cylinder.
5574 5266 mutex_enter(SD_MUTEX(un));
5575 5267 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
5576 5268 mutex_exit(SD_MUTEX(un));
5577 5269 dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
5578 5270 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
5579 5271 SD_PATH_DIRECT);
5580 5272
5581 5273 if (status != 0) {
5582 5274 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5583 5275 goto error;
5584 5276 }
5585 5277
5586 5278 /* Validate the revision */
5587 5279 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
5588 5280 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
5589 5281 status = EINVAL;
5590 5282 goto error;
5591 5283 }
5592 5284
5593 5285 /* Calculate the checksum */
5594 5286 chksum = 0;
5595 5287 ip = (uint_t *)dkdevid;
5596 5288 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5597 5289 i++) {
5598 5290 chksum ^= ip[i];
5599 5291 }
5600 5292
5601 5293 /* Compare the checksums */
5602 5294 if (DKD_GETCHKSUM(dkdevid) != chksum) {
5603 5295 status = EINVAL;
5604 5296 goto error;
5605 5297 }
5606 5298
5607 5299 /* Validate the device id */
5608 5300 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
5609 5301 status = EINVAL;
5610 5302 goto error;
5611 5303 }
5612 5304
5613 5305 /*
5614 5306 * Store the device id in the driver soft state
5615 5307 */
5616 5308 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
5617 5309 tmpid = kmem_alloc(sz, KM_SLEEP);
5618 5310
5619 5311 mutex_enter(SD_MUTEX(un));
5620 5312
5621 5313 un->un_devid = tmpid;
5622 5314 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
5623 5315
5624 5316 kmem_free(dkdevid, buffer_size);
5625 5317
5626 5318 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);
5627 5319
5628 5320 return (status);
5629 5321 error:
5630 5322 mutex_enter(SD_MUTEX(un));
5631 5323 kmem_free(dkdevid, buffer_size);
5632 5324 return (status);
5633 5325 }
5634 5326
5635 5327
5636 5328 /*
5637 5329 * Function: sd_create_devid
5638 5330 *
5639 5331 * Description: This routine will fabricate the device id and write it
5640 5332 * to the disk.
5641 5333 *
5642 5334 	 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
5643 5335 *
5644 5336 	 * Return Code: value of the fabricated device id, or NULL on failure
5645 5337 *
5646 5338 * Context: Kernel Thread
5647 5339 */
5648 5340
5649 5341 static ddi_devid_t
5650 5342 sd_create_devid(sd_ssc_t *ssc)
5651 5343 {
5652 5344 struct sd_lun *un;
5653 5345
5654 5346 ASSERT(ssc != NULL);
5655 5347 un = ssc->ssc_un;
5656 5348 ASSERT(un != NULL);
5657 5349
5658 5350 /* Fabricate the devid */
5659 5351 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
5660 5352 == DDI_FAILURE) {
5661 5353 return (NULL);
5662 5354 }
5663 5355
5664 5356 /* Write the devid to disk */
5665 5357 if (sd_write_deviceid(ssc) != 0) {
5666 5358 ddi_devid_free(un->un_devid);
5667 5359 un->un_devid = NULL;
5668 5360 }
5669 5361
5670 5362 return (un->un_devid);
5671 5363 }
5672 5364
5673 5365
5674 5366 /*
5675 5367 * Function: sd_write_deviceid
5676 5368 *
5677 5369 * Description: This routine will write the device id to the disk
5678 5370 * reserved sector.
5679 5371 *
5680 5372 	 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
5681 5373 *
5682 5374 	 * Return Code: -1 if the devid block cannot be located
5683 5375 	 *              value returned by sd_send_scsi_WRITE otherwise
5684 5376 *
5685 5377 * Context: Kernel Thread
5686 5378 */
5687 5379
5688 5380 static int
5689 5381 sd_write_deviceid(sd_ssc_t *ssc)
5690 5382 {
5691 5383 struct dk_devid *dkdevid;
5692 5384 uchar_t *buf;
5693 5385 diskaddr_t blk;
5694 5386 uint_t *ip, chksum;
5695 5387 int status;
5696 5388 int i;
5697 5389 struct sd_lun *un;
5698 5390
5699 5391 ASSERT(ssc != NULL);
5700 5392 un = ssc->ssc_un;
5701 5393 ASSERT(un != NULL);
5702 5394 ASSERT(mutex_owned(SD_MUTEX(un)));
5703 5395
5704 5396 mutex_exit(SD_MUTEX(un));
5705 5397 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5706 5398 (void *)SD_PATH_DIRECT) != 0) {
5707 5399 mutex_enter(SD_MUTEX(un));
5708 5400 return (-1);
5709 5401 }
5710 5402
5711 5403
5712 5404 /* Allocate the buffer */
5713 5405 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5714 5406 dkdevid = (struct dk_devid *)buf;
5715 5407
5716 5408 /* Fill in the revision */
5717 5409 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5718 5410 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5719 5411
5720 5412 /* Copy in the device id */
5721 5413 mutex_enter(SD_MUTEX(un));
5722 5414 bcopy(un->un_devid, &dkdevid->dkd_devid,
5723 5415 ddi_devid_sizeof(un->un_devid));
5724 5416 mutex_exit(SD_MUTEX(un));
5725 5417
5726 5418 /* Calculate the checksum */
5727 5419 chksum = 0;
5728 5420 ip = (uint_t *)dkdevid;
5729 5421 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5730 5422 i++) {
5731 5423 chksum ^= ip[i];
5732 5424 }
5733 5425
5734 5426 /* Fill-in checksum */
5735 5427 DKD_FORMCHKSUM(chksum, dkdevid);
5736 5428
5737 5429 /* Write the reserved sector */
5738 5430 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5739 5431 SD_PATH_DIRECT);
5740 5432 if (status != 0)
5741 5433 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5742 5434
5743 5435 kmem_free(buf, un->un_sys_blocksize);
5744 5436
5745 5437 mutex_enter(SD_MUTEX(un));
5746 5438 return (status);
5747 5439 }
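
/*
 * Editorial sketch (not part of the original source): sd_get_devid()
 * and sd_write_deviceid() above both guard the on-disk dk_devid
 * sector with the same scheme -- a 2-byte revision, the devid body,
 * and an XOR checksum of every 32-bit word in the first
 * DEV_BSIZE - 4 bytes, stored in the trailing word. A minimal
 * standalone illustration of that checksum, relying only on uint_t
 * and DEV_BSIZE from the system headers already included by this
 * file; dkd_checksum() is a hypothetical helper name.
 */
static uint_t
dkd_checksum(const void *sector)
{
	const uint_t *ip = sector;
	uint_t chksum = 0;
	size_t i;

	/* XOR all words except the final one, which holds the checksum. */
	for (i = 0; i < (DEV_BSIZE - sizeof (uint_t)) / sizeof (uint_t); i++)
		chksum ^= ip[i];
	return (chksum);
}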
5748 5440
5749 5441
5750 5442 /*
5751 5443 * Function: sd_check_vpd_page_support
5752 5444 *
5753 5445 * Description: This routine sends an inquiry command with the EVPD bit set and
5754 5446 * a page code of 0x00 to the device. It is used to determine which
5755 5447 * vital product pages are available to find the devid. We are
5756 5448 	 * looking for pages 0x83, 0x80, or 0xB1. A return value of -1 means
5757 5449 	 * the device does not support that command.
5758 5450 *
5759 5451 	 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
5760 5452 *
5761 5453 * Return Code: 0 - success
5762 5454 	 *		-1 - failure (device does not support VPD pages)
5763 5455 *
5764 5456 * Context: This routine can sleep.
5765 5457 */
5766 5458
5767 5459 static int
5768 5460 sd_check_vpd_page_support(sd_ssc_t *ssc)
5769 5461 {
5770 5462 uchar_t *page_list = NULL;
5771 5463 uchar_t page_length = 0xff; /* Use max possible length */
5772 5464 uchar_t evpd = 0x01; /* Set the EVPD bit */
5773 5465 uchar_t page_code = 0x00; /* Supported VPD Pages */
5774 5466 int rval = 0;
5775 5467 int counter;
5776 5468 struct sd_lun *un;
5777 5469
5778 5470 ASSERT(ssc != NULL);
5779 5471 un = ssc->ssc_un;
5780 5472 ASSERT(un != NULL);
5781 5473 ASSERT(mutex_owned(SD_MUTEX(un)));
5782 5474
5783 5475 mutex_exit(SD_MUTEX(un));
5784 5476
5785 5477 /*
5786 5478 * We'll set the page length to the maximum to save figuring it out
5787 5479 * with an additional call.
5788 5480 */
5789 5481 page_list = kmem_zalloc(page_length, KM_SLEEP);
5790 5482
5791 5483 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5792 5484 page_code, NULL);
5793 5485
5794 5486 if (rval != 0)
5795 5487 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5796 5488
5797 5489 mutex_enter(SD_MUTEX(un));
5798 5490
5799 5491 /*
5800 5492 * Now we must validate that the device accepted the command, as some
5801 5493 * drives do not support it. If the drive does support it, we will
5802 5494 * return 0, and the supported pages will be in un_vpd_page_mask. If
5803 5495 * not, we return -1.
5804 5496 */
5805 5497 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
5806 5498 		/* Loop to find the pages we need */
5807 5499 counter = 4; /* Supported pages start at byte 4, with 0x00 */
5808 5500
5809 5501 /*
5810 5502 * Pages are returned in ascending order, and 0x83 is what we
5811 5503 * are hoping for.
5812 5504 */
5813 5505 while ((page_list[counter] <= 0xB1) &&
5814 5506 (counter <= (page_list[VPD_PAGE_LENGTH] +
5815 5507 VPD_HEAD_OFFSET))) {
5816 5508 /*
5817 5509 			 * The page list length is at page_list[3]; the last
5818 5510 			 * valid index is that length plus VPD_HEAD_OFFSET (3).
5819 5511 */
5820 5512
5821 5513 switch (page_list[counter]) {
5822 5514 case 0x00:
5823 5515 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
5824 5516 break;
5825 5517 case 0x80:
5826 5518 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
5827 5519 break;
5828 5520 case 0x81:
5829 5521 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
5830 5522 break;
5831 5523 case 0x82:
5832 5524 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
5833 5525 break;
5834 5526 case 0x83:
5835 5527 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
5836 5528 break;
5837 5529 case 0x86:
5838 5530 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
5839 5531 break;
5532 + case 0xB0:
5533 + un->un_vpd_page_mask |= SD_VPD_BLK_LIMITS_PG;
5534 + break;
5840 5535 case 0xB1:
5841 5536 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
5842 5537 break;
5843 5538 }
5844 5539 counter++;
5845 5540 }
5846 5541
5847 5542 } else {
5848 5543 rval = -1;
5849 5544
5850 5545 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5851 5546 "sd_check_vpd_page_support: This drive does not implement "
5852 5547 "VPD pages.\n");
5853 5548 }
5854 5549
5855 5550 kmem_free(page_list, page_length);
5856 5551
5857 5552 return (rval);
5858 5553 }
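
/*
 * Editorial sketch (not part of the original source): the walk above
 * follows the SPC "Supported VPD Pages" layout -- byte 3
 * (VPD_PAGE_LENGTH) holds the length of the page-code list, and the
 * page codes themselves start at byte 4 in ascending order, so the
 * last valid index is page_list[3] + VPD_HEAD_OFFSET, assuming
 * VPD_HEAD_OFFSET is 3, consistent with the loop bounds above.
 * A reduced form of the same loop; vpd_page_supported() is a
 * hypothetical helper name.
 */
static boolean_t
vpd_page_supported(const uchar_t *page_list, uchar_t page_code)
{
	int i;

	/* Page codes occupy bytes 4 .. page_list[3] + 3, inclusive. */
	for (i = 4; i <= page_list[VPD_PAGE_LENGTH] + VPD_HEAD_OFFSET; i++) {
		if (page_list[i] == page_code)
			return (B_TRUE);
	}
	return (B_FALSE);
}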
5859 5554
5860 5555
5861 5556 /*
5862 5557 * Function: sd_setup_pm
5863 5558 *
5864 5559 * Description: Initialize Power Management on the device
5865 5560 *
5866 5561 * Context: Kernel Thread
5867 5562 */
5868 -
5563 +#ifdef notyet
5869 5564 static void
5870 5565 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
5871 5566 {
5872 5567 uint_t log_page_size;
5873 5568 uchar_t *log_page_data;
5874 5569 int rval = 0;
5875 5570 struct sd_lun *un;
5876 5571
5877 5572 ASSERT(ssc != NULL);
5878 5573 un = ssc->ssc_un;
5879 5574 ASSERT(un != NULL);
5880 5575
5881 5576 /*
5882 5577 * Since we are called from attach, holding a mutex for
5883 5578 * un is unnecessary. Because some of the routines called
5884 5579 * from here require SD_MUTEX to not be held, assert this
5885 5580 * right up front.
5886 5581 */
5887 5582 ASSERT(!mutex_owned(SD_MUTEX(un)));
5888 5583 /*
5889 5584 * Since the sd device does not have the 'reg' property,
5890 5585 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
5891 5586 * The following code is to tell cpr that this device
5892 5587 * DOES need to be suspended and resumed.
5893 5588 */
5894 5589 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
5895 5590 "pm-hardware-state", "needs-suspend-resume");
5896 5591
5897 5592 /*
5898 5593 * This complies with the new power management framework
5899 5594 * for certain desktop machines. Create the pm_components
5900 5595 * property as a string array property.
5901 5596 * If un_f_pm_supported is TRUE, that means the disk
5902 5597 * attached HBA has set the "pm-capable" property and
5903 5598 * the value of this property is bigger than 0.
5904 5599 */
5905 5600 if (un->un_f_pm_supported) {
5906 5601 /*
5907 5602 		 * Not all devices have a motor, so try it first; some
5908 5603 		 * devices may return ILLEGAL REQUEST, and some will
5909 5604 		 * hang.
5910 5605 		 * The following START_STOP_UNIT is used to check whether
5911 5606 		 * the target device has a motor.
5912 5607 */
5913 5608 un->un_f_start_stop_supported = TRUE;
5914 5609
5915 5610 if (un->un_f_power_condition_supported) {
5916 5611 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5917 5612 SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5918 5613 SD_PATH_DIRECT);
5919 5614 if (rval != 0) {
5920 5615 un->un_f_power_condition_supported = FALSE;
5921 5616 }
5922 5617 }
5618 +		/* Note: this fails for optical drives with no media */
5923 5619 if (!un->un_f_power_condition_supported) {
5924 5620 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5925 5621 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5926 5622 }
5927 5623 if (rval != 0) {
5928 5624 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5929 5625 un->un_f_start_stop_supported = FALSE;
5930 5626 }
5931 5627
5932 5628 /*
5933 5629 		 * Create pm properties anyway; otherwise the parent can't
5934 5630 		 * go to sleep.
5935 5631 */
5936 5632 un->un_f_pm_is_enabled = TRUE;
5937 5633 (void) sd_create_pm_components(devi, un);
5938 5634
5939 5635 /*
5940 5636 * If it claims that log sense is supported, check it out.
5941 5637 */
5942 5638 if (un->un_f_log_sense_supported) {
5943 5639 rval = sd_log_page_supported(ssc,
5944 5640 START_STOP_CYCLE_PAGE);
5945 5641 if (rval == 1) {
5946 5642 /* Page found, use it. */
5947 5643 un->un_start_stop_cycle_page =
5948 5644 START_STOP_CYCLE_PAGE;
5949 5645 } else {
5950 5646 /*
5951 5647 * Page not found or log sense is not
5952 5648 * supported.
5953 5649 * Notice we do not check the old style
5954 5650 * START_STOP_CYCLE_VU_PAGE because this
5955 5651 * code path does not apply to old disks.
5956 5652 */
5957 5653 un->un_f_log_sense_supported = FALSE;
5958 5654 un->un_f_pm_log_sense_smart = FALSE;
5959 5655 }
5960 5656 }
5961 5657
5962 5658 return;
5963 5659 }
5964 5660
5965 5661 /*
5966 5662 * For the disk whose attached HBA has not set the "pm-capable"
5967 5663 	 * property, check whether it supports power management.
5968 5664 */
5969 5665 if (!un->un_f_log_sense_supported) {
5970 5666 un->un_power_level = SD_SPINDLE_ON;
5971 5667 un->un_f_pm_is_enabled = FALSE;
5972 5668 return;
5973 5669 }
5974 5670
5975 5671 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
5976 5672
5977 5673 #ifdef SDDEBUG
5978 5674 if (sd_force_pm_supported) {
5979 5675 /* Force a successful result */
5980 5676 rval = 1;
5981 5677 }
5982 5678 #endif
5983 5679
5984 5680 /*
5985 5681 * If the start-stop cycle counter log page is not supported
5986 5682 * or if the pm-capable property is set to be false (0),
5987 5683 * then we should not create the pm_components property.
5988 5684 */
5989 5685 if (rval == -1) {
5990 5686 /*
5991 5687 * Error.
5992 5688 * Reading log sense failed, most likely this is
5993 5689 * an older drive that does not support log sense.
5994 5690 * If this fails auto-pm is not supported.
5995 5691 */
5996 5692 un->un_power_level = SD_SPINDLE_ON;
5997 5693 un->un_f_pm_is_enabled = FALSE;
5998 5694
5999 5695 } else if (rval == 0) {
6000 5696 /*
6001 5697 * Page not found.
6002 5698 * The start stop cycle counter is implemented as page
6003 5699 		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
6004 5700 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
6005 5701 */
6006 5702 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
6007 5703 /*
6008 5704 * Page found, use this one.
6009 5705 */
6010 5706 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
6011 5707 un->un_f_pm_is_enabled = TRUE;
6012 5708 } else {
6013 5709 /*
6014 5710 * Error or page not found.
6015 5711 * auto-pm is not supported for this device.
6016 5712 */
6017 5713 un->un_power_level = SD_SPINDLE_ON;
6018 5714 un->un_f_pm_is_enabled = FALSE;
6019 5715 }
6020 5716 } else {
6021 5717 /*
6022 5718 * Page found, use it.
6023 5719 */
6024 5720 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
6025 5721 un->un_f_pm_is_enabled = TRUE;
6026 5722 }
6027 5723
6028 5724
6029 5725 if (un->un_f_pm_is_enabled == TRUE) {
6030 5726 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6031 5727 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6032 5728
6033 5729 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6034 5730 log_page_size, un->un_start_stop_cycle_page,
6035 5731 0x01, 0, SD_PATH_DIRECT);
6036 5732
6037 5733 if (rval != 0) {
6038 5734 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6039 5735 }
6040 5736
6041 5737 #ifdef SDDEBUG
6042 5738 if (sd_force_pm_supported) {
6043 5739 /* Force a successful result */
6044 5740 rval = 0;
6045 5741 }
6046 5742 #endif
6047 5743
6048 5744 /*
6049 5745 		 * If the Log Sense for the start/stop cycle counter page
6050 5746 * succeeds, then power management is supported and we can
6051 5747 * enable auto-pm.
6052 5748 */
6053 5749 if (rval == 0) {
6054 5750 (void) sd_create_pm_components(devi, un);
6055 5751 } else {
6056 5752 un->un_power_level = SD_SPINDLE_ON;
6057 5753 un->un_f_pm_is_enabled = FALSE;
6058 5754 }
6059 5755
6060 5756 kmem_free(log_page_data, log_page_size);
6061 5757 }
6062 5758 }
6063 5759
6064 5760
6065 5761 /*
6066 5762 * Function: sd_create_pm_components
6067 5763 *
6068 5764 * Description: Initialize PM property.
6069 5765 *
6070 5766 * Context: Kernel thread context
6071 5767 */
6072 5768
6073 5769 static void
6074 5770 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
6075 5771 {
6076 5772 ASSERT(!mutex_owned(SD_MUTEX(un)));
6077 5773
6078 5774 if (un->un_f_power_condition_supported) {
6079 5775 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6080 5776 "pm-components", sd_pwr_pc.pm_comp, 5)
6081 5777 != DDI_PROP_SUCCESS) {
6082 5778 un->un_power_level = SD_SPINDLE_ACTIVE;
6083 5779 un->un_f_pm_is_enabled = FALSE;
6084 5780 return;
6085 5781 }
6086 5782 } else {
6087 5783 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6088 5784 "pm-components", sd_pwr_ss.pm_comp, 3)
6089 5785 != DDI_PROP_SUCCESS) {
6090 5786 un->un_power_level = SD_SPINDLE_ON;
6091 5787 un->un_f_pm_is_enabled = FALSE;
6092 5788 return;
6093 5789 }
6094 5790 }
6095 5791 /*
6096 5792 * When components are initially created they are idle,
6097 5793 * power up any non-removables.
6098 5794 * Note: the return value of pm_raise_power can't be used
6099 5795 * for determining if PM should be enabled for this device.
6100 5796 * Even if you check the return values and remove this
6101 5797 * property created above, the PM framework will not honor the
6102 5798 * change after the first call to pm_raise_power. Hence,
6103 5799 * removal of that property does not help if pm_raise_power
6104 5800 * fails. In the case of removable media, the start/stop
6105 5801 * will fail if the media is not present.
6106 5802 */
6107 5803 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
6108 5804 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
6109 5805 mutex_enter(SD_MUTEX(un));
6110 5806 un->un_power_level = SD_PM_STATE_ACTIVE(un);
6111 5807 mutex_enter(&un->un_pm_mutex);
6112 5808 /* Set to on and not busy. */
6113 5809 un->un_pm_count = 0;
6114 5810 } else {
6115 5811 mutex_enter(SD_MUTEX(un));
6116 5812 un->un_power_level = SD_PM_STATE_STOPPED(un);
6117 5813 mutex_enter(&un->un_pm_mutex);
6118 5814 /* Set to off. */
6119 5815 un->un_pm_count = -1;
6120 5816 }
6121 5817 mutex_exit(&un->un_pm_mutex);
6122 5818 mutex_exit(SD_MUTEX(un));
6123 5819 }
5820 +#endif
6124 5821
6125 -
6126 5822 /*
6127 5823 * Function: sd_ddi_suspend
6128 5824 *
6129 5825 * Description: Performs system power-down operations. This includes
6130 5826 * setting the drive state to indicate its suspended so
6131 5827 * that no new commands will be accepted. Also, wait for
6132 5828 * all commands that are in transport or queued to a timer
6133 5829 * for retry to complete. All timeout threads are cancelled.
6134 5830 *
6135 5831 * Return Code: DDI_FAILURE or DDI_SUCCESS
6136 5832 *
6137 5833 * Context: Kernel thread context
6138 5834 */
6139 5835
6140 5836 static int
6141 5837 sd_ddi_suspend(dev_info_t *devi)
6142 5838 {
6143 5839 struct sd_lun *un;
6144 5840 clock_t wait_cmds_complete;
6145 5841
6146 5842 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6147 5843 if (un == NULL) {
6148 5844 return (DDI_FAILURE);
6149 5845 }
6150 5846
6151 5847 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6152 5848
6153 5849 mutex_enter(SD_MUTEX(un));
6154 5850
6155 5851 /* Return success if the device is already suspended. */
6156 5852 if (un->un_state == SD_STATE_SUSPENDED) {
6157 5853 mutex_exit(SD_MUTEX(un));
6158 5854 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6159 5855 "device already suspended, exiting\n");
6160 5856 return (DDI_SUCCESS);
6161 5857 }
6162 5858
6163 5859 /* Return failure if the device is being used by HA */
6164 5860 if (un->un_resvd_status &
6165 5861 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6166 5862 mutex_exit(SD_MUTEX(un));
6167 5863 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6168 5864 "device in use by HA, exiting\n");
6169 5865 return (DDI_FAILURE);
6170 5866 }
6171 5867
6172 5868 /*
6173 5869 * Return failure if the device is in a resource wait
6174 5870 * or power changing state.
6175 5871 */
6176 5872 if ((un->un_state == SD_STATE_RWAIT) ||
6177 5873 (un->un_state == SD_STATE_PM_CHANGING)) {
6178 5874 mutex_exit(SD_MUTEX(un));
6179 5875 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6180 5876 "device in resource wait state, exiting\n");
6181 5877 return (DDI_FAILURE);
6182 5878 }
6183 5879
6184 5880
6185 5881 un->un_save_state = un->un_last_state;
6186 5882 New_state(un, SD_STATE_SUSPENDED);
6187 5883
6188 5884 /*
6189 5885 * Wait for all commands that are in transport or queued to a timer
6190 5886 * for retry to complete.
6191 5887 *
6192 5888 * While waiting, no new commands will be accepted or sent because of
6193 5889 * the new state we set above.
6194 5890 *
6195 5891 * Wait till current operation has completed. If we are in the resource
6196 5892 * wait state (with an intr outstanding) then we need to wait till the
6197 5893 * intr completes and starts the next cmd. We want to wait for
6198 5894 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6199 5895 */
6200 5896 wait_cmds_complete = ddi_get_lbolt() +
6201 5897 (sd_wait_cmds_complete * drv_usectohz(1000000));
6202 5898
6203 5899 while (un->un_ncmds_in_transport != 0) {
6204 5900 /*
6205 5901 * Fail if commands do not finish in the specified time.
6206 5902 */
6207 5903 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6208 5904 wait_cmds_complete) == -1) {
6209 5905 /*
6210 5906 * Undo the state changes made above. Everything
6211 5907 			 * must go back to its original value.
6212 5908 */
6213 5909 Restore_state(un);
6214 5910 un->un_last_state = un->un_save_state;
6215 5911 /* Wake up any threads that might be waiting. */
6216 5912 cv_broadcast(&un->un_suspend_cv);
6217 5913 mutex_exit(SD_MUTEX(un));
6218 5914 SD_ERROR(SD_LOG_IO_PM, un,
6219 5915 "sd_ddi_suspend: failed due to outstanding cmds\n");
6220 5916 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
6221 5917 return (DDI_FAILURE);
6222 5918 }
6223 5919 }
6224 5920
6225 5921 /*
6226 5922 * Cancel SCSI watch thread and timeouts, if any are active
6227 5923 */
6228 5924
6229 5925 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
6230 5926 opaque_t temp_token = un->un_swr_token;
6231 5927 mutex_exit(SD_MUTEX(un));
6232 5928 scsi_watch_suspend(temp_token);
6233 5929 mutex_enter(SD_MUTEX(un));
6234 5930 }
6235 5931
6236 5932 if (un->un_reset_throttle_timeid != NULL) {
6237 5933 timeout_id_t temp_id = un->un_reset_throttle_timeid;
6238 5934 un->un_reset_throttle_timeid = NULL;
6239 5935 mutex_exit(SD_MUTEX(un));
6240 5936 (void) untimeout(temp_id);
6241 5937 mutex_enter(SD_MUTEX(un));
6242 5938 }
6243 5939
6244 5940 if (un->un_dcvb_timeid != NULL) {
6245 5941 timeout_id_t temp_id = un->un_dcvb_timeid;
6246 5942 un->un_dcvb_timeid = NULL;
6247 5943 mutex_exit(SD_MUTEX(un));
6248 5944 (void) untimeout(temp_id);
6249 5945 mutex_enter(SD_MUTEX(un));
6250 5946 }
6251 5947
6252 5948 mutex_enter(&un->un_pm_mutex);
6253 5949 if (un->un_pm_timeid != NULL) {
6254 5950 timeout_id_t temp_id = un->un_pm_timeid;
6255 5951 un->un_pm_timeid = NULL;
6256 5952 mutex_exit(&un->un_pm_mutex);
6257 5953 mutex_exit(SD_MUTEX(un));
6258 5954 (void) untimeout(temp_id);
6259 5955 mutex_enter(SD_MUTEX(un));
6260 5956 } else {
6261 5957 mutex_exit(&un->un_pm_mutex);
6262 5958 }
6263 5959
6264 5960 if (un->un_rmw_msg_timeid != NULL) {
6265 5961 timeout_id_t temp_id = un->un_rmw_msg_timeid;
6266 5962 un->un_rmw_msg_timeid = NULL;
6267 5963 mutex_exit(SD_MUTEX(un));
6268 5964 (void) untimeout(temp_id);
6269 5965 mutex_enter(SD_MUTEX(un));
6270 5966 }
6271 5967
6272 5968 if (un->un_retry_timeid != NULL) {
6273 5969 timeout_id_t temp_id = un->un_retry_timeid;
6274 5970 un->un_retry_timeid = NULL;
6275 5971 mutex_exit(SD_MUTEX(un));
6276 5972 (void) untimeout(temp_id);
6277 5973 mutex_enter(SD_MUTEX(un));
6278 5974
6279 5975 if (un->un_retry_bp != NULL) {
6280 5976 un->un_retry_bp->av_forw = un->un_waitq_headp;
6281 5977 un->un_waitq_headp = un->un_retry_bp;
6282 5978 if (un->un_waitq_tailp == NULL) {
6283 5979 un->un_waitq_tailp = un->un_retry_bp;
6284 5980 }
6285 5981 un->un_retry_bp = NULL;
6286 5982 un->un_retry_statp = NULL;
6287 5983 }
6288 5984 }
6289 5985
6290 5986 if (un->un_direct_priority_timeid != NULL) {
6291 5987 timeout_id_t temp_id = un->un_direct_priority_timeid;
6292 5988 un->un_direct_priority_timeid = NULL;
6293 5989 mutex_exit(SD_MUTEX(un));
6294 5990 (void) untimeout(temp_id);
6295 5991 mutex_enter(SD_MUTEX(un));
6296 5992 }
6297 5993
6298 - if (un->un_f_is_fibre == TRUE) {
6299 - /*
6300 - * Remove callbacks for insert and remove events
6301 - */
6302 - if (un->un_insert_event != NULL) {
6303 - mutex_exit(SD_MUTEX(un));
6304 - (void) ddi_remove_event_handler(un->un_insert_cb_id);
6305 - mutex_enter(SD_MUTEX(un));
6306 - un->un_insert_event = NULL;
6307 - }
6308 -
6309 - if (un->un_remove_event != NULL) {
6310 - mutex_exit(SD_MUTEX(un));
6311 - (void) ddi_remove_event_handler(un->un_remove_cb_id);
6312 - mutex_enter(SD_MUTEX(un));
6313 - un->un_remove_event = NULL;
6314 - }
6315 - }
6316 -
6317 5994 mutex_exit(SD_MUTEX(un));
6318 5995
6319 5996 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6320 5997
6321 5998 return (DDI_SUCCESS);
6322 5999 }
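
/*
 * Editorial sketch (not part of the original source): every timeout
 * cancellation in sd_ddi_suspend() above uses the same idiom --
 * capture the id, NULL the soft-state field while still holding
 * SD_MUTEX, then drop the mutex around untimeout(9F), because
 * untimeout() may block until a running handler returns and that
 * handler may itself need SD_MUTEX. The generic shape of the idiom,
 * for a hypothetical field un_foo_timeid:
 *
 *	if (un->un_foo_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_foo_timeid;
 *		un->un_foo_timeid = NULL;   (mark cancelled under the lock)
 *		mutex_exit(SD_MUTEX(un));   (avoid deadlock with the handler)
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}
 */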
6323 6000
6324 6001
6325 6002 /*
6326 6003 * Function: sd_ddi_resume
6327 6004 *
6328 6005  * Description: Performs system power-up operations.
6329 6006 *
6330 6007 * Return Code: DDI_SUCCESS
6331 6008 * DDI_FAILURE
6332 6009 *
6333 6010 * Context: Kernel thread context
6334 6011 */
6335 6012
6336 6013 static int
6337 6014 sd_ddi_resume(dev_info_t *devi)
6338 6015 {
6339 6016 struct sd_lun *un;
6340 6017
6341 6018 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6342 6019 if (un == NULL) {
6343 6020 return (DDI_FAILURE);
6344 6021 }
6345 6022
6346 6023 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6347 6024
6348 6025 mutex_enter(SD_MUTEX(un));
6349 6026 Restore_state(un);
6350 6027
6351 6028 /*
6352 6029 	 * Restore the state which was saved to give the
6353 6030 	 * right state in un_last_state.
6354 6031 */
6355 6032 un->un_last_state = un->un_save_state;
6356 6033 /*
6357 6034 * Note: throttle comes back at full.
6358 6035 * Also note: this MUST be done before calling pm_raise_power
6359 6036 * otherwise the system can get hung in biowait. The scenario where
6360 6037 * this'll happen is under cpr suspend. Writing of the system
6361 6038 * state goes through sddump, which writes 0 to un_throttle. If
6362 6039 * writing the system state then fails, example if the partition is
6363 6040 * too small, then cpr attempts a resume. If throttle isn't restored
6364 6041 * from the saved value until after calling pm_raise_power then
6365 6042 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
6366 6043 * in biowait.
6367 6044 */
6368 6045 un->un_throttle = un->un_saved_throttle;
6369 6046
6370 6047 /*
6371 6048 * The chance of failure is very rare as the only command done in power
6372 6049 * entry point is START command when you transition from 0->1 or
6373 6050 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
6374 6051 * which suspend was done. Ignore the return value as the resume should
6375 6052 * not be failed. In the case of removable media the media need not be
6376 6053 * inserted and hence there is a chance that raise power will fail with
6377 6054 * media not present.
6378 6055 */
6379 6056 if (un->un_f_attach_spinup) {
6380 6057 mutex_exit(SD_MUTEX(un));
6381 6058 (void) pm_raise_power(SD_DEVINFO(un), 0,
6382 6059 SD_PM_STATE_ACTIVE(un));
6383 6060 mutex_enter(SD_MUTEX(un));
6384 6061 }
6385 6062
6386 6063 /*
6387 6064 * Don't broadcast to the suspend cv and therefore possibly
6388 6065 * start I/O until after power has been restored.
6389 6066 */
6390 6067 cv_broadcast(&un->un_suspend_cv);
6391 6068 cv_broadcast(&un->un_state_cv);
6392 6069
6393 6070 /* restart thread */
6394 6071 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6395 6072 scsi_watch_resume(un->un_swr_token);
6396 6073 }
6397 6074
6398 -#if (defined(__fibre))
6399 - if (un->un_f_is_fibre == TRUE) {
6400 - /*
6401 - * Add callbacks for insert and remove events
6402 - */
6403 - if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6404 - sd_init_event_callbacks(un);
6405 - }
6406 - }
6407 -#endif
6408 -
6409 6075 /*
6410 6076 * Transport any pending commands to the target.
6411 6077 *
6412 6078 	 * If this is a low-activity device, commands in the queue will have
6413 6079 	 * to wait until new commands come in, which may take a while. Also, we
6414 6080 * specifically don't check un_ncmds_in_transport because we know that
6415 6081 * there really are no commands in progress after the unit was
6416 6082 * suspended and we could have reached the throttle level, been
6417 6083 	 * suspended, and have no new commands coming in for a while. Highly
6418 6084 * unlikely, but so is the low-activity disk scenario.
6419 6085 */
6420 6086 ddi_xbuf_dispatch(un->un_xbuf_attr);
6421 6087
6422 6088 sd_start_cmds(un, NULL);
6423 6089 mutex_exit(SD_MUTEX(un));
6424 6090
6425 6091 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6426 6092
6427 6093 return (DDI_SUCCESS);
6428 6094 }
6429 6095
6430 6096
6431 6097 /*
6432 6098 * Function: sd_pm_state_change
6433 6099 *
6434 6100 * Description: Change the driver power state.
6435 6101 * Someone else is required to actually change the driver
6436 6102 * power level.
6437 6103 *
6438 6104 * Arguments: un - driver soft state (unit) structure
6439 6105 * level - the power level that is changed to
6440 6106 * flag - to decide how to change the power state
6441 6107 *
6442 6108 * Return Code: DDI_SUCCESS
6443 6109 *
6444 6110 * Context: Kernel thread context
6445 6111 */
6446 6112 static int
6447 6113 sd_pm_state_change(struct sd_lun *un, int level, int flag)
6448 6114 {
6449 6115 ASSERT(un != NULL);
6450 6116 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n");
6451 6117
6452 6118 ASSERT(!mutex_owned(SD_MUTEX(un)));
6453 6119 mutex_enter(SD_MUTEX(un));
6454 6120
6455 6121 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) {
6456 6122 un->un_power_level = level;
6457 6123 ASSERT(!mutex_owned(&un->un_pm_mutex));
6458 6124 mutex_enter(&un->un_pm_mutex);
6459 6125 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
6460 6126 un->un_pm_count++;
6461 6127 ASSERT(un->un_pm_count == 0);
6462 6128 }
6463 6129 mutex_exit(&un->un_pm_mutex);
6464 6130 } else {
6465 6131 /*
6466 6132 * Exit if power management is not enabled for this device,
6467 6133 * or if the device is being used by HA.
6468 6134 */
6469 6135 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
6470 6136 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
6471 6137 mutex_exit(SD_MUTEX(un));
6472 6138 SD_TRACE(SD_LOG_POWER, un,
6473 6139 "sd_pm_state_change: exiting\n");
6474 6140 return (DDI_FAILURE);
6475 6141 }
6476 6142
6477 6143 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: "
6478 6144 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver);
6479 6145
6480 6146 /*
6481 6147 * See if the device is not busy, ie.:
6482 6148 * - we have no commands in the driver for this device
6483 6149 * - not waiting for resources
6484 6150 */
6485 6151 if ((un->un_ncmds_in_driver == 0) &&
6486 6152 (un->un_state != SD_STATE_RWAIT)) {
6487 6153 /*
6488 6154 * The device is not busy, so it is OK to go to low
6489 6155 * power state. Indicate low power, but rely on someone
6490 6156 * else to actually change it.
6491 6157 */
6492 6158 mutex_enter(&un->un_pm_mutex);
6493 6159 un->un_pm_count = -1;
6494 6160 mutex_exit(&un->un_pm_mutex);
6495 6161 un->un_power_level = level;
6496 6162 }
6497 6163 }
6498 6164
6499 6165 mutex_exit(SD_MUTEX(un));
6500 6166
6501 6167 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n");
6502 6168
6503 6169 return (DDI_SUCCESS);
6504 6170 }
6505 6171
6506 6172
6507 6173 /*
6508 6174 * Function: sd_pm_idletimeout_handler
6509 6175 *
6510 6176 * Description: A timer routine that's active only while a device is busy.
6511 6177 * The purpose is to extend slightly the pm framework's busy
6512 6178 * view of the device to prevent busy/idle thrashing for
6513 6179 * back-to-back commands. Do this by comparing the current time
6514 6180 * to the time at which the last command completed and when the
6515 6181 * difference is greater than sd_pm_idletime, call
6516 6182 * pm_idle_component. In addition to indicating idle to the pm
6517 6183 * framework, update the chain type to again use the internal pm
6518 6184 * layers of the driver.
6519 6185 *
6520 6186 * Arguments: arg - driver soft state (unit) structure
6521 6187 *
6522 6188 * Context: Executes in a timeout(9F) thread context
6523 6189 */
6524 6190
6525 6191 static void
6526 6192 sd_pm_idletimeout_handler(void *arg)
6527 6193 {
6528 6194 const hrtime_t idletime = sd_pm_idletime * NANOSEC;
6529 6195 struct sd_lun *un = arg;
6530 6196
6531 6197 mutex_enter(&sd_detach_mutex);
6532 6198 if (un->un_detach_count != 0) {
6533 6199 /* Abort if the instance is detaching */
6534 6200 mutex_exit(&sd_detach_mutex);
6535 6201 return;
6536 6202 }
6537 6203 mutex_exit(&sd_detach_mutex);
6538 6204
6539 6205 /*
6540 6206 * Grab both mutexes, in the proper order, since we're accessing
6541 6207 * both PM and softstate variables.
6542 6208 */
6543 6209 mutex_enter(SD_MUTEX(un));
6544 6210 mutex_enter(&un->un_pm_mutex);
6545 6211 if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
6546 6212 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6547 6213 /*
6548 6214 * Update the chain types.
6549 6215 		 * This takes effect on the next new command received.
6550 6216 */
6551 6217 if (un->un_f_non_devbsize_supported) {
6552 6218 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6553 6219 } else {
6554 6220 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6555 6221 }
6556 6222 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6557 6223
6558 6224 SD_TRACE(SD_LOG_IO_PM, un,
6559 6225 "sd_pm_idletimeout_handler: idling device\n");
6560 6226 (void) pm_idle_component(SD_DEVINFO(un), 0);
6561 6227 un->un_pm_idle_timeid = NULL;
6562 6228 } else {
6563 6229 un->un_pm_idle_timeid =
6564 6230 timeout(sd_pm_idletimeout_handler, un,
6565 6231 (drv_usectohz((clock_t)300000))); /* 300 ms. */
6566 6232 }
6567 6233 mutex_exit(&un->un_pm_mutex);
6568 6234 mutex_exit(SD_MUTEX(un));
6569 6235 }
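
/*
 * Editorial sketch (not part of the original source): the handler
 * above is the standard "idle extension" pattern -- declare the
 * component idle only after a full quiet interval has elapsed since
 * the last command completed, otherwise re-arm and look again.
 * Stripped of the sd-specific locking and chain-type bookkeeping it
 * reduces to the following; idle_extender() is a hypothetical name.
 */
static void
idle_extender(void *arg)
{
	struct sd_lun *un = arg;

	if (gethrtime() - un->un_pm_idle_time > sd_pm_idletime * NANOSEC) {
		/* Quiet long enough: send the idle and retire the timer. */
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		/* A command completed recently: check again in 300 ms. */
		un->un_pm_idle_timeid = timeout(idle_extender, un,
		    drv_usectohz(300000));
	}
}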
6570 6236
6571 6237
6572 6238 /*
6573 6239 * Function: sd_pm_timeout_handler
6574 6240 *
6575 6241 * Description: Callback to tell framework we are idle.
6576 6242 *
6577 6243 * Context: timeout(9f) thread context.
6578 6244 */
6579 6245
6580 6246 static void
6581 6247 sd_pm_timeout_handler(void *arg)
6582 6248 {
6583 6249 struct sd_lun *un = arg;
6584 6250
6585 6251 (void) pm_idle_component(SD_DEVINFO(un), 0);
6586 6252 mutex_enter(&un->un_pm_mutex);
6587 6253 un->un_pm_timeid = NULL;
6588 6254 mutex_exit(&un->un_pm_mutex);
6589 6255 }
6590 6256
6591 6257
6592 6258 /*
6593 6259 * Function: sdpower
6594 6260 *
6595 6261 * Description: PM entry point.
6596 6262 *
6597 6263 * Return Code: DDI_SUCCESS
6598 6264 * DDI_FAILURE
6599 6265 *
6600 6266 * Context: Kernel thread context
6601 6267 */
6602 6268
6603 6269 static int
6604 6270 sdpower(dev_info_t *devi, int component, int level)
6605 6271 {
6606 6272 struct sd_lun *un;
6607 6273 int instance;
6608 6274 int rval = DDI_SUCCESS;
6609 6275 uint_t i, log_page_size, maxcycles, ncycles;
6610 6276 uchar_t *log_page_data;
6611 6277 int log_sense_page;
6612 6278 int medium_present;
6613 6279 time_t intvlp;
6614 6280 struct pm_trans_data sd_pm_tran_data;
6615 6281 uchar_t save_state = SD_STATE_NORMAL;
6616 6282 int sval;
6617 6283 uchar_t state_before_pm;
6618 6284 int got_semaphore_here;
6619 6285 sd_ssc_t *ssc;
6620 6286 int last_power_level = SD_SPINDLE_UNINIT;
6621 6287
6622 6288 instance = ddi_get_instance(devi);
6623 6289
6624 6290 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6625 6291 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6626 6292 return (DDI_FAILURE);
6627 6293 }
6628 6294
6629 6295 ssc = sd_ssc_init(un);
6630 6296
6631 6297 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6632 6298
6633 6299 /*
6634 6300 * Must synchronize power down with close.
6635 6301 * Attempt to decrement/acquire the open/close semaphore,
6636 6302 * but do NOT wait on it. If it's not greater than zero,
6637 6303 * ie. it can't be decremented without waiting, then
6638 6304 * someone else, either open or close, already has it
6639 6305 * and the try returns 0. Use that knowledge here to determine
6640 6306 * if it's OK to change the device power level.
6641 6307 * Also, only increment it on exit if it was decremented, ie. gotten,
6642 6308 * here.
6643 6309 */
6644 6310 got_semaphore_here = sema_tryp(&un->un_semoclose);
6645 6311
6646 6312 mutex_enter(SD_MUTEX(un));
6647 6313
6648 6314 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6649 6315 un->un_ncmds_in_driver);
6650 6316
6651 6317 /*
6652 6318 * If un_ncmds_in_driver is non-zero it indicates commands are
6653 6319 * already being processed in the driver, or if the semaphore was
6654 6320 * not gotten here it indicates an open or close is being processed.
6655 6321 	 * At the same time somebody is requesting to go to a lower power
6656 6322 	 * level that can't perform I/O; that can't be allowed while work is
6657 6323 	 * in progress, so we need to return failure.
6658 6324 */
6659 6325 if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6660 6326 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6661 6327 mutex_exit(SD_MUTEX(un));
6662 6328
6663 6329 if (got_semaphore_here != 0) {
6664 6330 sema_v(&un->un_semoclose);
6665 6331 }
6666 6332 SD_TRACE(SD_LOG_IO_PM, un,
6667 6333 "sdpower: exit, device has queued cmds.\n");
6668 6334
6669 6335 goto sdpower_failed;
6670 6336 }
6671 6337
6672 6338 /*
6673 6339 	 * If the state is OFFLINE, the disk is completely dead. In our case
6674 6340 	 * we would have to power the disk on or off by sending commands,
6675 6341 	 * which would fail anyway, so return back here.
6676 6342 *
6677 6343 * Power changes to a device that's OFFLINE or SUSPENDED
6678 6344 * are not allowed.
6679 6345 */
6680 6346 if ((un->un_state == SD_STATE_OFFLINE) ||
6681 6347 (un->un_state == SD_STATE_SUSPENDED)) {
6682 6348 mutex_exit(SD_MUTEX(un));
6683 6349
6684 6350 if (got_semaphore_here != 0) {
6685 6351 sema_v(&un->un_semoclose);
6686 6352 }
6687 6353 SD_TRACE(SD_LOG_IO_PM, un,
6688 6354 "sdpower: exit, device is off-line.\n");
6689 6355
6690 6356 goto sdpower_failed;
6691 6357 }
6692 6358
6693 6359 /*
6694 6360 * Change the device's state to indicate it's power level
6695 6361 * is being changed. Do this to prevent a power off in the
6696 6362 * middle of commands, which is especially bad on devices
6697 6363 * that are really powered off instead of just spun down.
6698 6364 */
6699 6365 state_before_pm = un->un_state;
6700 6366 un->un_state = SD_STATE_PM_CHANGING;
6701 6367
6702 6368 mutex_exit(SD_MUTEX(un));
6703 6369
6704 6370 /*
6705 6371 * If log sense command is not supported, bypass the
6706 6372 * following checking, otherwise, check the log sense
6707 6373 * information for this device.
6708 6374 */
6709 6375 if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6710 6376 un->un_f_log_sense_supported) {
6711 6377 /*
6712 6378 * Get the log sense information to understand whether the
6713 6379 		 * power cycle counts have gone beyond the threshold.
6714 6380 */
6715 6381 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6716 6382 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6717 6383
6718 6384 mutex_enter(SD_MUTEX(un));
6719 6385 log_sense_page = un->un_start_stop_cycle_page;
6720 6386 mutex_exit(SD_MUTEX(un));
6721 6387
6722 6388 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6723 6389 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6724 6390
6725 6391 if (rval != 0) {
6726 6392 if (rval == EIO)
6727 6393 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6728 6394 else
6729 6395 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6730 6396 }
6731 6397
6732 6398 #ifdef SDDEBUG
6733 6399 if (sd_force_pm_supported) {
6734 6400 /* Force a successful result */
6735 6401 rval = 0;
6736 6402 }
6737 6403 #endif
6738 6404 if (rval != 0) {
6739 6405 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6740 6406 "Log Sense Failed\n");
6741 6407
6742 6408 kmem_free(log_page_data, log_page_size);
6743 6409 /* Cannot support power management on those drives */
6744 6410
6745 6411 if (got_semaphore_here != 0) {
6746 6412 sema_v(&un->un_semoclose);
6747 6413 }
6748 6414 /*
6749 6415 			 * On exit put the state back to its original value
6750 6416 * and broadcast to anyone waiting for the power
6751 6417 * change completion.
6752 6418 */
6753 6419 mutex_enter(SD_MUTEX(un));
6754 6420 un->un_state = state_before_pm;
6755 6421 cv_broadcast(&un->un_suspend_cv);
6756 6422 mutex_exit(SD_MUTEX(un));
6757 6423 SD_TRACE(SD_LOG_IO_PM, un,
6758 6424 "sdpower: exit, Log Sense Failed.\n");
6759 6425
6760 6426 goto sdpower_failed;
6761 6427 }
6762 6428
6763 6429 /*
6764 6430 * From the page data - Convert the essential information to
6765 6431 * pm_trans_data
6766 6432 */
6767 6433 maxcycles =
6768 6434 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6769 6435 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6770 6436
6771 6437 ncycles =
6772 6438 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6773 6439 (log_page_data[0x26] << 8) | log_page_data[0x27];
6774 6440
6775 6441 if (un->un_f_pm_log_sense_smart) {
6776 6442 sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6777 6443 sd_pm_tran_data.un.smart_count.consumed = ncycles;
6778 6444 sd_pm_tran_data.un.smart_count.flag = 0;
6779 6445 sd_pm_tran_data.format = DC_SMART_FORMAT;
6780 6446 } else {
6781 6447 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6782 6448 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6783 6449 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6784 6450 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6785 6451 log_page_data[8+i];
6786 6452 }
6787 6453 sd_pm_tran_data.un.scsi_cycles.flag = 0;
6788 6454 sd_pm_tran_data.format = DC_SCSI_FORMAT;
6789 6455 }
6790 6456
6791 6457 kmem_free(log_page_data, log_page_size);
6792 6458
6793 6459 /*
6794 6460 * Call pm_trans_check routine to get the Ok from
6795 6461 * the global policy
6796 6462 */
6797 6463 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6798 6464 #ifdef SDDEBUG
6799 6465 if (sd_force_pm_supported) {
6800 6466 /* Force a successful result */
6801 6467 rval = 1;
6802 6468 }
6803 6469 #endif
6804 6470 switch (rval) {
6805 6471 case 0:
6806 6472 /*
6807 6473 			 * Not OK to power cycle, or error in the parameters
6808 6474 			 * passed. pm_trans_check() has given the advised time
6809 6475 			 * to wait before the next power cycle; based on that
6810 6476 			 * intvlp value we pretend to be busy so that the pm framework
6811 6477 * will never call our power entry point. Because of
6812 6478 * that install a timeout handler and wait for the
6813 6479 * recommended time to elapse so that power management
6814 6480 * can be effective again.
6815 6481 *
6816 6482 * To effect this behavior, call pm_busy_component to
6817 6483 * indicate to the framework this device is busy.
6818 6484 * By not adjusting un_pm_count the rest of PM in
6819 6485 * the driver will function normally, and independent
6820 6486 * of this but because the framework is told the device
6821 6487 * is busy it won't attempt powering down until it gets
6822 6488 * a matching idle. The timeout handler sends this.
6823 6489 * Note: sd_pm_entry can't be called here to do this
6824 6490 * because sdpower may have been called as a result
6825 6491 * of a call to pm_raise_power from within sd_pm_entry.
6826 6492 *
6827 6493 * If a timeout handler is already active then
6828 6494 * don't install another.
6829 6495 */
6830 6496 mutex_enter(&un->un_pm_mutex);
6831 6497 if (un->un_pm_timeid == NULL) {
6832 6498 un->un_pm_timeid =
6833 6499 timeout(sd_pm_timeout_handler,
6834 6500 un, intvlp * drv_usectohz(1000000));
6835 6501 mutex_exit(&un->un_pm_mutex);
6836 6502 (void) pm_busy_component(SD_DEVINFO(un), 0);
6837 6503 } else {
6838 6504 mutex_exit(&un->un_pm_mutex);
6839 6505 }
6840 6506 if (got_semaphore_here != 0) {
6841 6507 sema_v(&un->un_semoclose);
6842 6508 }
6843 6509 /*
6844 6510 			 * On exit put the state back to its original value
6845 6511 * and broadcast to anyone waiting for the power
6846 6512 * change completion.
6847 6513 */
6848 6514 mutex_enter(SD_MUTEX(un));
6849 6515 un->un_state = state_before_pm;
6850 6516 cv_broadcast(&un->un_suspend_cv);
6851 6517 mutex_exit(SD_MUTEX(un));
6852 6518
6853 6519 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6854 6520 "trans check Failed, not ok to power cycle.\n");
6855 6521
6856 6522 goto sdpower_failed;
6857 6523 case -1:
6858 6524 if (got_semaphore_here != 0) {
6859 6525 sema_v(&un->un_semoclose);
6860 6526 }
6861 6527 /*
6862 6528 			 * On exit put the state back to its original value
6863 6529 * and broadcast to anyone waiting for the power
6864 6530 * change completion.
6865 6531 */
6866 6532 mutex_enter(SD_MUTEX(un));
6867 6533 un->un_state = state_before_pm;
6868 6534 cv_broadcast(&un->un_suspend_cv);
6869 6535 mutex_exit(SD_MUTEX(un));
6870 6536 SD_TRACE(SD_LOG_IO_PM, un,
6871 6537 "sdpower: exit, trans check command Failed.\n");
6872 6538
6873 6539 goto sdpower_failed;
6874 6540 }
6875 6541 }
6876 6542
6877 6543 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6878 6544 /*
6879 6545 * Save the last state... if the STOP FAILS we need it
6880 6546 * for restoring
6881 6547 */
6882 6548 mutex_enter(SD_MUTEX(un));
6883 6549 save_state = un->un_last_state;
6884 6550 last_power_level = un->un_power_level;
6885 6551 /*
6886 6552 		 * There must not be any cmds getting processed
6887 6553 * in the driver when we get here. Power to the
6888 6554 * device is potentially going off.
6889 6555 */
6890 6556 ASSERT(un->un_ncmds_in_driver == 0);
6891 6557 mutex_exit(SD_MUTEX(un));
6892 6558
6893 6559 /*
6894 6560 * For now PM suspend the device completely before spindle is
6895 6561 * turned off
6896 6562 */
6897 6563 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
6898 6564 == DDI_FAILURE) {
6899 6565 if (got_semaphore_here != 0) {
6900 6566 sema_v(&un->un_semoclose);
6901 6567 }
6902 6568 /*
6903 6569 			 * On exit put the state back to its original value
6904 6570 * and broadcast to anyone waiting for the power
6905 6571 * change completion.
6906 6572 */
6907 6573 mutex_enter(SD_MUTEX(un));
6908 6574 un->un_state = state_before_pm;
6909 6575 un->un_power_level = last_power_level;
6910 6576 cv_broadcast(&un->un_suspend_cv);
6911 6577 mutex_exit(SD_MUTEX(un));
6912 6578 SD_TRACE(SD_LOG_IO_PM, un,
6913 6579 "sdpower: exit, PM suspend Failed.\n");
6914 6580
6915 6581 goto sdpower_failed;
6916 6582 }
6917 6583 }
6918 6584
6919 6585 /*
6920 6586 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6921 6587 	 * close, or strategy. Dump no longer uses this routine; it uses its
6922 6588 	 * own code so it can be done in polled mode.
6923 6589 */
6924 6590
6925 6591 medium_present = TRUE;
6926 6592
6927 6593 /*
6928 6594 * When powering up, issue a TUR in case the device is at unit
6929 6595 * attention. Don't do retries. Bypass the PM layer, otherwise
6930 6596 * a deadlock on un_pm_busy_cv will occur.
6931 6597 */
6932 6598 if (SD_PM_IS_IO_CAPABLE(un, level)) {
6933 6599 sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6934 6600 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6935 6601 if (sval != 0)
6936 6602 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6937 6603 }
6938 6604
6939 6605 if (un->un_f_power_condition_supported) {
6940 6606 char *pm_condition_name[] = {"STOPPED", "STANDBY",
6941 6607 "IDLE", "ACTIVE"};
6942 6608 SD_TRACE(SD_LOG_IO_PM, un,
6943 6609 "sdpower: sending \'%s\' power condition",
6944 6610 pm_condition_name[level]);
6945 6611 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
6946 6612 sd_pl2pc[level], SD_PATH_DIRECT);
6947 6613 } else {
6948 6614 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6949 6615 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6950 6616 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
6951 6617 ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
6952 6618 SD_TARGET_STOP), SD_PATH_DIRECT);
6953 6619 }
6954 6620 if (sval != 0) {
6955 6621 if (sval == EIO)
6956 6622 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6957 6623 else
6958 6624 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6959 6625 }
6960 6626
6961 6627 /* Command failed, check for media present. */
6962 6628 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6963 6629 medium_present = FALSE;
6964 6630 }
6965 6631
6966 6632 /*
6967 6633 * The conditions of interest here are:
6968 6634 * if a spindle off with media present fails,
6969 6635 * then restore the state and return an error.
6970 6636 * else if a spindle on fails,
6971 6637 * then return an error (there's no state to restore).
6972 6638 * In all other cases we setup for the new state
6973 6639 * and return success.
6974 6640 */
6975 6641 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6976 6642 if ((medium_present == TRUE) && (sval != 0)) {
6977 6643 /* The stop command from above failed */
6978 6644 rval = DDI_FAILURE;
6979 6645 /*
6980 6646 * The stop command failed, and we have media
6981 6647 			 * present. Put the level back by calling
6982 6648 			 * sd_pm_state_change() and set the state back to
6983 6649 			 * its previous value.
6984 6650 */
6985 6651 (void) sd_pm_state_change(un, last_power_level,
6986 6652 SD_PM_STATE_ROLLBACK);
6987 6653 mutex_enter(SD_MUTEX(un));
6988 6654 un->un_last_state = save_state;
6989 6655 mutex_exit(SD_MUTEX(un));
6990 6656 } else if (un->un_f_monitor_media_state) {
6991 6657 /*
6992 6658 * The stop command from above succeeded.
6993 6659 * Terminate watch thread in case of removable media
6994 6660 * devices going into low power state. This is as per
6995 6661 * the requirements of pm framework, otherwise commands
6996 6662 * will be generated for the device (through watch
6997 6663 * thread), even when the device is in low power state.
6998 6664 */
6999 6665 mutex_enter(SD_MUTEX(un));
7000 6666 un->un_f_watcht_stopped = FALSE;
7001 6667 if (un->un_swr_token != NULL) {
7002 6668 opaque_t temp_token = un->un_swr_token;
7003 6669 un->un_f_watcht_stopped = TRUE;
7004 6670 un->un_swr_token = NULL;
7005 6671 mutex_exit(SD_MUTEX(un));
7006 6672 (void) scsi_watch_request_terminate(temp_token,
7007 6673 SCSI_WATCH_TERMINATE_ALL_WAIT);
7008 6674 } else {
7009 6675 mutex_exit(SD_MUTEX(un));
7010 6676 }
7011 6677 }
7012 6678 } else {
7013 6679 /*
7014 6680 * The level requested is I/O capable.
7015 6681 * Legacy behavior: return success on a failed spinup
7016 6682 * if there is no media in the drive.
7017 6683 * Do this by looking at medium_present here.
7018 6684 */
7019 6685 if ((sval != 0) && medium_present) {
7020 6686 /* The start command from above failed */
7021 6687 rval = DDI_FAILURE;
7022 6688 } else {
7023 6689 /*
7024 6690 * The start command from above succeeded
7025 6691 * PM resume the devices now that we have
7026 6692 * started the disks
7027 6693 */
7028 6694 (void) sd_pm_state_change(un, level,
7029 6695 SD_PM_STATE_CHANGE);
7030 6696
7031 6697 /*
7032 6698 * Resume the watch thread since it was suspended
7033 6699 * when the device went into low power mode.
7034 6700 */
7035 6701 if (un->un_f_monitor_media_state) {
7036 6702 mutex_enter(SD_MUTEX(un));
7037 6703 if (un->un_f_watcht_stopped == TRUE) {
7038 6704 opaque_t temp_token;
7039 6705
7040 6706 un->un_f_watcht_stopped = FALSE;
7041 6707 mutex_exit(SD_MUTEX(un));
7042 6708 temp_token =
7043 6709 sd_watch_request_submit(un);
7044 6710 mutex_enter(SD_MUTEX(un));
7045 6711 un->un_swr_token = temp_token;
7046 6712 }
7047 6713 mutex_exit(SD_MUTEX(un));
7048 6714 }
7049 6715 }
7050 6716 }
7051 6717
7052 6718 if (got_semaphore_here != 0) {
7053 6719 sema_v(&un->un_semoclose);
7054 6720 }
7055 6721 /*
 7056 6722 	 * On exit put the state back to its original value
7057 6723 * and broadcast to anyone waiting for the power
7058 6724 * change completion.
7059 6725 */
7060 6726 mutex_enter(SD_MUTEX(un));
7061 6727 un->un_state = state_before_pm;
7062 6728 cv_broadcast(&un->un_suspend_cv);
7063 6729 mutex_exit(SD_MUTEX(un));
7064 6730
7065 6731 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7066 6732
7067 6733 sd_ssc_fini(ssc);
7068 6734 return (rval);
7069 6735
7070 6736 sdpower_failed:
7071 6737
7072 6738 sd_ssc_fini(ssc);
7073 6739 return (DDI_FAILURE);
7074 6740 }
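The cv_broadcast() on un_suspend_cv above is one half of a handshake. A
minimal sketch of the waiter side, assuming the SD_STATE_PM_CHANGING state
used elsewhere in this driver (illustrative, not a verbatim excerpt):

	mutex_enter(SD_MUTEX(un));
	while (un->un_state == SD_STATE_PM_CHANGING)
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	mutex_exit(SD_MUTEX(un));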
7075 6741
7076 6742
7077 6743
7078 6744 /*
7079 6745 * Function: sdattach
7080 6746 *
7081 6747 * Description: Driver's attach(9e) entry point function.
7082 6748 *
7083 6749 * Arguments: devi - opaque device info handle
7084 6750 * cmd - attach type
7085 6751 *
7086 6752 * Return Code: DDI_SUCCESS
7087 6753 * DDI_FAILURE
7088 6754 *
7089 6755 * Context: Kernel thread context
7090 6756 */
7091 6757
7092 6758 static int
7093 6759 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
7094 6760 {
7095 - switch (cmd) {
7096 - case DDI_ATTACH:
7097 - return (sd_unit_attach(devi));
7098 - case DDI_RESUME:
7099 - return (sd_ddi_resume(devi));
7100 - default:
7101 - break;
7102 - }
7103 - return (DDI_FAILURE);
7104 -}
7105 -
7106 -
7107 -/*
7108 - * Function: sddetach
7109 - *
7110 - * Description: Driver's detach(9E) entry point function.
7111 - *
7112 - * Arguments: devi - opaque device info handle
7113 - * cmd - detach type
7114 - *
7115 - * Return Code: DDI_SUCCESS
7116 - * DDI_FAILURE
7117 - *
7118 - * Context: Kernel thread context
7119 - */
7120 -
7121 -static int
7122 -sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
7123 -{
7124 - switch (cmd) {
7125 - case DDI_DETACH:
7126 - return (sd_unit_detach(devi));
7127 - case DDI_SUSPEND:
7128 - return (sd_ddi_suspend(devi));
7129 - default:
7130 - break;
7131 - }
7132 - return (DDI_FAILURE);
7133 -}
7134 -
7135 -
7136 -/*
7137 - * Function: sd_sync_with_callback
7138 - *
7139 - * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7140 - * state while the callback routine is active.
7141 - *
7142 - * Arguments: un: softstate structure for the instance
7143 - *
7144 - * Context: Kernel thread context
7145 - */
7146 -
7147 -static void
7148 -sd_sync_with_callback(struct sd_lun *un)
7149 -{
7150 - ASSERT(un != NULL);
7151 -
7152 - mutex_enter(SD_MUTEX(un));
7153 -
7154 - ASSERT(un->un_in_callback >= 0);
7155 -
7156 - while (un->un_in_callback > 0) {
7157 - mutex_exit(SD_MUTEX(un));
7158 - delay(2);
7159 - mutex_enter(SD_MUTEX(un));
7160 - }
7161 -
7162 - mutex_exit(SD_MUTEX(un));
7163 -}
7164 -
7165 -/*
7166 - * Function: sd_unit_attach
7167 - *
7168 - * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
7169 - * the soft state structure for the device and performs
7170 - * all necessary structure and device initializations.
7171 - *
7172 - * Arguments: devi: the system's dev_info_t for the device.
7173 - *
7174 - * Return Code: DDI_SUCCESS if attach is successful.
7175 - * DDI_FAILURE if any part of the attach fails.
7176 - *
7177 - * Context: Called at attach(9e) time for the DDI_ATTACH flag.
7178 - * Kernel thread context only. Can sleep.
7179 - */
7180 -
7181 -static int
7182 -sd_unit_attach(dev_info_t *devi)
7183 -{
7184 6761 struct scsi_device *devp;
7185 6762 struct sd_lun *un;
7186 6763 char *variantp;
7187 - char name_str[48];
7188 - int reservation_flag = SD_TARGET_IS_UNRESERVED;
7189 6764 int instance;
7190 - int rval;
7191 - int wc_enabled;
7192 - int wc_changeable;
7193 6765 int tgt;
7194 - uint64_t capacity;
7195 - uint_t lbasize = 0;
7196 6766 dev_info_t *pdip = ddi_get_parent(devi);
7197 - int offbyone = 0;
7198 - int geom_label_valid = 0;
6767 + int max_xfer_size;
7199 6768 sd_ssc_t *ssc;
7200 - int status;
7201 6769 struct sd_fm_internal *sfip = NULL;
7202 - int max_xfer_size;
7203 6770
6771 + switch (cmd) {
6772 + case DDI_ATTACH:
6773 + break;
6774 + case DDI_RESUME:
6775 + return (sd_ddi_resume(devi));
6776 + default:
6777 + return (DDI_FAILURE);
6778 + }
6779 +
7204 6780 /*
7205 6781 * Retrieve the target driver's private data area. This was set
7206 6782 * up by the HBA.
7207 6783 */
7208 6784 devp = ddi_get_driver_private(devi);
7209 6785
7210 6786 /*
7211 6787 * Retrieve the target ID of the device.
7212 6788 */
7213 6789 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7214 6790 SCSI_ADDR_PROP_TARGET, -1);
7215 6791
7216 6792 /*
7217 6793 * Since we have no idea what state things were left in by the last
7218 6794 * user of the device, set up some 'default' settings, ie. turn 'em
7219 6795 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
7220 6796 * Do this before the scsi_probe, which sends an inquiry.
7221 6797 * This is a fix for bug (4430280).
7222 6798 * Of special importance is wide-xfer. The drive could have been left
7223 6799 * in wide transfer mode by the last driver to communicate with it,
7224 6800 * this includes us. If that's the case, and if the following is not
7225 6801 * setup properly or we don't re-negotiate with the drive prior to
7226 6802 * transferring data to/from the drive, it causes bus parity errors,
7227 6803 * data overruns, and unexpected interrupts. This first occurred when
7228 6804 * the fix for bug (4378686) was made.
7229 6805 */
7230 6806 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
7231 6807 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
7232 6808 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);
7233 6809
7234 6810 /*
7235 6811 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
7236 6812 * on a target. Setting it per lun instance actually sets the
7237 6813 * capability of this target, which affects those luns already
 7238 6814 	 * attached on the same target. So during attach, we can disable
7239 6815 * this capability only when no other lun has been attached on this
7240 6816 * target. By doing this, we assume a target has the same tagged-qing
7241 6817 * capability for every lun. The condition can be removed when HBA
7242 6818 * is changed to support per lun based tagged-qing capability.
7243 6819 */
7244 6820 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7245 6821 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
7246 6822 }
7247 6823
7248 6824 /*
7249 6825 * Use scsi_probe() to issue an INQUIRY command to the device.
7250 6826 * This call will allocate and fill in the scsi_inquiry structure
7251 6827 * and point the sd_inq member of the scsi_device structure to it.
7252 6828 * If the attach succeeds, then this memory will not be de-allocated
7253 6829 * (via scsi_unprobe()) until the instance is detached.
7254 6830 */
7255 6831 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
7256 6832 goto probe_failed;
7257 6833 }
7258 6834
7259 6835 /*
7260 6836 * Check the device type as specified in the inquiry data and
7261 6837 * claim it if it is of a type that we support.
7262 6838 */
7263 6839 switch (devp->sd_inq->inq_dtype) {
7264 6840 case DTYPE_DIRECT:
7265 6841 break;
7266 6842 case DTYPE_RODIRECT:
7267 6843 break;
7268 6844 case DTYPE_OPTICAL:
7269 6845 break;
7270 6846 case DTYPE_NOTPRESENT:
7271 6847 default:
7272 6848 /* Unsupported device type; fail the attach. */
7273 6849 goto probe_failed;
7274 6850 }
7275 6851
7276 6852 /*
7277 6853 * Allocate the soft state structure for this unit.
7278 6854 *
7279 6855 * We rely upon this memory being set to all zeroes by
7280 6856 * ddi_soft_state_zalloc(). We assume that any member of the
7281 6857 * soft state structure that is not explicitly initialized by
7282 6858 * this routine will have a value of zero.
7283 6859 */
7284 6860 instance = ddi_get_instance(devp->sd_dev);
7285 6861 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
7286 6862 goto probe_failed;
7287 6863 }
7288 6864
7289 6865 /*
7290 6866 * Retrieve a pointer to the newly-allocated soft state.
7291 6867 *
7292 6868 * This should NEVER fail if the ddi_soft_state_zalloc() call above
7293 6869 * was successful, unless something has gone horribly wrong and the
7294 6870 * ddi's soft state internals are corrupt (in which case it is
7295 6871 * probably better to halt here than just fail the attach....)
7296 6872 */
7297 6873 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
7298 - panic("sd_unit_attach: NULL soft state on instance:0x%x",
6874 + panic("sdattach: NULL soft state on instance:0x%x",
7299 6875 instance);
7300 6876 /*NOTREACHED*/
7301 6877 }
7302 6878
7303 6879 /*
7304 6880 * Link the back ptr of the driver soft state to the scsi_device
7305 6881 * struct for this lun.
7306 6882 * Save a pointer to the softstate in the driver-private area of
7307 6883 * the scsi_device struct.
7308 6884 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
7309 6885 * we first set un->un_sd below.
7310 6886 */
7311 6887 un->un_sd = devp;
7312 6888 devp->sd_private = (opaque_t)un;
7313 6889
7314 6890 /*
7315 6891 * The following must be after devp is stored in the soft state struct.
7316 6892 */
7317 6893 #ifdef SDDEBUG
7318 6894 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7319 6895 "%s_unit_attach: un:0x%p instance:%d\n",
7320 6896 ddi_driver_name(devi), un, instance);
7321 6897 #endif
7322 6898
7323 6899 /*
7324 6900 * Set up the device type and node type (for the minor nodes).
7325 6901 * By default we assume that the device can at least support the
7326 6902 * Common Command Set. Call it a CD-ROM if it reports itself
7327 6903 * as a RODIRECT device.
7328 6904 */
7329 6905 switch (devp->sd_inq->inq_dtype) {
7330 6906 case DTYPE_RODIRECT:
7331 6907 un->un_node_type = DDI_NT_CD_CHAN;
7332 6908 un->un_ctype = CTYPE_CDROM;
7333 6909 break;
7334 6910 case DTYPE_OPTICAL:
7335 6911 un->un_node_type = DDI_NT_BLOCK_CHAN;
7336 6912 un->un_ctype = CTYPE_ROD;
7337 6913 break;
7338 6914 default:
7339 6915 un->un_node_type = DDI_NT_BLOCK_CHAN;
7340 6916 un->un_ctype = CTYPE_CCS;
7341 6917 break;
7342 6918 }
7343 6919
7344 - /*
7345 - * Try to read the interconnect type from the HBA.
7346 - *
7347 - * Note: This driver is currently compiled as two binaries, a parallel
7348 - * scsi version (sd) and a fibre channel version (ssd). All functional
7349 - * differences are determined at compile time. In the future a single
7350 - * binary will be provided and the interconnect type will be used to
7351 - * differentiate between fibre and parallel scsi behaviors. At that time
7352 - * it will be necessary for all fibre channel HBAs to support this
7353 - * property.
7354 - *
7355 - * set un_f_is_fiber to TRUE ( default fiber )
7356 - */
7357 - un->un_f_is_fibre = TRUE;
6920 + /* Try to read the interconnect type from the HBA */
6921 + un->un_f_is_fibre = FALSE;
7358 6922 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7359 6923 case INTERCONNECT_SSA:
6924 + un->un_f_is_fibre = TRUE;
7360 6925 un->un_interconnect_type = SD_INTERCONNECT_SSA;
7361 6926 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7362 - "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
6927 + "sdattach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7363 6928 break;
7364 6929 case INTERCONNECT_PARALLEL:
7365 - un->un_f_is_fibre = FALSE;
7366 6930 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7367 6931 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7368 - "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
6932 + "sdattach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7369 6933 break;
7370 6934 case INTERCONNECT_SAS:
7371 - un->un_f_is_fibre = FALSE;
7372 6935 un->un_interconnect_type = SD_INTERCONNECT_SAS;
7373 6936 un->un_node_type = DDI_NT_BLOCK_SAS;
7374 6937 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7375 - "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
6938 + "sdattach: un:0x%p SD_INTERCONNECT_SAS\n", un);
7376 6939 break;
7377 6940 case INTERCONNECT_SATA:
7378 - un->un_f_is_fibre = FALSE;
7379 6941 un->un_interconnect_type = SD_INTERCONNECT_SATA;
7380 6942 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7381 - "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
6943 + "sdattach: un:0x%p SD_INTERCONNECT_SATA\n", un);
7382 6944 break;
7383 6945 case INTERCONNECT_FIBRE:
6946 + un->un_f_is_fibre = TRUE;
7384 6947 un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7385 6948 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7386 - "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
6949 + "sdattach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7387 6950 break;
7388 6951 case INTERCONNECT_FABRIC:
6952 + un->un_f_is_fibre = TRUE;
7389 6953 un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7390 6954 un->un_node_type = DDI_NT_BLOCK_FABRIC;
7391 6955 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7392 - "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
6956 + "sdattach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7393 6957 break;
7394 6958 default:
7395 -#ifdef SD_DEFAULT_INTERCONNECT_TYPE
7396 6959 /*
7397 - * The HBA does not support the "interconnect-type" property
7398 - * (or did not provide a recognized type).
7399 - *
7400 - * Note: This will be obsoleted when a single fibre channel
7401 - * and parallel scsi driver is delivered. In the meantime the
7402 - * interconnect type will be set to the platform default.If that
7403 - * type is not parallel SCSI, it means that we should be
7404 - * assuming "ssd" semantics. However, here this also means that
7405 - * the FC HBA is not supporting the "interconnect-type" property
7406 - * like we expect it to, so log this occurrence.
6960 + * The default is to assume that if a device does not support
6961 + * the "interconnect-type" property it is a parallel SCSI HBA
6962 + * and set the interconnect type for parallel SCSI.
7407 6963 */
7408 - un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7409 - if (!SD_IS_PARALLEL_SCSI(un)) {
7410 - SD_INFO(SD_LOG_ATTACH_DETACH, un,
7411 - "sd_unit_attach: un:0x%p Assuming "
7412 - "INTERCONNECT_FIBRE\n", un);
7413 - } else {
7414 - SD_INFO(SD_LOG_ATTACH_DETACH, un,
7415 - "sd_unit_attach: un:0x%p Assuming "
7416 - "INTERCONNECT_PARALLEL\n", un);
7417 - un->un_f_is_fibre = FALSE;
7418 - }
7419 -#else
7420 - /*
7421 - * Note: This source will be implemented when a single fibre
7422 - * channel and parallel scsi driver is delivered. The default
7423 - * will be to assume that if a device does not support the
7424 - * "interconnect-type" property it is a parallel SCSI HBA and
7425 - * we will set the interconnect type for parallel scsi.
7426 - */
7427 6964 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7428 - un->un_f_is_fibre = FALSE;
7429 -#endif
7430 6965 break;
7431 6966 }
7432 6967
7433 6968 if (un->un_f_is_fibre == TRUE) {
7434 6969 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7435 6970 SCSI_VERSION_3) {
7436 6971 switch (un->un_interconnect_type) {
7437 6972 case SD_INTERCONNECT_FIBRE:
7438 6973 case SD_INTERCONNECT_SSA:
7439 6974 un->un_node_type = DDI_NT_BLOCK_WWN;
7440 6975 break;
7441 6976 default:
7442 6977 break;
7443 6978 }
7444 6979 }
7445 6980 }
7446 6981
6982 + (void) ddi_prop_update_int(DDI_DEV_T_NONE, devi,
6983 + "allow-unconstrained-retire", 1);
6984 +
7447 6985 /*
7448 6986 * Initialize the Request Sense command for the target
7449 6987 */
7450 6988 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7451 6989 goto alloc_rqs_failed;
7452 6990 }
7453 6991
7454 - /*
7455 - * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc
7456 - * with separate binary for sd and ssd.
7457 - *
7458 - * x86 has 1 binary, un_retry_count is set base on connection type.
7459 - * The hardcoded values will go away when Sparc uses 1 binary
7460 - * for sd and ssd. This hardcoded values need to match
7461 - * SD_RETRY_COUNT in sddef.h
7462 - * The value used is base on interconnect type.
7463 - * fibre = 3, parallel = 5
7464 - */
7465 -#if defined(__i386) || defined(__amd64)
      6992 +	/* The retry count used is based on the interconnect type */
7466 6993 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7467 -#else
7468 - un->un_retry_count = SD_RETRY_COUNT;
7469 -#endif
7470 6994
7471 6995 /*
7472 6996 * Set the per disk retry count to the default number of retries
7473 6997 * for disks and CDROMs. This value can be overridden by the
7474 6998 * disk property list or an entry in sd.conf.
7475 6999 */
7476 7000 un->un_notready_retry_count =
7477 7001 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7478 7002 : DISK_NOT_READY_RETRY_COUNT(un);
7479 7003
7480 7004 /*
7481 7005 * Set the busy retry count to the default value of un_retry_count.
7482 7006 * This can be overridden by entries in sd.conf or the device
7483 7007 * config table.
7484 7008 */
7485 7009 un->un_busy_retry_count = un->un_retry_count;
7486 7010
7487 7011 /*
7488 7012 * Init the reset threshold for retries. This number determines
7489 7013 * how many retries must be performed before a reset can be issued
7490 7014 * (for certain error conditions). This can be overridden by entries
7491 7015 * in sd.conf or the device config table.
7492 7016 */
7493 7017 un->un_reset_retry_count = (un->un_retry_count / 2);
7494 7018
7495 7019 /*
7496 - * Set the victim_retry_count to the default un_retry_count
7020 + * Set the victim_retry_count to the default un_retry_count.
7021 + * This value is used in addition to the standard retry count.
7022 + * This can be overridden by entries in sd.conf or the device
7023 + * config table.
7497 7024 */
7498 - un->un_victim_retry_count = (2 * un->un_retry_count);
7025 + un->un_victim_retry_count = un->un_retry_count;
7499 7026
7500 7027 /*
7501 7028 * Set the reservation release timeout to the default value of
7502 - * 5 seconds. This can be overridden by entries in ssd.conf or the
7029 + * 5 seconds. This can be overridden by entries in sd.conf or the
7503 7030 * device config table.
7504 7031 */
7505 7032 un->un_reserve_release_time = 5;
7506 7033
7034 + un->un_io_time = sd_io_time;
7035 +
7036 + un->un_slow_io_threshold = sd_slow_io_threshold;
7037 +
7038 + un->un_f_lun_reset_enabled = sd_enable_lun_reset;
7039 +
7507 7040 /*
7508 7041 * Set up the default maximum transfer size. Note that this may
7509 7042 * get updated later in the attach, when setting up default wide
7510 7043 * operations for disks.
7511 7044 */
7512 -#if defined(__i386) || defined(__amd64)
7513 7045 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7514 7046 un->un_partial_dma_supported = 1;
7515 -#else
7516 - un->un_max_xfer_size = (uint_t)maxphys;
7517 -#endif
7518 7047
7519 7048 /*
7520 7049 * Get "allow bus device reset" property (defaults to "enabled" if
7521 7050 * the property was not defined). This is to disable bus resets for
7522 7051 * certain kinds of error recovery. Note: In the future when a run-time
7523 7052 * fibre check is available the soft state flag should default to
7524 7053 * enabled.
7525 7054 */
7526 7055 if (un->un_f_is_fibre == TRUE) {
7527 7056 un->un_f_allow_bus_device_reset = TRUE;
7528 7057 } else {
7529 7058 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7530 7059 "allow-bus-device-reset", 1) != 0) {
7531 7060 un->un_f_allow_bus_device_reset = TRUE;
7532 7061 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7533 - "sd_unit_attach: un:0x%p Bus device reset "
7062 + "sdattach: un:0x%p Bus device reset "
7534 7063 "enabled\n", un);
7535 7064 } else {
7536 7065 un->un_f_allow_bus_device_reset = FALSE;
7537 7066 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7538 - "sd_unit_attach: un:0x%p Bus device reset "
7067 + "sdattach: un:0x%p Bus device reset "
7539 7068 "disabled\n", un);
7540 7069 }
7541 7070 }
7542 7071
7543 7072 /*
7544 7073 * Check if this is an ATAPI device. ATAPI devices use Group 1
7545 7074 * Read/Write commands and Group 2 Mode Sense/Select commands.
7546 7075 *
7547 7076 * Note: The "obsolete" way of doing this is to check for the "atapi"
7548 7077 * property. The new "variant" property with a value of "atapi" has been
7549 7078 * introduced so that future 'variants' of standard SCSI behavior (like
7550 7079 * atapi) could be specified by the underlying HBA drivers by supplying
7551 7080 * a new value for the "variant" property, instead of having to define a
7552 7081 * new property.
7553 7082 */
7554 7083 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7555 7084 un->un_f_cfg_is_atapi = TRUE;
7556 7085 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7557 - "sd_unit_attach: un:0x%p Atapi device\n", un);
7086 + "sdattach: un:0x%p Atapi device\n", un);
7558 7087 }
7559 7088 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7560 7089 &variantp) == DDI_PROP_SUCCESS) {
7561 7090 if (strcmp(variantp, "atapi") == 0) {
7562 7091 un->un_f_cfg_is_atapi = TRUE;
7563 7092 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7564 - "sd_unit_attach: un:0x%p Atapi device\n", un);
7093 + "sdattach: un:0x%p Atapi device\n", un);
7565 7094 }
7566 7095 ddi_prop_free(variantp);
7567 7096 }
7568 7097
7569 - un->un_cmd_timeout = SD_IO_TIME;
7098 + un->un_cmd_timeout = ((ISCD(un)) ? 2 : 1) * (ushort_t)un->un_io_time;
7099 + un->un_uscsi_timeout = un->un_cmd_timeout;
7100 + un->un_busy_timeout = SD_BSY_TIMEOUT;
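	/*
	 * Worked example for un_cmd_timeout above, assuming sd_io_time
	 * keeps its historical SD_IO_TIME default of 60 seconds: a CD-ROM
	 * gets a 120 second command timeout, a fixed disk gets 60 seconds.
	 */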
7570 7101
7571 - un->un_busy_timeout = SD_BSY_TIMEOUT;
7572 -
7573 - /* Info on current states, statuses, etc. (Updated frequently) */
7574 - un->un_state = SD_STATE_NORMAL;
7102 + /*
7103 + * Info on current states, statuses, etc. (Updated frequently)
7104 + *
      7105 +	 * Current state is ATTACHING until sd_unit_attach finishes.
7106 + * Last state is NORMAL so that sd_unit_attach can Restore_state()
7107 + * when it finishes successfully.
7108 + */
7109 + un->un_state = SD_STATE_ATTACHING;
7575 7110 un->un_last_state = SD_STATE_NORMAL;
7576 7111
7577 7112 /* Control & status info for command throttling */
7578 7113 un->un_throttle = sd_max_throttle;
7579 7114 un->un_saved_throttle = sd_max_throttle;
7580 7115 un->un_min_throttle = sd_min_throttle;
7581 7116
7582 7117 if (un->un_f_is_fibre == TRUE) {
7583 7118 un->un_f_use_adaptive_throttle = TRUE;
7584 7119 } else {
7585 7120 un->un_f_use_adaptive_throttle = FALSE;
7586 7121 }
7587 7122
7123 + /* Unit detach has to pause until outstanding commands abort */
7124 + un->un_f_detach_waiting = 0;
7125 + cv_init(&un->un_detach_cv, NULL, CV_DRIVER, NULL);
7126 +
7588 7127 /* Removable media support. */
7589 7128 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
7590 7129 un->un_mediastate = DKIO_NONE;
7591 7130 un->un_specified_mediastate = DKIO_NONE;
7592 7131
7593 7132 /* CVs for suspend/resume (PM or DR) */
7594 7133 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
7595 7134 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
7596 7135
7597 7136 /* Power management support. */
7598 7137 un->un_power_level = SD_SPINDLE_UNINIT;
7599 7138
7600 7139 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
7601 7140 un->un_f_wcc_inprog = 0;
7602 7141
7603 7142 /*
7604 7143 * The open/close semaphore is used to serialize threads executing
7605 7144 * in the driver's open & close entry point routines for a given
7606 7145 * instance.
7607 7146 */
7608 7147 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
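	/*
	 * Illustrative shape of how this semaphore is used (the actual
	 * callers are the driver's open/close entry points; sketch only):
	 *
	 *	sema_p(&un->un_semoclose);
	 *	... open or close processing for this instance ...
	 *	sema_v(&un->un_semoclose);
	 */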
7609 7148
7610 7149 /*
 7611 7150 	 * The conf file entry and softstate variable form a forceful override,
7612 7151 * meaning a non-zero value must be entered to change the default.
7613 7152 */
7614 7153 un->un_f_disksort_disabled = FALSE;
7615 7154 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT;
7616 7155 un->un_f_enable_rmw = FALSE;
7617 7156
7618 7157 /*
7619 7158 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but
7620 7159 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property.
7621 7160 */
7622 7161 un->un_f_mmc_gesn_polling = TRUE;
7623 7162
7624 7163 /*
7625 7164 * physical sector size defaults to DEV_BSIZE currently. We can
7626 7165 * override this value via the driver configuration file so we must
7627 7166 * set it before calling sd_read_unit_properties().
7628 7167 */
7629 7168 un->un_phy_blocksize = DEV_BSIZE;
7169 + un->un_f_sdconf_phy_blocksize = FALSE;
7630 7170
7631 7171 /*
7632 7172 * Retrieve the properties from the static driver table or the driver
7633 7173 * configuration file (.conf) for this unit and update the soft state
7634 7174 * for the device as needed for the indicated properties.
7635 7175 * Note: the property configuration needs to occur here as some of the
7636 7176 * following routines may have dependencies on soft state flags set
7637 7177 * as part of the driver property configuration.
7638 7178 */
7639 7179 sd_read_unit_properties(un);
7640 7180 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7641 - "sd_unit_attach: un:0x%p property configuration complete.\n", un);
7181 + "sdattach: un:0x%p property configuration complete.\n", un);
7642 7182
7643 7183 /*
 7644 7184 	 * A device is treated as hotpluggable only if it has the
 7645 7185 	 * "hotpluggable" property; otherwise it is regarded as
 7646 7186 	 * non-hotpluggable.
7647 7187 */
7648 7188 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7649 7189 -1) != -1) {
7650 7190 un->un_f_is_hotpluggable = TRUE;
7651 7191 }
7652 7192
7653 7193 /*
 7654 7194 	 * Set the unit's attributes (flags) according to "hotpluggable" and
 7655 7195 	 * the RMB bit in the INQUIRY data.
7656 7196 */
7657 7197 sd_set_unit_attributes(un, devi);
7658 7198
7659 7199 /*
7660 7200 * By default, we mark the capacity, lbasize, and geometry
7661 7201 * as invalid. Only if we successfully read a valid capacity
7662 7202 * will we update the un_blockcount and un_tgt_blocksize with the
7663 7203 * valid values (the geometry will be validated later).
7664 7204 */
7665 7205 un->un_f_blockcount_is_valid = FALSE;
7666 7206 un->un_f_tgt_blocksize_is_valid = FALSE;
7667 7207
7668 7208 /*
7669 7209 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
7670 7210 * otherwise.
7671 7211 */
7672 7212 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
7673 7213 un->un_blockcount = 0;
7674 7214
7675 7215 /*
7676 7216 * Set up the per-instance info needed to determine the correct
7677 7217 * CDBs and other info for issuing commands to the target.
7678 7218 */
7679 7219 sd_init_cdb_limits(un);
7680 7220
7681 7221 /*
7682 7222 * Set up the IO chains to use, based upon the target type.
7683 7223 */
7684 7224 if (un->un_f_non_devbsize_supported) {
7685 7225 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
7686 7226 } else {
7687 7227 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
7688 7228 }
7689 7229 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
7690 7230 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
7691 7231 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
7692 7232
7693 7233 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
7694 7234 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
7695 7235 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
7696 7236 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
7697 7237
7698 7238
7699 7239 if (ISCD(un)) {
7700 7240 un->un_additional_codes = sd_additional_codes;
7701 7241 } else {
7702 7242 un->un_additional_codes = NULL;
7703 7243 }
7704 7244
7705 7245 /*
7706 7246 * Create the kstats here so they can be available for attach-time
7707 7247 * routines that send commands to the unit (either polled or via
7708 7248 * sd_send_scsi_cmd).
7709 7249 *
7710 7250 * Note: This is a critical sequence that needs to be maintained:
7711 7251 * 1) Instantiate the kstats here, before any routines using the
7712 7252 * iopath (i.e. sd_send_scsi_cmd).
7713 7253 * 2) Instantiate and initialize the partition stats
7714 7254 * (sd_set_pstats).
7715 7255 * 3) Initialize the error stats (sd_set_errstats), following
7716 7256 * sd_validate_geometry(),sd_register_devid(),
7717 7257 * and sd_cache_control().
7718 7258 */
7719 7259
7720 7260 un->un_stats = kstat_create(sd_label, instance,
7721 7261 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
7722 7262 if (un->un_stats != NULL) {
7723 7263 un->un_stats->ks_lock = SD_MUTEX(un);
7724 7264 kstat_install(un->un_stats);
7725 7265 }
7726 7266 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7727 - "sd_unit_attach: un:0x%p un_stats created\n", un);
7267 + "sdattach: un:0x%p un_stats created\n", un);
7728 7268
7269 + un->un_unmapstats_ks = kstat_create(sd_label, instance, "unmapstats",
7270 + "misc", KSTAT_TYPE_NAMED, sizeof (*un->un_unmapstats) /
7271 + sizeof (kstat_named_t), 0);
7272 + if (un->un_unmapstats_ks) {
7273 + un->un_unmapstats = un->un_unmapstats_ks->ks_data;
7274 +
7275 + kstat_named_init(&un->un_unmapstats->us_cmds,
7276 + "commands", KSTAT_DATA_UINT64);
7277 + kstat_named_init(&un->un_unmapstats->us_errs,
7278 + "errors", KSTAT_DATA_UINT64);
7279 + kstat_named_init(&un->un_unmapstats->us_extents,
7280 + "extents", KSTAT_DATA_UINT64);
7281 + kstat_named_init(&un->un_unmapstats->us_bytes,
7282 + "bytes", KSTAT_DATA_UINT64);
7283 +
7284 + kstat_install(un->un_unmapstats_ks);
7285 + } else {
7286 + cmn_err(CE_NOTE, "!Cannot create unmap kstats for disk %d",
7287 + instance);
7288 + }
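	/*
	 * Illustrative update path for these named kstats (assumed shape;
	 * the real updates happen where UNMAP commands complete). The
	 * counters are touched only if the create above succeeded:
	 *
	 *	if (un->un_unmapstats != NULL)
	 *		un->un_unmapstats->us_cmds.value.ui64++;
	 */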
7289 +
7290 + un->un_lat_ksp = kstat_create(sd_label, instance, "io_latency",
7291 + "io_latency", KSTAT_TYPE_RAW, sizeof (un_lat_stat_t),
7292 + KSTAT_FLAG_PERSISTENT);
7293 +
7294 + if (un->un_lat_ksp != NULL) {
7295 + un->un_lat_ksp->ks_lock = SD_MUTEX(un);
7296 + un->un_lat_stats = (un_lat_stat_t *)un->un_lat_ksp->ks_data;
7297 + kstat_install(un->un_lat_ksp);
7298 + } else {
7299 + un->un_lat_stats = NULL;
7300 + }
7301 +
7729 7302 sd_create_errstats(un, instance);
7730 7303 if (un->un_errstats == NULL) {
7731 7304 goto create_errstats_failed;
7732 7305 }
7733 7306 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7734 - "sd_unit_attach: un:0x%p errstats created\n", un);
7307 + "sdattach: un:0x%p errstats created\n", un);
7735 7308
7736 7309 /*
7737 7310 * The following if/else code was relocated here from below as part
7738 7311 * of the fix for bug (4430280). However with the default setup added
7739 7312 * on entry to this routine, it's no longer absolutely necessary for
7740 7313 * this to be before the call to sd_spin_up_unit.
7741 7314 */
7742 7315 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
7743 7316 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
7744 7317 (devp->sd_inq->inq_ansi == 5)) &&
7745 7318 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;
7746 7319
7747 7320 /*
7748 7321 * If tagged queueing is supported by the target
7749 7322 * and by the host adapter then we will enable it
7750 7323 */
7751 7324 un->un_tagflags = 0;
7752 7325 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
7753 7326 (un->un_f_arq_enabled == TRUE)) {
7754 7327 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
7755 7328 1, 1) == 1) {
7756 7329 un->un_tagflags = FLAG_STAG;
7757 7330 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7758 - "sd_unit_attach: un:0x%p tag queueing "
7331 + "sdattach: un:0x%p tag queueing "
7759 7332 "enabled\n", un);
7760 7333 } else if (scsi_ifgetcap(SD_ADDRESS(un),
7761 7334 "untagged-qing", 0) == 1) {
7762 7335 un->un_f_opt_queueing = TRUE;
7763 7336 un->un_saved_throttle = un->un_throttle =
7764 7337 min(un->un_throttle, 3);
7765 7338 } else {
7766 7339 un->un_f_opt_queueing = FALSE;
7767 7340 un->un_saved_throttle = un->un_throttle = 1;
7768 7341 }
7769 7342 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
7770 7343 == 1) && (un->un_f_arq_enabled == TRUE)) {
7771 7344 /* The Host Adapter supports internal queueing. */
7772 7345 un->un_f_opt_queueing = TRUE;
7773 7346 un->un_saved_throttle = un->un_throttle =
7774 7347 min(un->un_throttle, 3);
7775 7348 } else {
7776 7349 un->un_f_opt_queueing = FALSE;
7777 7350 un->un_saved_throttle = un->un_throttle = 1;
7778 7351 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7779 - "sd_unit_attach: un:0x%p no tag queueing\n", un);
7352 + "sdattach: un:0x%p no tag queueing\n", un);
7780 7353 }
7781 7354
7782 7355 /*
7783 7356 * Enable large transfers for SATA/SAS drives
7784 7357 */
7785 7358 if (SD_IS_SERIAL(un)) {
7786 7359 un->un_max_xfer_size =
7787 7360 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7788 - sd_max_xfer_size, SD_MAX_XFER_SIZE);
7361 + "sd_max_xfer_size", SD_MAX_XFER_SIZE);
7789 7362 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7790 - "sd_unit_attach: un:0x%p max transfer "
7363 + "sdattach: un:0x%p max transfer "
7791 7364 "size=0x%x\n", un, un->un_max_xfer_size);
7792 7365
7793 7366 }
7794 7367
7795 7368 /* Setup or tear down default wide operations for disks */
7796 -
7797 - /*
7798 - * Note: Legacy: it may be possible for both "sd_max_xfer_size"
7799 - * and "ssd_max_xfer_size" to exist simultaneously on the same
7800 - * system and be set to different values. In the future this
7801 - * code may need to be updated when the ssd module is
7802 - * obsoleted and removed from the system. (4299588)
7803 - */
7804 7369 if (SD_IS_PARALLEL_SCSI(un) &&
7805 7370 (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
7806 7371 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
7807 7372 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7808 7373 1, 1) == 1) {
7809 7374 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7810 - "sd_unit_attach: un:0x%p Wide Transfer "
7375 + "sdattach: un:0x%p Wide Transfer "
7811 7376 "enabled\n", un);
7812 7377 }
7813 7378
7814 7379 /*
7815 7380 * If tagged queuing has also been enabled, then
7816 7381 * enable large xfers
7817 7382 */
7818 7383 if (un->un_saved_throttle == sd_max_throttle) {
7819 7384 un->un_max_xfer_size =
7820 7385 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7821 - sd_max_xfer_size, SD_MAX_XFER_SIZE);
7386 + "sd_max_xfer_size", SD_MAX_XFER_SIZE);
7822 7387 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7823 - "sd_unit_attach: un:0x%p max transfer "
7388 + "sdattach: un:0x%p max transfer "
7824 7389 "size=0x%x\n", un, un->un_max_xfer_size);
7825 7390 }
7826 7391 } else {
7827 7392 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7828 7393 0, 1) == 1) {
7829 7394 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7830 - "sd_unit_attach: un:0x%p "
7395 + "sdattach: un:0x%p "
7831 7396 "Wide Transfer disabled\n", un);
7832 7397 }
7833 7398 }
7834 7399 } else {
7835 7400 un->un_tagflags = FLAG_STAG;
7836 7401 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
7837 - devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
7402 + devi, 0, "sd_max_xfer_size", SD_MAX_XFER_SIZE);
7838 7403 }
7839 7404
7840 7405 /*
7841 7406 * If this target supports LUN reset, try to enable it.
7842 7407 */
7843 7408 if (un->un_f_lun_reset_enabled) {
7844 7409 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7845 - SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7410 + SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: "
7846 7411 "un:0x%p lun_reset capability set\n", un);
7847 7412 } else {
7848 - SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7413 + SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: "
7849 7414 "un:0x%p lun-reset capability not set\n", un);
7850 7415 }
7851 7416 }
7852 7417
7853 7418 /*
7854 - * Adjust the maximum transfer size. This is to fix
7419 + * XXX Adjust the maximum transfer size. This was to fix
7855 7420 * the problem of partial DMA support on SPARC. Some
7856 7421 * HBA driver, like aac, has very small dma_attr_maxxfer
7857 7422 * size, which requires partial DMA support on SPARC.
7858 - * In the future the SPARC pci nexus driver may solve
7859 - * the problem instead of this fix.
7860 7423 */
7861 7424 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7862 7425 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7863 - /* We need DMA partial even on sparc to ensure sddump() works */
7864 7426 un->un_max_xfer_size = max_xfer_size;
7865 7427 if (un->un_partial_dma_supported == 0)
7866 7428 un->un_partial_dma_supported = 1;
7867 7429 }
7868 7430 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7869 7431 DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7870 7432 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7871 7433 un->un_max_xfer_size) == 1) {
7872 7434 un->un_buf_breakup_supported = 1;
7873 - SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7435 + SD_INFO(SD_LOG_ATTACH_DETACH, un, "sdattach: "
7874 7436 "un:0x%p Buf breakup enabled\n", un);
7875 7437 }
7876 7438 }
7877 7439
7878 7440 /*
7879 7441 * Set PKT_DMA_PARTIAL flag.
7880 7442 */
7881 7443 if (un->un_partial_dma_supported == 1) {
7882 7444 un->un_pkt_flags = PKT_DMA_PARTIAL;
7883 7445 } else {
7884 7446 un->un_pkt_flags = 0;
7885 7447 }
7886 7448
7887 - /* Initialize sd_ssc_t for internal uscsi commands */
7888 - ssc = sd_ssc_init(un);
7889 7449 scsi_fm_init(devp);
7890 7450
7891 7451 /*
7892 - * Allocate memory for SCSI FMA stuffs.
7452 + * Allocate memory for SCSI FMA stuff.
7893 7453 */
7894 7454 un->un_fm_private =
7895 7455 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7896 7456 sfip = (struct sd_fm_internal *)un->un_fm_private;
7897 7457 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7898 7458 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7899 7459 sfip->fm_ssc.ssc_un = un;
7900 7460
7901 7461 if (ISCD(un) ||
7902 7462 un->un_f_has_removable_media ||
7903 7463 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
7904 7464 /*
 7905 7465 		 * We don't touch CDROMs or DDI_FM_NOT_CAPABLE devices.
 7906 7466 		 * Their logs are unchanged.
7907 7467 */
7908 7468 sfip->fm_log_level = SD_FM_LOG_NSUP;
7909 7469 } else {
7910 7470 /*
 7911 7471 		 * If we get here, it should be a non-CDROM, FM-capable
 7912 7472 		 * device, and it will not keep the old scsi_log as before
7913 7473 * in /var/adm/messages. However, the property
7914 7474 * "fm-scsi-log" will control whether the FM telemetry will
7915 7475 * be logged in /var/adm/messages.
7916 7476 */
7917 7477 int fm_scsi_log;
7918 7478 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7919 7479 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7920 7480
7921 7481 if (fm_scsi_log)
7922 7482 sfip->fm_log_level = SD_FM_LOG_EREPORT;
7923 7483 else
7924 7484 sfip->fm_log_level = SD_FM_LOG_SILENT;
7925 7485 }
7926 7486
7487 + /* Initialize sd_ssc_t for internal uscsi commands */
7488 + ssc = sd_ssc_init(un);
7489 +
7490 + mutex_enter(SD_MUTEX(un));
7927 7491 /*
7492 + * Initialize the devid for the unit. Indicate target reservation so
7493 + * that no real I/O is done for devices that need devid fabrication.
7494 + * We will try again in sd_unit_attach() if necessary.
7495 + */
7496 + if (un->un_f_devid_supported) {
7497 + sd_register_devid(ssc, devi, SD_TARGET_IS_RESERVED);
7498 + }
7499 + mutex_exit(SD_MUTEX(un));
7500 +
7501 + /* Uninitialize sd_ssc_t pointer */
7502 + sd_ssc_fini(ssc);
7503 +
7504 + cmlb_alloc_handle(&un->un_cmlbhandle);
7505 +
7506 + if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
7507 + VOID2BOOLEAN(un->un_f_has_removable_media != 0),
7508 + VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
7509 + un->un_node_type, 0, un->un_cmlbhandle,
7510 + (void *)SD_PATH_DIRECT) != 0) {
7511 + goto cmlb_attach_failed;
7512 + }
7513 +
7514 + /*
7928 7515 * At this point in the attach, we have enough info in the
7929 7516 * soft state to be able to issue commands to the target.
7930 7517 *
      7518 +	 * Dispatch a taskq to finish the attach, to avoid holding the
      7519 +	 * device tree lock for too long. If the dispatch fails, roll
      7520 +	 * back and fail the attach (see the sketch after this function).
7521 + */
7522 +
7523 + if (taskq_dispatch(sd_tq, sd_unit_attach, devi, KM_PUSHPAGE) != NULL)
7524 + return (DDI_SUCCESS);
7525 +
7526 + cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
7527 + cmlb_free_handle(&un->un_cmlbhandle);
7528 +
7529 +cmlb_attach_failed:
7530 + mutex_enter(SD_MUTEX(un));
7531 +
7532 + /* Deallocate SCSI FMA memory spaces */
7533 + kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
7534 +
      7535 +	/* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd restart */
7536 + if (un->un_direct_priority_timeid != NULL) {
7537 + timeout_id_t temp_id = un->un_direct_priority_timeid;
7538 + un->un_direct_priority_timeid = NULL;
7539 + mutex_exit(SD_MUTEX(un));
7540 + (void) untimeout(temp_id);
7541 + mutex_enter(SD_MUTEX(un));
7542 + }
7543 +
7544 + /* Cancel any pending start/stop timeouts */
7545 + if (un->un_startstop_timeid != NULL) {
7546 + timeout_id_t temp_id = un->un_startstop_timeid;
7547 + un->un_startstop_timeid = NULL;
7548 + mutex_exit(SD_MUTEX(un));
7549 + (void) untimeout(temp_id);
7550 + mutex_enter(SD_MUTEX(un));
7551 + }
7552 +
7553 + /* Cancel any pending reset-throttle timeouts */
7554 + if (un->un_reset_throttle_timeid != NULL) {
7555 + timeout_id_t temp_id = un->un_reset_throttle_timeid;
7556 + un->un_reset_throttle_timeid = NULL;
7557 + mutex_exit(SD_MUTEX(un));
7558 + (void) untimeout(temp_id);
7559 + mutex_enter(SD_MUTEX(un));
7560 + }
7561 +
7562 + /* Cancel rmw warning message timeouts */
7563 + if (un->un_rmw_msg_timeid != NULL) {
7564 + timeout_id_t temp_id = un->un_rmw_msg_timeid;
7565 + un->un_rmw_msg_timeid = NULL;
7566 + mutex_exit(SD_MUTEX(un));
7567 + (void) untimeout(temp_id);
7568 + mutex_enter(SD_MUTEX(un));
7569 + }
7570 +
7571 + /* Cancel any pending retry timeouts */
7572 + if (un->un_retry_timeid != NULL) {
7573 + timeout_id_t temp_id = un->un_retry_timeid;
7574 + un->un_retry_timeid = NULL;
7575 + mutex_exit(SD_MUTEX(un));
7576 + (void) untimeout(temp_id);
7577 + mutex_enter(SD_MUTEX(un));
7578 + }
7579 +
7580 + /* Cancel any pending delayed cv broadcast timeouts */
7581 + if (un->un_dcvb_timeid != NULL) {
7582 + timeout_id_t temp_id = un->un_dcvb_timeid;
7583 + un->un_dcvb_timeid = NULL;
7584 + mutex_exit(SD_MUTEX(un));
7585 + (void) untimeout(temp_id);
7586 + mutex_enter(SD_MUTEX(un));
7587 + }
7588 +
7589 + mutex_exit(SD_MUTEX(un));
7590 +
      7591 +	/* There should not be any in-progress I/O, so assert that here */
7592 + ASSERT(un->un_ncmds_in_transport == 0);
7593 + ASSERT(un->un_ncmds_in_driver == 0);
7594 +
7595 + /* Do not free the softstate if the callback routine is active */
7596 + sd_sync_with_callback(un);
7597 +
7598 + /*
7599 + * Partition stats apparently are not used with removables. These would
      7600 +	 * Partition stats are not used with removable media devices; they
      7601 +	 * are not created during attach, so there is no need to clean them up.
7602 + if (un->un_errstats != NULL) {
7603 + kstat_delete(un->un_errstats);
7604 + un->un_errstats = NULL;
7605 + }
7606 +
7607 +create_errstats_failed:
7608 +
7609 + if (un->un_stats != NULL) {
7610 + kstat_delete(un->un_stats);
7611 + un->un_stats = NULL;
7612 + }
7613 +
7614 + if (un->un_unmapstats != NULL) {
7615 + kstat_delete(un->un_unmapstats_ks);
7616 + un->un_unmapstats_ks = NULL;
7617 + un->un_unmapstats = NULL;
7618 + }
7619 +
7620 + if (un->un_lat_ksp != NULL) {
7621 + kstat_delete(un->un_lat_ksp);
7622 + un->un_lat_ksp = NULL;
7623 + un->un_lat_stats = NULL;
7624 + }
7625 +
7626 + ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
7627 + ddi_xbuf_attr_destroy(un->un_xbuf_attr);
7628 +
7629 + ddi_prop_remove_all(devi);
7630 + sema_destroy(&un->un_semoclose);
7631 + cv_destroy(&un->un_state_cv);
7632 + cv_destroy(&un->un_detach_cv);
7633 + sd_free_rqs(un);
7634 +
7635 +alloc_rqs_failed:
7636 +
7637 + devp->sd_private = NULL;
7638 + bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */
7639 +
7640 + /*
7641 + * Note: the man pages are unclear as to whether or not doing a
7642 + * ddi_soft_state_free(sd_state, instance) is the right way to
7643 + * clean up after the ddi_soft_state_zalloc() if the subsequent
7644 + * ddi_get_soft_state() fails. The implication seems to be
7645 + * that the get_soft_state cannot fail if the zalloc succeeds.
7646 + */
7647 + ddi_soft_state_free(sd_state, instance);
7648 +
7649 +probe_failed:
7650 + scsi_unprobe(devp);
7651 +
7652 + return (DDI_FAILURE);
7653 +}
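A minimal sketch of the deferred-attach handoff used by sdattach() above,
with assumed names (sd_tq is the driver taskq referenced in the dispatch;
its creation at module-load time is not shown in this diff):

	static void
	deferred_init(void *arg)
	{
		dev_info_t *dip = arg;

		/* slow, sleeping initialization runs in taskq context */
	}

	/* in attach(9E): hand the work off and return immediately */
	if (taskq_dispatch(sd_tq, deferred_init, devi, KM_PUSHPAGE) == NULL)
		return (DDI_FAILURE);	/* dispatch failed; roll back */
	return (DDI_SUCCESS);		/* attach completes asynchronously */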
7654 +
7655 +
7656 +/*
7657 + * Function: sddetach
7658 + *
7659 + * Description: Driver's detach(9E) entry point function.
7660 + *
7661 + * Arguments: devi - opaque device info handle
7662 + * cmd - detach type
7663 + *
7664 + * Return Code: DDI_SUCCESS
7665 + * DDI_FAILURE
7666 + *
7667 + * Context: Kernel thread context
7668 + */
7669 +
7670 +static int
7671 +sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
7672 +{
7673 + switch (cmd) {
7674 + case DDI_DETACH:
7675 + return (sd_unit_detach(devi));
7676 + case DDI_SUSPEND:
7677 + return (sd_ddi_suspend(devi));
7678 + default:
7679 + break;
7680 + }
7681 + return (DDI_FAILURE);
7682 +}
7683 +
7684 +
7685 +/*
7686 + * Function: sd_sync_with_callback
7687 + *
7688 + * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7689 + * state while the callback routine is active.
7690 + *
7691 + * Arguments: un: softstate structure for the instance
7692 + *
7693 + * Context: Kernel thread context
7694 + */
7695 +
7696 +static void
7697 +sd_sync_with_callback(struct sd_lun *un)
7698 +{
7699 + ASSERT(un != NULL);
7700 +
7701 + mutex_enter(SD_MUTEX(un));
7702 +
7703 + ASSERT(un->un_in_callback >= 0);
7704 +
7705 + while (un->un_in_callback > 0) {
7706 + mutex_exit(SD_MUTEX(un));
7707 + delay(2);
7708 + mutex_enter(SD_MUTEX(un));
7709 + }
7710 +
7711 + mutex_exit(SD_MUTEX(un));
7712 +}
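The counter this loop polls is maintained by the driver's completion
callbacks. A minimal sketch of that protocol (illustrative; the real
increments live in the command completion paths of this driver):

	mutex_enter(SD_MUTEX(un));
	un->un_in_callback++;		/* entering the callback */
	mutex_exit(SD_MUTEX(un));

	/* ... callback work that must not race with attach/detach ... */

	mutex_enter(SD_MUTEX(un));
	un->un_in_callback--;		/* syncing thread may now proceed */
	mutex_exit(SD_MUTEX(un));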
7713 +
7714 +/*
7715 + * Function: sd_unit_attach
7716 + *
      7717 + * Description: Completes DDI_ATTACH processing for sdattach() from
      7718 + *		taskq context, performing the remaining structure and
      7719 + *		device initializations for the unit.
      7720 + *
      7721 + * Arguments: arg: the system's dev_info_t for the device, passed
      7722 + *		as a void pointer by taskq_dispatch().
      7723 + *
      7724 + * Return Code: None. Failures are rolled back internally.
      7725 + *
      7726 + * Context: Dispatched from sdattach() via taskq for the DDI_ATTACH
      7727 + *		case. Kernel thread context only. Can sleep.
7728 + */
7729 +void
7730 +sd_unit_attach(void *arg)
7731 +{
7732 + dev_info_t *devi = arg;
7733 + struct scsi_device *devp = ddi_get_driver_private(devi);
7734 + struct sd_lun *un = (struct sd_lun *)devp->sd_private;
7735 + char name_str[48];
7736 + int reservation_flag = SD_TARGET_IS_UNRESERVED;
7737 + int rval;
7738 + int wc_enabled;
7739 + int wc_changeable;
7740 + int tgt;
7741 + uint64_t capacity;
7742 + uint_t lbasize = 0;
7743 + dev_info_t *pdip = ddi_get_parent(devi);
7744 + int geom_label_valid = 0;
7745 + sd_ssc_t *ssc;
7746 + int status;
7747 + char *devid;
7748 +
7749 + /*
7750 + * Retrieve the target ID of the device.
7751 + */
7752 + tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7753 + SCSI_ADDR_PROP_TARGET, -1);
7754 +
7755 + /*
7931 7756 * All command paths used below MUST issue their commands as
7932 7757 * SD_PATH_DIRECT. This is important as intermediate layers
7933 7758 * are not all initialized yet (such as PM).
7934 7759 */
7935 7760
7761 + /* Initialize sd_ssc_t for internal uscsi commands */
7762 + ssc = sd_ssc_init(un);
7763 +
7936 7764 /*
7937 7765 * Send a TEST UNIT READY command to the device. This should clear
7938 7766 * any outstanding UNIT ATTENTION that may be present.
7939 7767 *
7940 7768 * Note: Don't check for success, just track if there is a reservation,
7941 7769 * this is a throw away command to clear any unit attentions.
7942 7770 *
7943 7771 * Note: This MUST be the first command issued to the target during
7944 7772 * attach to ensure power on UNIT ATTENTIONS are cleared.
7945 7773 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7946 7774 * with attempts at spinning up a device with no media.
7947 7775 */
7948 7776 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
7949 7777 if (status != 0) {
7950 7778 if (status == EACCES)
7951 7779 reservation_flag = SD_TARGET_IS_RESERVED;
7952 7780 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7953 7781 }
7954 7782
7955 7783 /*
7956 7784 * If the device is NOT a removable media device, attempt to spin
7957 7785 * it up (using the START_STOP_UNIT command) and read its capacity
7958 7786 * (using the READ CAPACITY command). Note, however, that either
7959 7787 * of these could fail and in some cases we would continue with
7960 7788 * the attach despite the failure (see below).
7961 7789 */
7962 7790 if (un->un_f_descr_format_supported) {
7963 7791
7964 7792 switch (sd_spin_up_unit(ssc)) {
7965 7793 case 0:
7966 7794 /*
7967 7795 * Spin-up was successful; now try to read the
7968 7796 * capacity. If successful then save the results
7969 7797 * and mark the capacity & lbasize as valid.
7970 7798 */
7971 7799 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7972 7800 "sd_unit_attach: un:0x%p spin-up successful\n", un);
7973 7801
7974 7802 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
7975 7803 &lbasize, SD_PATH_DIRECT);
7976 7804
7977 7805 switch (status) {
7978 7806 case 0: {
7979 7807 if (capacity > DK_MAX_BLOCKS) {
7980 7808 #ifdef _LP64
7981 7809 if ((capacity + 1) >
7982 7810 SD_GROUP1_MAX_ADDRESS) {
7983 7811 /*
7984 7812 * Enable descriptor format
7985 7813 * sense data so that we can
7986 7814 * get 64 bit sense data
7987 7815 * fields.
7988 7816 */
7989 7817 sd_enable_descr_sense(ssc);
7990 7818 }
7991 7819 #else
7992 7820 /* 32-bit kernels can't handle this */
7993 7821 scsi_log(SD_DEVINFO(un),
7994 7822 sd_label, CE_WARN,
7995 7823 "disk has %llu blocks, which "
7996 7824 "is too large for a 32-bit "
7997 7825 "kernel", capacity);
7998 7826
7999 -#if defined(__i386) || defined(__amd64)
8000 7827 /*
8001 7828 * 1TB disk was treated as (1T - 512)B
8002 7829 * in the past, so that it might have
8003 7830 * valid VTOC and solaris partitions,
8004 7831 * we have to allow it to continue to
8005 7832 * work.
8006 7833 */
8007 - if (capacity -1 > DK_MAX_BLOCKS)
7834 + if (capacity - 1 > DK_MAX_BLOCKS)
7835 + goto spinup_failed;
8008 7836 #endif
8009 - goto spinup_failed;
8010 -#endif
8011 7837 }
8012 7838
8013 7839 /*
 8014 7840 			 * It is not necessary to check here whether
 8015 7841 			 * the capacity of the device is bigger than
 8016 7842 			 * what the max hba cdb can support, because
 8017 7843 			 * sd_send_scsi_READ_CAPACITY retrieves the
 8018 7844 			 * capacity with a USCSI command, which is
 8019 7845 			 * constrained by the max hba cdb. In fact,
 8020 7846 			 * sd_send_scsi_READ_CAPACITY returns EINVAL
 8021 7847 			 * when a bigger cdb than the required length
 8022 7848 			 * is used. That case is handled in
 8023 7849 			 * "case EINVAL" below.
8024 7850 */
8025 7851
8026 7852 /*
8027 7853 * The following relies on
8028 7854 * sd_send_scsi_READ_CAPACITY never
8029 7855 * returning 0 for capacity and/or lbasize.
8030 7856 */
8031 7857 sd_update_block_info(un, lbasize, capacity);
8032 7858
8033 7859 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8034 7860 "sd_unit_attach: un:0x%p capacity = %ld "
8035 7861 "blocks; lbasize= %ld.\n", un,
8036 7862 un->un_blockcount, un->un_tgt_blocksize);
8037 7863
8038 7864 break;
8039 7865 }
8040 7866 case EINVAL:
8041 7867 /*
8042 7868 * In the case where the max-cdb-length property
8043 7869 * is smaller than the required CDB length for
8044 7870 * a SCSI device, a target driver can fail to
8045 7871 * attach to that device.
8046 7872 */
8047 7873 scsi_log(SD_DEVINFO(un),
8048 7874 sd_label, CE_WARN,
8049 7875 "disk capacity is too large "
8050 7876 "for current cdb length");
8051 7877 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8052 7878
8053 7879 goto spinup_failed;
8054 7880 case EACCES:
8055 7881 /*
8056 7882 * Should never get here if the spin-up
8057 7883 * succeeded, but code it in anyway.
8058 7884 * From here, just continue with the attach...
8059 7885 */
8060 7886 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8061 7887 "sd_unit_attach: un:0x%p "
8062 7888 "sd_send_scsi_READ_CAPACITY "
8063 7889 "returned reservation conflict\n", un);
8064 7890 reservation_flag = SD_TARGET_IS_RESERVED;
8065 7891 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8066 7892 break;
8067 7893 default:
8068 7894 /*
8069 7895 * Likewise, should never get here if the
8070 7896 * spin-up succeeded. Just continue with
8071 7897 * the attach...
8072 7898 */
8073 - if (status == EIO)
7899 + if (status == EIO) {
8074 7900 sd_ssc_assessment(ssc,
8075 7901 SD_FMT_STATUS_CHECK);
8076 - else
7902 + goto spinup_failed;
7903 + } else {
8077 7904 sd_ssc_assessment(ssc,
8078 7905 SD_FMT_IGNORE);
7906 + }
8079 7907 break;
8080 7908 }
8081 7909 break;
8082 7910 case EACCES:
8083 7911 /*
8084 7912 * Device is reserved by another host. In this case
8085 7913 * we could not spin it up or read the capacity, but
8086 7914 * we continue with the attach anyway.
8087 7915 */
8088 7916 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8089 7917 "sd_unit_attach: un:0x%p spin-up reservation "
8090 7918 "conflict.\n", un);
8091 7919 reservation_flag = SD_TARGET_IS_RESERVED;
8092 7920 break;
8093 7921 default:
8094 7922 /* Fail the attach if the spin-up failed. */
8095 7923 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8096 7924 "sd_unit_attach: un:0x%p spin-up failed.", un);
8097 7925 goto spinup_failed;
8098 7926 }
8099 7927
8100 7928 }
8101 7929
8102 7930 /*
8103 7931 * Check to see if this is a MMC drive
8104 7932 */
8105 7933 if (ISCD(un)) {
8106 7934 sd_set_mmc_caps(ssc);
8107 7935 }
8108 7936
8109 7937 /*
8110 7938 * Add a zero-length attribute to tell the world we support
8111 7939 * kernel ioctls (for layered drivers)
8112 7940 */
8113 7941 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8114 7942 DDI_KERNEL_IOCTL, NULL, 0);
8115 7943
8116 7944 /*
8117 7945 * Add a boolean property to tell the world we support
8118 7946 * the B_FAILFAST flag (for layered drivers)
8119 7947 */
8120 7948 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8121 7949 "ddi-failfast-supported", NULL, 0);
8122 7950
8123 7951 /*
8124 7952 * Initialize power management
8125 7953 */
8126 7954 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
8127 7955 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
7956 +#ifdef notyet
8128 7957 sd_setup_pm(ssc, devi);
7958 +#endif
8129 7959 if (un->un_f_pm_is_enabled == FALSE) {
8130 7960 /*
8131 7961 * For performance, point to a jump table that does
8132 7962 * not include pm.
8133 7963 * The direct and priority chains don't change with PM.
8134 7964 *
8135 7965 * Note: this is currently done based on individual device
8136 7966 * capabilities. When an interface for determining system
8137 7967 * power enabled state becomes available, or when additional
8138 7968 * layers are added to the command chain, these values will
8139 7969 * have to be re-evaluated for correctness.
8140 7970 */
8141 7971 if (un->un_f_non_devbsize_supported) {
8142 7972 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
8143 7973 } else {
8144 7974 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
8145 7975 }
8146 7976 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
8147 7977 }
8148 7978
8149 7979 /*
8150 7980 * This property is set to 0 by HA software to avoid retries
8151 7981 * on a reserved disk. (The preferred property name is
8152 7982 * "retry-on-reservation-conflict") (1189689)
8153 7983 *
8154 7984 * Note: The use of a global here can have unintended consequences. A
8155 7985 * per instance variable is preferable to match the capabilities of
8156 7986 * different underlying hba's (4402600)
8157 7987 */
8158 7988 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
8159 7989 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
8160 7990 sd_retry_on_reservation_conflict);
8161 7991 if (sd_retry_on_reservation_conflict != 0) {
8162 7992 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
8163 - devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
7993 + devi, DDI_PROP_DONTPASS, "sd_retry_on_reservation_conflict",
8164 7994 sd_retry_on_reservation_conflict);
8165 7995 }
8166 7996
8167 7997 /* Set up options for QFULL handling. */
8168 7998 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8169 7999 "qfull-retries", -1)) != -1) {
8170 8000 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
8171 8001 rval, 1);
8172 8002 }
8173 8003 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8174 8004 "qfull-retry-interval", -1)) != -1) {
8175 8005 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
8176 8006 rval, 1);
8177 8007 }
8178 8008
8179 8009 /*
8180 8010 * This just prints a message that announces the existence of the
8181 8011 * device. The message is always printed in the system logfile, but
8182 8012 * only appears on the console if the system is booted with the
8183 8013 * -v (verbose) argument.
8184 8014 */
8185 8015 ddi_report_dev(devi);
8186 8016
8187 8017 un->un_mediastate = DKIO_NONE;
8188 8018
8189 8019 /*
8190 8020 * Check Block Device Characteristics VPD.
8191 8021 */
8192 8022 sd_check_bdc_vpd(ssc);
8193 8023
8194 8024 /*
8195 8025 * Check whether the drive is in emulation mode.
8196 8026 */
8197 8027 sd_check_emulation_mode(ssc);
8198 8028
8199 - cmlb_alloc_handle(&un->un_cmlbhandle);
8200 -
8201 -#if defined(__i386) || defined(__amd64)
8202 - /*
8203 - * On x86, compensate for off-by-1 legacy error
8204 - */
8029 + /* Compensate for off-by-1 legacy error */
8205 8030 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
8206 8031 (lbasize == un->un_sys_blocksize))
8207 - offbyone = CMLB_OFF_BY_ONE;
8208 -#endif
8032 + cmlb_workaround_off_by_one(un->un_cmlbhandle);
8209 8033
8210 - if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
8211 - VOID2BOOLEAN(un->un_f_has_removable_media != 0),
8212 - VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
8213 - un->un_node_type, offbyone, un->un_cmlbhandle,
8214 - (void *)SD_PATH_DIRECT) != 0) {
8215 - goto cmlb_attach_failed;
8216 - }
8217 -
8218 -
8219 8034 /*
8220 8036 	 * Read and validate the device's geometry (i.e., disk label).
8221 8036 * A new unformatted drive will not have a valid geometry, but
8222 8037 * the driver needs to successfully attach to this device so
8223 8038 * the drive can be formatted via ioctls.
8224 8039 */
8225 8040 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
8226 8041 (void *)SD_PATH_DIRECT) == 0) ? 1: 0;
8227 8042
8228 8043 mutex_enter(SD_MUTEX(un));
8229 8044
8230 8045 /*
8231 - * Read and initialize the devid for the unit.
8046 + * Read and initialize the devid for the unit if not done already.
8232 8047 */
8233 - if (un->un_f_devid_supported) {
8048 + if (un->un_f_devid_supported && un->un_devid == NULL) {
8234 8049 sd_register_devid(ssc, devi, reservation_flag);
8235 8050 }
8236 8051 mutex_exit(SD_MUTEX(un));
8237 8052
8238 -#if (defined(__fibre))
8239 - /*
8240 - * Register callbacks for fibre only. You can't do this solely
8241 - * on the basis of the devid_type because this is hba specific.
8242 - * We need to query our hba capabilities to find out whether to
8243 - * register or not.
8244 - */
8245 - if (un->un_f_is_fibre) {
8246 - if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
8247 - sd_init_event_callbacks(un);
8248 - SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8249 - "sd_unit_attach: un:0x%p event callbacks inserted",
8250 - un);
8251 - }
8252 - }
8253 -#endif
8254 -
8255 8053 if (un->un_f_opt_disable_cache == TRUE) {
8256 8054 /*
8257 8055 * Disable both read cache and write cache. This is
8258 8056 * the historic behavior of the keywords in the config file.
8259 8057 */
8260 8058 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
8261 8059 0) {
8262 8060 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8263 8061 "sd_unit_attach: un:0x%p Could not disable "
8264 8062 "caching", un);
8265 8063 goto devid_failed;
8266 8064 }
8267 8065 }
8268 8066
8269 8067 /*
8270 8068 * Check the value of the WCE bit and if it's allowed to be changed,
8271 8069 * set un_f_write_cache_enabled and un_f_cache_mode_changeable
8272 8070 * accordingly.
8273 8071 */
8274 8072 (void) sd_get_write_cache_enabled(ssc, &wc_enabled);
8275 8073 sd_get_write_cache_changeable(ssc, &wc_changeable);
8276 8074 mutex_enter(SD_MUTEX(un));
8277 8075 un->un_f_write_cache_enabled = (wc_enabled != 0);
8278 8076 un->un_f_cache_mode_changeable = (wc_changeable != 0);
8279 8077 mutex_exit(SD_MUTEX(un));
8280 8078
8281 8079 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
8282 8080 un->un_tgt_blocksize != DEV_BSIZE) ||
8283 8081 un->un_f_enable_rmw) {
8284 8082 if (!(un->un_wm_cache)) {
8285 8083 (void) snprintf(name_str, sizeof (name_str),
8286 8084 "%s%d_cache",
8287 8085 ddi_driver_name(SD_DEVINFO(un)),
8288 8086 ddi_get_instance(SD_DEVINFO(un)));
8289 8087 un->un_wm_cache = kmem_cache_create(
8290 8088 name_str, sizeof (struct sd_w_map),
8291 8089 8, sd_wm_cache_constructor,
8292 8090 sd_wm_cache_destructor, NULL,
8293 8091 (void *)un, NULL, 0);
8294 8092 if (!(un->un_wm_cache)) {
8295 8093 goto wm_cache_failed;
8296 8094 }
8297 8095 }
8298 8096 }
8299 8097
8300 8098 /*
8301 8099 * Check the value of the NV_SUP bit and set
8302 8100 * un_f_suppress_cache_flush accordingly.
8303 8101 */
8304 8102 sd_get_nv_sup(ssc);
8305 8103
8306 8104 /*
8307 8105 * Find out what type of reservation this disk supports.
8308 8106 */
8309 8107 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
8310 8108
8311 8109 switch (status) {
8312 8110 case 0:
8313 8111 /*
8314 8112 * SCSI-3 reservations are supported.
8315 8113 */
8316 8114 un->un_reservation_type = SD_SCSI3_RESERVATION;
8317 8115 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8318 8116 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
8319 8117 break;
8320 8118 case ENOTSUP:
8321 8119 /*
8322 8120 * The PERSISTENT RESERVE IN command would not be recognized by
8323 8121 * a SCSI-2 device, so assume the reservation type is SCSI-2.
8324 8122 */
8325 8123 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8326 8124 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
8327 8125 un->un_reservation_type = SD_SCSI2_RESERVATION;
8328 8126
8329 8127 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8330 8128 break;
8331 8129 default:
8332 8130 /*
8333 8131 * default to SCSI-3 reservations
8334 8132 */
8335 8133 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8336 8134 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
8337 8135 un->un_reservation_type = SD_SCSI3_RESERVATION;
8338 8136
8339 8137 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8340 8138 break;
8341 8139 }
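
For reference, a minimal sketch of the PERSISTENT RESERVE IN (READ KEYS) CDB
that sd_send_scsi_PERSISTENT_RESERVE_IN presumably builds; `len` is an
illustrative allocation length, not an identifier from this file. A
SCSI-2-only target rejects the opcode with CHECK CONDITION / ILLEGAL REQUEST,
which surfaces as the ENOTSUP case above:

	uint16_t len = 24;			/* illustrative */
	uchar_t cdb[CDB_GROUP1] = { 0 };

	cdb[0] = SCMD_PERSISTENT_RESERVE_IN;	/* opcode 0x5E */
	cdb[1] = SD_READ_KEYS;			/* service action */
	cdb[7] = (len >> 8) & 0xff;		/* allocation length, MSB */
	cdb[8] = len & 0xff;			/* allocation length, LSB */
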
8342 8140
8343 8141 /*
8344 8142 * Set the pstat and error stat values here, so data obtained during the
8345 8143 * previous attach-time routines is available.
8346 8144 *
8347 8145 * Note: This is a critical sequence that needs to be maintained:
8348 8146 * 1) Instantiate the kstats before any routines using the iopath
8349 8147 * (i.e. sd_send_scsi_cmd).
8350 8148 * 2) Initialize the error stats (sd_set_errstats) and partition
8351 8149 * stats (sd_set_pstats)here, following
8352 8150 * cmlb_validate_geometry(), sd_register_devid(), and
8353 8151 * sd_cache_control().
8354 8152 */
8355 8153
[ 91 lines elided ]
8356 8154 if (un->un_f_pkstats_enabled && geom_label_valid) {
8357 8155 sd_set_pstats(un);
8358 8156 SD_TRACE(SD_LOG_IO_PARTITION, un,
8359 8157 "sd_unit_attach: un:0x%p pstats created and set\n", un);
8360 8158 }
8361 8159
8362 8160 sd_set_errstats(un);
8363 8161 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8364 8162 "sd_unit_attach: un:0x%p errstats set\n", un);
8365 8163
8164 + sd_setup_blk_limits(ssc);
8366 8165
8367 8166 /*
8368 8167 * After successfully attaching an instance, we record the information
8369 8168 * of how many luns have been attached on the relative target and
8370 8169 * controller for parallel SCSI. This information is used when sd tries
8371 8170 * to set the tagged queuing capability in HBA.
8372 8171 */
8373 8172 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8374 8173 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
8375 8174 }
8376 8175
8377 8176 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8378 8177 "sd_unit_attach: un:0x%p exit success\n", un);
8379 8178
8380 8179 /* Uninitialize sd_ssc_t pointer */
8381 8180 sd_ssc_fini(ssc);
8382 8181
8383 - return (DDI_SUCCESS);
8182 + /* attach finished, switch to SD_STATE_NORMAL */
8183 + mutex_enter(SD_MUTEX(un));
8184 + New_state(un, SD_STATE_NORMAL);
8185 + cv_broadcast(&un->un_suspend_cv);
8186 + mutex_exit(SD_MUTEX(un));
8384 8187
8188 + return;
8189 +
8385 8190 /*
8386 8191 * An error occurred during the attach; clean up & return failure.
8387 8192 */
8193 +
8388 8194 wm_cache_failed:
8389 8195 devid_failed:
8390 - ddi_remove_minor_node(devi, NULL);
8391 -
8392 -cmlb_attach_failed:
8393 8196 /*
8394 8197 * Cleanup from the scsi_ifsetcap() calls (437868)
8395 8198 */
8396 8199 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8397 8200 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8398 8201
8399 8202 /*
8400 8203 * Refer to the comments of setting tagged-qing in the beginning of
8401 8204 * sd_unit_attach. We can only disable tagged queuing when there is
8402 8205 * no lun attached on the target.
8403 8206 */
8404 8207 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
8405 8208 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8406 8209 }
8407 8210
8408 8211 if (un->un_f_is_fibre == FALSE) {
8409 8212 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8410 8213 }
8411 8214
8412 8215 spinup_failed:
8413 -
8414 - /* Uninitialize sd_ssc_t pointer */
8415 - sd_ssc_fini(ssc);
8416 -
8216 + /* attach failed, switch to SD_STATE_ATTACH_FAILED */
8417 8217 mutex_enter(SD_MUTEX(un));
8418 -
8419 - /* Deallocate SCSI FMA memory spaces */
8420 - kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8421 -
8422 - /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
8423 - if (un->un_direct_priority_timeid != NULL) {
8424 - timeout_id_t temp_id = un->un_direct_priority_timeid;
8425 - un->un_direct_priority_timeid = NULL;
8426 - mutex_exit(SD_MUTEX(un));
8427 - (void) untimeout(temp_id);
8428 - mutex_enter(SD_MUTEX(un));
8429 - }
8430 -
8431 - /* Cancel any pending start/stop timeouts */
8432 - if (un->un_startstop_timeid != NULL) {
8433 - timeout_id_t temp_id = un->un_startstop_timeid;
8434 - un->un_startstop_timeid = NULL;
8435 - mutex_exit(SD_MUTEX(un));
8436 - (void) untimeout(temp_id);
8437 - mutex_enter(SD_MUTEX(un));
8438 - }
8439 -
8440 - /* Cancel any pending reset-throttle timeouts */
8441 - if (un->un_reset_throttle_timeid != NULL) {
8442 - timeout_id_t temp_id = un->un_reset_throttle_timeid;
8443 - un->un_reset_throttle_timeid = NULL;
8444 - mutex_exit(SD_MUTEX(un));
8445 - (void) untimeout(temp_id);
8446 - mutex_enter(SD_MUTEX(un));
8447 - }
8448 -
8449 - /* Cancel rmw warning message timeouts */
8450 - if (un->un_rmw_msg_timeid != NULL) {
8451 - timeout_id_t temp_id = un->un_rmw_msg_timeid;
8452 - un->un_rmw_msg_timeid = NULL;
8453 - mutex_exit(SD_MUTEX(un));
8454 - (void) untimeout(temp_id);
8455 - mutex_enter(SD_MUTEX(un));
8456 - }
8457 -
8458 - /* Cancel any pending retry timeouts */
8459 - if (un->un_retry_timeid != NULL) {
8460 - timeout_id_t temp_id = un->un_retry_timeid;
8461 - un->un_retry_timeid = NULL;
8462 - mutex_exit(SD_MUTEX(un));
8463 - (void) untimeout(temp_id);
8464 - mutex_enter(SD_MUTEX(un));
8465 - }
8466 -
8467 - /* Cancel any pending delayed cv broadcast timeouts */
8468 - if (un->un_dcvb_timeid != NULL) {
8469 - timeout_id_t temp_id = un->un_dcvb_timeid;
8470 - un->un_dcvb_timeid = NULL;
8471 - mutex_exit(SD_MUTEX(un));
8472 - (void) untimeout(temp_id);
8473 - mutex_enter(SD_MUTEX(un));
8474 - }
8475 -
8218 + New_state(un, SD_STATE_ATTACH_FAILED);
8219 + cv_broadcast(&un->un_suspend_cv);
8476 8220 mutex_exit(SD_MUTEX(un));
8477 8221
8478 - /* There should not be any in-progress I/O so ASSERT this check */
8479 - ASSERT(un->un_ncmds_in_transport == 0);
8480 - ASSERT(un->un_ncmds_in_driver == 0);
8222 + devid = DEVI(devi)->devi_devid_str;
8223 + scsi_fm_ereport_post(un->un_sd, 0,
8224 + NULL, "disk.attach-failure", ssc->ssc_uscsi_info->ui_ena,
8225 + devid, NULL, DDI_NOSLEEP, NULL,
8226 + FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
8227 + DEVID_IF_KNOWN(devid));
8481 8228
8482 - /* Do not free the softstate if the callback routine is active */
8483 - sd_sync_with_callback(un);
8484 -
8485 - /*
8486 - * Partition stats apparently are not used with removables. These would
8487 - * not have been created during attach, so no need to clean them up...
8488 - */
8489 - if (un->un_errstats != NULL) {
8490 - kstat_delete(un->un_errstats);
8491 - un->un_errstats = NULL;
8492 - }
8493 -
8494 -create_errstats_failed:
8495 -
8496 - if (un->un_stats != NULL) {
8497 - kstat_delete(un->un_stats);
8498 - un->un_stats = NULL;
8499 - }
8500 -
8501 - ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8502 - ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8503 -
8504 - ddi_prop_remove_all(devi);
8505 - sema_destroy(&un->un_semoclose);
8506 - cv_destroy(&un->un_state_cv);
8507 -
8508 - sd_free_rqs(un);
8509 -
8510 -alloc_rqs_failed:
8511 -
8512 - devp->sd_private = NULL;
8513 - bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */
8514 -
8515 - /*
8516 - * Note: the man pages are unclear as to whether or not doing a
8517 - * ddi_soft_state_free(sd_state, instance) is the right way to
8518 - * clean up after the ddi_soft_state_zalloc() if the subsequent
8519 - * ddi_get_soft_state() fails. The implication seems to be
8520 - * that the get_soft_state cannot fail if the zalloc succeeds.
8521 - */
8522 -#ifndef XPV_HVM_DRIVER
8523 - ddi_soft_state_free(sd_state, instance);
8524 -#endif /* !XPV_HVM_DRIVER */
8525 -
8526 -probe_failed:
8527 - scsi_unprobe(devp);
8528 -
8529 - return (DDI_FAILURE);
8229 + /* Uninitialize sd_ssc_t pointer */
8230 + sd_ssc_fini(ssc);
8231 + SD_ERROR(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach failed: un: %p",
8232 + (void *)un);
8530 8233 }
8531 8234
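Both exits of sd_unit_attach now end with the same handshake: move the unit
out of SD_STATE_ATTACHING and wake anything sleeping on un_suspend_cv. As a
minimal sketch, the pattern factored into a hypothetical helper
(sd_attach_done is not a routine in this file):

	static void
	sd_attach_done(struct sd_lun *un, boolean_t ok)
	{
		mutex_enter(SD_MUTEX(un));
		/* waiters blocked on un_suspend_cv recheck un_state */
		New_state(un, ok ? SD_STATE_NORMAL : SD_STATE_ATTACH_FAILED);
		cv_broadcast(&un->un_suspend_cv);
		mutex_exit(SD_MUTEX(un));
	}
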
8532 8235
8533 8236 /*
8534 8237 * Function: sd_unit_detach
8535 8238 *
8536 8239 * Description: Performs DDI_DETACH processing for sddetach().
8537 8240 *
8538 8241 * Return Code: DDI_SUCCESS
8539 8242 * DDI_FAILURE
8540 8243 *
8541 8244 * Context: Kernel thread context
8542 8245 */
8543 8246
8544 8247 static int
8545 8248 sd_unit_detach(dev_info_t *devi)
8546 8249 {
8547 8250 struct scsi_device *devp;
8548 8251 struct sd_lun *un;
[ 9 lines elided ]
8549 8252 int i;
8550 8253 int tgt;
8551 8254 dev_t dev;
8552 8255 dev_info_t *pdip = ddi_get_parent(devi);
8553 8256 int instance = ddi_get_instance(devi);
8554 8257
8555 8258 mutex_enter(&sd_detach_mutex);
8556 8259
8557 8260 /*
8558 8261 * Fail the detach for any of the following:
8559 - * - Unable to get the sd_lun struct for the instance
8560 - * - A layered driver has an outstanding open on the instance
8561 - * - Another thread is already detaching this instance
8562 - * - Another thread is currently performing an open
8262 + * - Unable to get the sd_lun struct for the instance
8263 + * - The instance is still attaching
8264 + * - Another thread is already detaching this instance
8265 + * - Another thread is currently performing an open
8266 + *
8267 +	 * Additionally, if the "device gone" flag is not set:
8268 +	 *  - There are outstanding commands in the driver
8269 +	 *  - There are outstanding commands in the transport
8563 8270 */
8564 8271 devp = ddi_get_driver_private(devi);
8565 - if ((devp == NULL) ||
8566 - ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
8567 - (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
8568 - (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
8272 + if (devp == NULL || (un = (struct sd_lun *)devp->sd_private) == NULL ||
8273 + un->un_detach_count != 0 || un->un_opens_in_progress != 0 ||
8274 + (!DEVI_IS_GONE(devi) &&
8275 + (un->un_state == SD_STATE_RWAIT ||
8276 + un->un_state == SD_STATE_ATTACHING ||
8277 + un->un_ncmds_in_driver != 0 ||
8278 + un->un_ncmds_in_transport != 0))) {
8569 8279 mutex_exit(&sd_detach_mutex);
8570 8280 return (DDI_FAILURE);
8571 8281 }
8572 8282
8573 - SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);
8283 + SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: entry 0x%p\n", __func__, un);
8574 8284
8575 8285 /*
8576 8286 * Mark this instance as currently in a detach, to inhibit any
8577 8287 * opens from a layered driver.
8578 8288 */
8579 8289 un->un_detach_count++;
8580 8290 mutex_exit(&sd_detach_mutex);
8581 8291
8582 8292 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
8583 8293 SCSI_ADDR_PROP_TARGET, -1);
8584 8294
8585 8295 dev = sd_make_device(SD_DEVINFO(un));
8586 8296
8587 -#ifndef lint
8588 - _NOTE(COMPETING_THREADS_NOW);
8589 -#endif
8590 -
8591 8297 mutex_enter(SD_MUTEX(un));
8592 8298
8593 8299 /*
8594 8300 * Fail the detach if there are any outstanding layered
8595 8301 * opens on this device.
8596 8302 */
8597 8303 for (i = 0; i < NDKMAP; i++) {
8598 8304 if (un->un_ocmap.lyropen[i] != 0) {
8599 8305 goto err_notclosed;
8600 8306 }
8601 8307 }
8602 8308
8603 8309 /*
8604 - * Verify there are NO outstanding commands issued to this device.
8605 - * ie, un_ncmds_in_transport == 0.
8606 - * It's possible to have outstanding commands through the physio
8607 - * code path, even though everything's closed.
8310 +	 * If the attach wasn't successful, some of the normal cleanup work
8311 +	 * must be skipped.
8608 8312 */
8609 - if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
8610 - (un->un_direct_priority_timeid != NULL) ||
8611 - (un->un_state == SD_STATE_RWAIT)) {
8313 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
8612 8314 mutex_exit(SD_MUTEX(un));
8613 - SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8614 - "sd_dr_detach: Detach failure due to outstanding cmds\n");
8615 - goto err_stillbusy;
8315 + goto no_attach_cleanup;
8616 8316 }
8617 8317
8618 8318 /*
8619 8319 * If we have the device reserved, release the reservation.
8620 8320 */
8621 - if ((un->un_resvd_status & SD_RESERVE) &&
8321 + if (!DEVI_IS_GONE(devi) &&
8322 + (un->un_resvd_status & SD_RESERVE) &&
8622 8323 !(un->un_resvd_status & SD_LOST_RESERVE)) {
8623 8324 mutex_exit(SD_MUTEX(un));
8624 8325 /*
8625 8326 * Note: sd_reserve_release sends a command to the device
8626 8327 * via the sd_ioctlcmd() path, and can sleep.
8627 8328 */
8628 8329 if (sd_reserve_release(dev, SD_RELEASE) != 0) {
8629 8330 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8630 - "sd_dr_detach: Cannot release reservation \n");
8331 + "%s: cannot release reservation\n", __func__);
8631 8332 }
8632 8333 } else {
8633 8334 mutex_exit(SD_MUTEX(un));
8634 8335 }
8635 8336
8636 8337 /*
8637 8338 * Untimeout any reserve recover, throttle reset, restart unit
8638 8339 * and delayed broadcast timeout threads. Protect the timeout pointer
8639 8340 * from getting nulled by their callback functions.
8640 8341 */
8641 8342 mutex_enter(SD_MUTEX(un));
8642 8343 if (un->un_resvd_timeid != NULL) {
8643 8344 timeout_id_t temp_id = un->un_resvd_timeid;
8644 8345 un->un_resvd_timeid = NULL;
8645 8346 mutex_exit(SD_MUTEX(un));
8646 8347 (void) untimeout(temp_id);
8647 8348 mutex_enter(SD_MUTEX(un));
8648 8349 }
8649 8350
8650 8351 if (un->un_reset_throttle_timeid != NULL) {
8651 8352 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8652 8353 un->un_reset_throttle_timeid = NULL;
8653 8354 mutex_exit(SD_MUTEX(un));
8654 8355 (void) untimeout(temp_id);
8655 8356 mutex_enter(SD_MUTEX(un));
8656 8357 }
8657 8358
8658 8359 if (un->un_startstop_timeid != NULL) {
8659 8360 timeout_id_t temp_id = un->un_startstop_timeid;
8660 8361 un->un_startstop_timeid = NULL;
8661 8362 mutex_exit(SD_MUTEX(un));
8662 8363 (void) untimeout(temp_id);
8663 8364 mutex_enter(SD_MUTEX(un));
8664 8365 }
8665 8366
8666 8367 if (un->un_rmw_msg_timeid != NULL) {
8667 8368 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8668 8369 un->un_rmw_msg_timeid = NULL;
8669 8370 mutex_exit(SD_MUTEX(un));
8670 8371 (void) untimeout(temp_id);
8671 8372 mutex_enter(SD_MUTEX(un));
8672 8373 }
8673 8374
8674 8375 if (un->un_dcvb_timeid != NULL) {
8675 8376 timeout_id_t temp_id = un->un_dcvb_timeid;
8676 8377 un->un_dcvb_timeid = NULL;
[ 36 lines elided ]
8677 8378 mutex_exit(SD_MUTEX(un));
8678 8379 (void) untimeout(temp_id);
8679 8380 } else {
8680 8381 mutex_exit(SD_MUTEX(un));
8681 8382 }
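
The cancellations above (and the retry and direct-priority ones below) all
repeat one idiom: snapshot and clear the id under SD_MUTEX so the callback
cannot be raced against a stale pointer, then call untimeout() with the mutex
dropped so an already-running callback that blocks on SD_MUTEX cannot deadlock
us. A sketch of the idiom as a hypothetical helper (sd_cancel_timeout is
illustrative only):

	static void
	sd_cancel_timeout(struct sd_lun *un, timeout_id_t *tidp)
	{
		ASSERT(mutex_owned(SD_MUTEX(un)));
		if (*tidp != NULL) {
			timeout_id_t tid = *tidp;

			*tidp = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) untimeout(tid);
			mutex_enter(SD_MUTEX(un));
		}
	}
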
8682 8383
8683 8384 /* Remove any pending reservation reclaim requests for this device */
8684 8385 sd_rmv_resv_reclaim_req(dev);
8685 8386
8686 8387 mutex_enter(SD_MUTEX(un));
8388 + if (un->un_retry_timeid != NULL) {
8389 + timeout_id_t temp_id = un->un_retry_timeid;
8390 + un->un_retry_timeid = NULL;
8391 + mutex_exit(SD_MUTEX(un));
8392 + (void) untimeout(temp_id);
8393 + mutex_enter(SD_MUTEX(un));
8687 8394
8395 + if (un->un_retry_bp != NULL) {
8396 + un->un_retry_bp->av_forw = un->un_waitq_headp;
8397 + un->un_waitq_headp = un->un_retry_bp;
8398 + if (un->un_waitq_tailp == NULL)
8399 + un->un_waitq_tailp = un->un_retry_bp;
8400 + un->un_retry_bp = NULL;
8401 + un->un_retry_statp = NULL;
8402 + }
8403 + }
8404 +
8405 + if (DEVI_IS_GONE(SD_DEVINFO(un))) {
8406 + /* abort in-flight IO */
8407 + (void) scsi_abort(SD_ADDRESS(un), NULL);
8408 + /* abort pending IO */
8409 + un->un_failfast_state = SD_FAILFAST_ACTIVE;
8410 + un->un_failfast_bp = NULL;
8411 + sd_failfast_flushq(un, B_TRUE);
8412 + }
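
The retry-bp requeue above exists so the device-gone flush sees un_retry_bp
along with everything else on the wait queue. The body of sd_failfast_flushq()
is not shown in this hunk; as an assumption, a simplified flush of this kind
would look roughly like:

	static void
	sd_flushq_eio_sketch(struct sd_lun *un)
	{
		struct buf *bp, *next;

		/* Snip the whole wait queue while holding SD_MUTEX... */
		mutex_enter(SD_MUTEX(un));
		bp = un->un_waitq_headp;
		un->un_waitq_headp = un->un_waitq_tailp = NULL;
		mutex_exit(SD_MUTEX(un));

		/* ...then fail each buf back to its caller with EIO. */
		while (bp != NULL) {
			next = bp->av_forw;
			bp->av_forw = NULL;
			bioerror(bp, EIO);
			biodone(bp);
			bp = next;
		}
	}
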
8413 +
8688 8414 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8689 8415 if (un->un_direct_priority_timeid != NULL) {
8690 8416 timeout_id_t temp_id = un->un_direct_priority_timeid;
8691 8417 un->un_direct_priority_timeid = NULL;
8692 8418 mutex_exit(SD_MUTEX(un));
8693 8419 (void) untimeout(temp_id);
8694 8420 mutex_enter(SD_MUTEX(un));
8695 8421 }
8696 8422
8697 8423 /* Cancel any active multi-host disk watch thread requests */
8698 8424 if (un->un_mhd_token != NULL) {
8699 8425 mutex_exit(SD_MUTEX(un));
8700 - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
8701 8426 if (scsi_watch_request_terminate(un->un_mhd_token,
8702 8427 SCSI_WATCH_TERMINATE_NOWAIT)) {
8703 8428 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8704 - "sd_dr_detach: Cannot cancel mhd watch request\n");
8429 + "%s: cannot cancel mhd watch request\n", __func__);
8705 8430 /*
8706 8431 * Note: We are returning here after having removed
8707 8432 * some driver timeouts above. This is consistent with
8708 8433 * the legacy implementation but perhaps the watch
8709 8434 * terminate call should be made with the wait flag set.
8710 8435 */
8711 - goto err_stillbusy;
8436 + goto err_remove_event;
8712 8437 }
8713 8438 mutex_enter(SD_MUTEX(un));
8714 8439 un->un_mhd_token = NULL;
8715 8440 }
8716 8441
8717 8442 if (un->un_swr_token != NULL) {
8718 8443 mutex_exit(SD_MUTEX(un));
8719 - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8720 8444 if (scsi_watch_request_terminate(un->un_swr_token,
8721 8445 SCSI_WATCH_TERMINATE_NOWAIT)) {
8722 8446 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8723 - "sd_dr_detach: Cannot cancel swr watch request\n");
8447 + "%s: cannot cancel swr watch request\n", __func__);
8724 8448 /*
8725 8449 * Note: We are returning here after having removed
8726 8450 * some driver timeouts above. This is consistent with
8727 8451 * the legacy implementation but perhaps the watch
8728 8452 * terminate call should be made with the wait flag set.
8729 8453 */
8730 - goto err_stillbusy;
8454 + goto err_remove_event;
8731 8455 }
8732 8456 mutex_enter(SD_MUTEX(un));
8733 8457 un->un_swr_token = NULL;
8734 8458 }
8735 8459
8736 - mutex_exit(SD_MUTEX(un));
8737 -
8738 8460 /*
8739 8461 * Clear any scsi_reset_notifies. We clear the reset notifies
8740 8462 * if we have not registered one.
8741 8463 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8742 8464 */
8465 + mutex_exit(SD_MUTEX(un));
8743 8466 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8744 8467 sd_mhd_reset_notify_cb, (caddr_t)un);
8745 8468
8746 - /*
8747 - * protect the timeout pointers from getting nulled by
8748 - * their callback functions during the cancellation process.
8749 - * In such a scenario untimeout can be invoked with a null value.
8750 - */
8751 - _NOTE(NO_COMPETING_THREADS_NOW);
8752 -
8753 8469 mutex_enter(&un->un_pm_mutex);
8754 8470 if (un->un_pm_idle_timeid != NULL) {
8755 8471 timeout_id_t temp_id = un->un_pm_idle_timeid;
8756 8472 un->un_pm_idle_timeid = NULL;
8757 8473 mutex_exit(&un->un_pm_mutex);
8758 8474
8759 8475 /*
8760 8476 * Timeout is active; cancel it.
8761 8477 * Note that it'll never be active on a device
8762 8478 		 * that does not support PM, so we don't
8763 8479 * have to check before calling pm_idle_component.
8764 8480 */
8765 8481 (void) untimeout(temp_id);
8766 8482 (void) pm_idle_component(SD_DEVINFO(un), 0);
8767 8483 mutex_enter(&un->un_pm_mutex);
8768 8484 }
8769 8485
8770 8486 /*
8771 8487 * Check whether there is already a timeout scheduled for power
8772 8488 	 * management. If yes, then don't lower the power here; that's
8773 8489 	 * the timeout handler's job.
8774 8490 */
8775 8491 if (un->un_pm_timeid != NULL) {
8776 8492 timeout_id_t temp_id = un->un_pm_timeid;
8777 8493 un->un_pm_timeid = NULL;
8778 8494 mutex_exit(&un->un_pm_mutex);
8779 8495 /*
8780 8496 * Timeout is active; cancel it.
8781 8497 * Note that it'll never be active on a device
8782 8498 		 * that does not support PM, so we don't
8783 8499 * have to check before calling pm_idle_component.
[ 21 lines elided ]
8784 8500 */
8785 8501 (void) untimeout(temp_id);
8786 8502 (void) pm_idle_component(SD_DEVINFO(un), 0);
8787 8503
8788 8504 } else {
8789 8505 mutex_exit(&un->un_pm_mutex);
8790 8506 if ((un->un_f_pm_is_enabled == TRUE) &&
8791 8507 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8792 8508 != DDI_SUCCESS)) {
8793 8509 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8794 - "sd_dr_detach: Lower power request failed, ignoring.\n");
8510 + "%s: lower power request failed, ignoring\n",
8511 + __func__);
8795 8512 /*
8796 - * Fix for bug: 4297749, item # 13
8797 8513 * The above test now includes a check to see if PM is
8798 8514 		 * supported by this device before calling
8799 8515 * pm_lower_power().
8800 8516 * Note, the following is not dead code. The call to
8801 8517 * pm_lower_power above will generate a call back into
8802 8518 * our sdpower routine which might result in a timeout
8803 8519 * handler getting activated. Therefore the following
8804 8520 * code is valid and necessary.
8805 8521 */
8806 8522 mutex_enter(&un->un_pm_mutex);
8807 8523 if (un->un_pm_timeid != NULL) {
8808 8524 timeout_id_t temp_id = un->un_pm_timeid;
8809 8525 un->un_pm_timeid = NULL;
8810 8526 mutex_exit(&un->un_pm_mutex);
8811 8527 (void) untimeout(temp_id);
8812 8528 (void) pm_idle_component(SD_DEVINFO(un), 0);
8813 8529 } else {
8814 8530 mutex_exit(&un->un_pm_mutex);
8815 8531 }
8816 8532 }
8817 8533 }
8818 8534
8819 8535 /*
8820 8536 * Cleanup from the scsi_ifsetcap() calls (437868)
8821 8537 * Relocated here from above to be after the call to
8822 8538 * pm_lower_power, which was getting errors.
8823 8539 */
8824 8540 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8825 8541 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8826 8542
8827 8543 /*
8828 8544 	 * Currently, tagged queuing is supported per target by the HBA.
8829 8545 	 * Setting this per lun instance actually sets the capability of this
8830 8546 	 * target in the HBA, which affects those luns already attached on the
8831 8547 	 * same target. So during detach, we can disable this capability
8832 8548 	 * only when this is the only lun left on this target. By doing
8833 8549 * this, we assume a target has the same tagged queuing capability
8834 8550 * for every lun. The condition can be removed when HBA is changed to
8835 8551 * support per lun based tagged queuing capability.
8836 8552 */
8837 8553 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
8838 8554 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8839 8555 }
8840 8556
8841 8557 if (un->un_f_is_fibre == FALSE) {
8842 8558 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8843 8559 }
8844 8560
8845 8561 /*
8846 8562 * Remove any event callbacks, fibre only
8847 8563 */
8848 8564 if (un->un_f_is_fibre == TRUE) {
[ 42 lines elided ]
8849 8565 if ((un->un_insert_event != NULL) &&
8850 8566 (ddi_remove_event_handler(un->un_insert_cb_id) !=
8851 8567 DDI_SUCCESS)) {
8852 8568 /*
8853 8569 * Note: We are returning here after having done
8854 8570 * substantial cleanup above. This is consistent
8855 8571 * with the legacy implementation but this may not
8856 8572 * be the right thing to do.
8857 8573 */
8858 8574 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8859 - "sd_dr_detach: Cannot cancel insert event\n");
8575 + "%s: cannot cancel insert event\n", __func__);
8860 8576 goto err_remove_event;
8861 8577 }
8862 8578 un->un_insert_event = NULL;
8863 8579
8864 8580 if ((un->un_remove_event != NULL) &&
8865 8581 (ddi_remove_event_handler(un->un_remove_cb_id) !=
8866 8582 DDI_SUCCESS)) {
8867 8583 /*
8868 8584 * Note: We are returning here after having done
8869 8585 * substantial cleanup above. This is consistent
8870 8586 * with the legacy implementation but this may not
8871 8587 * be the right thing to do.
8872 8588 */
8873 8589 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8874 - "sd_dr_detach: Cannot cancel remove event\n");
8590 + "%s: cannot cancel remove event\n", __func__);
8875 8591 goto err_remove_event;
8876 8592 }
8877 8593 un->un_remove_event = NULL;
8878 8594 }
8879 8595
8880 8596 /* Do not free the softstate if the callback routine is active */
8881 8597 sd_sync_with_callback(un);
8882 8598
8599 +no_attach_cleanup:
8600 + /*
8601 + * The driver must wait, at least attempt to wait, for any commands
8602 +	 * The driver must wait, or at least attempt to wait, for any
8603 +	 * commands still in the driver to drain.
8604 + mutex_enter(SD_MUTEX(un));
8605 +
8606 + while (un->un_ncmds_in_driver != 0) {
8607 + clock_t max_delay = ddi_get_lbolt() + SEC_TO_TICK(30);
8608 + un->un_f_detach_waiting = 1;
8609 + if (cv_timedwait(&un->un_detach_cv, SD_MUTEX(un),
8610 + max_delay) == -1) {
8611 + break;
8612 + }
8613 + }
8614 +
8615 + un->un_f_detach_waiting = 0;
8616 + mutex_exit(SD_MUTEX(un));
8617 +
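cv_timedwait() returns -1 when the 30-second deadline expires without a
wakeup, so each pass of the loop above either sees un_ncmds_in_driver drop or
gives up. The wakeup half lives in the I/O completion path, which is outside
this hunk; presumably it looks something like:

	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	if (un->un_f_detach_waiting && un->un_ncmds_in_driver == 0)
		cv_signal(&un->un_detach_cv);
	mutex_exit(SD_MUTEX(un));
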
8883 8618 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8884 8619 cmlb_free_handle(&un->un_cmlbhandle);
8885 8620
8886 8621 /*
8887 8622 	 * Hold the detach mutex here, to make sure that no other thread can
8888 8623 	 * ever access a (partially) freed soft state structure.
8889 8624 */
8890 8625 mutex_enter(&sd_detach_mutex);
8891 8626
8892 8627 /*
8893 8628 * Clean up the soft state struct.
8894 8629 * Cleanup is done in reverse order of allocs/inits.
8895 8630 * At this point there should be no competing threads anymore.
8896 8631 */
8897 8632
8898 8633 scsi_fm_fini(devp);
8899 8634
8900 8635 /*
8901 8636 * Deallocate memory for SCSI FMA.
8902 8637 */
8903 8638 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8904 8639
8905 8640 /*
8906 8641 * Unregister and free device id if it was not registered
8907 8642 * by the transport.
8908 8643 */
8909 8644 if (un->un_f_devid_transport_defined == FALSE)
8910 8645 ddi_devid_unregister(devi);
8911 8646
8912 8647 /*
8913 8648 * free the devid structure if allocated before (by ddi_devid_init()
8914 8649 * or ddi_devid_get()).
8915 8650 */
8916 8651 if (un->un_devid) {
8917 8652 ddi_devid_free(un->un_devid);
8918 8653 un->un_devid = NULL;
8919 8654 }
8920 8655
8921 8656 /*
8922 8657 * Destroy wmap cache if it exists.
8923 8658 */
8924 8659 if (un->un_wm_cache != NULL) {
8925 8660 kmem_cache_destroy(un->un_wm_cache);
8926 8661 un->un_wm_cache = NULL;
8927 8662 }
8928 8663
[ 36 lines elided ]
8929 8664 /*
8930 8665 * kstat cleanup is done in detach for all device types (4363169).
8931 8666 * We do not want to fail detach if the device kstats are not deleted
8932 8667 * since there is a confusion about the devo_refcnt for the device.
8933 8668 	 * since there is confusion about the devo_refcnt for the device.
8934 8669 */
8935 8670 if (un->un_stats != NULL) {
8936 8671 kstat_delete(un->un_stats);
8937 8672 un->un_stats = NULL;
8938 8673 }
8674 + if (un->un_unmapstats != NULL) {
8675 + kstat_delete(un->un_unmapstats_ks);
8676 + un->un_unmapstats_ks = NULL;
8677 + un->un_unmapstats = NULL;
8678 + }
8679 + if (un->un_lat_ksp != NULL) {
8680 + kstat_delete(un->un_lat_ksp);
8681 + un->un_lat_stats = NULL;
8682 + un->un_lat_ksp = NULL;
8683 + }
8939 8684 if (un->un_errstats != NULL) {
8940 8685 kstat_delete(un->un_errstats);
8941 8686 un->un_errstats = NULL;
8942 8687 }
8943 8688
8944 8689 /* Remove partition stats */
8945 8690 if (un->un_f_pkstats_enabled) {
8946 8691 for (i = 0; i < NSDMAP; i++) {
8947 8692 if (un->un_pstats[i] != NULL) {
8948 8693 kstat_delete(un->un_pstats[i]);
8949 8694 un->un_pstats[i] = NULL;
8950 8695 }
8951 8696 }
8952 8697 }
8953 8698
8954 8699 /* Remove xbuf registration */
8955 8700 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8956 8701 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8957 8702
8958 8703 /* Remove driver properties */
[ 10 lines elided ]
8959 8704 ddi_prop_remove_all(devi);
8960 8705
8961 8706 mutex_destroy(&un->un_pm_mutex);
8962 8707 cv_destroy(&un->un_pm_busy_cv);
8963 8708
8964 8709 cv_destroy(&un->un_wcc_cv);
8965 8710
8966 8711 /* Open/close semaphore */
8967 8712 sema_destroy(&un->un_semoclose);
8968 8713
8714 + /* Used to wait for outstanding commands */
8715 + cv_destroy(&un->un_detach_cv);
8716 +
8969 8717 /* Removable media condvar. */
8970 8718 cv_destroy(&un->un_state_cv);
8971 8719
8972 8720 /* Suspend/resume condvar. */
8973 8721 cv_destroy(&un->un_suspend_cv);
8974 8722 cv_destroy(&un->un_disk_busy_cv);
8975 8723
8976 8724 sd_free_rqs(un);
8977 8725
8978 8726 /* Free up soft state */
8979 8727 devp->sd_private = NULL;
8980 8728
8981 8729 bzero(un, sizeof (struct sd_lun));
8982 8730
8983 8731 ddi_soft_state_free(sd_state, instance);
8984 8732
8985 8733 mutex_exit(&sd_detach_mutex);
8986 8734
8987 8735 /* This frees up the INQUIRY data associated with the device. */
8988 8736 scsi_unprobe(devp);
8989 8737
8990 8738 /*
8991 8739 * After successfully detaching an instance, we update the information
8992 8740 * of how many luns have been attached in the relative target and
8993 8741 * controller for parallel SCSI. This information is used when sd tries
[ 15 lines elided ]
8994 8742 * to set the tagged queuing capability in HBA.
8995 8743 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
8996 8744 * check if the device is parallel SCSI. However, we don't need to
8997 8745 * check here because we've already checked during attach. No device
8998 8746 * that is not parallel SCSI is in the chain.
8999 8747 */
9000 8748 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
9001 8749 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
9002 8750 }
9003 8751
8752 + ddi_remove_minor_node(devi, NULL);
8753 + (void) devfs_clean(devi, NULL, DV_CLEAN_FORCE);
8754 +
9004 8755 return (DDI_SUCCESS);
9005 8756
9006 8757 err_notclosed:
9007 8758 mutex_exit(SD_MUTEX(un));
9008 8759
9009 -err_stillbusy:
9010 - _NOTE(NO_COMPETING_THREADS_NOW);
9011 -
9012 8760 err_remove_event:
9013 8761 mutex_enter(&sd_detach_mutex);
9014 8762 un->un_detach_count--;
9015 8763 mutex_exit(&sd_detach_mutex);
9016 8764
9017 - SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
8765 + SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: exit failure\n", __func__);
9018 8766 return (DDI_FAILURE);
9019 8767 }
9020 8768
9021 8769
9022 8770 /*
9023 8771 * Function: sd_create_errstats
9024 8772 *
9025 8773 * Description: This routine instantiates the device error stats.
9026 8774 *
9027 8775 * Note: During attach the stats are instantiated first so they are
9028 8776 * available for attach-time routines that utilize the driver
9029 8777 * iopath to send commands to the device. The stats are initialized
9030 8778 * separately so data obtained during some attach-time routines is
9031 8779 * available. (4362483)
9032 8780 *
9033 8781 * Arguments: un - driver soft state (unit) structure
9034 8782 * instance - driver instance
9035 8783 *
9036 8784 * Context: Kernel thread context
9037 8785 */
9038 8786
9039 8787 static void
9040 8788 sd_create_errstats(struct sd_lun *un, int instance)
9041 8789 {
9042 8790 struct sd_errstats *stp;
9043 8791 char kstatmodule_err[KSTAT_STRLEN];
9044 8792 char kstatname[KSTAT_STRLEN];
9045 8793 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));
9046 8794
9047 8795 ASSERT(un != NULL);
9048 8796
9049 8797 if (un->un_errstats != NULL) {
9050 8798 return;
9051 8799 }
9052 8800
9053 8801 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
9054 8802 "%serr", sd_label);
9055 8803 (void) snprintf(kstatname, sizeof (kstatname),
9056 8804 "%s%d,err", sd_label, instance);
9057 8805
9058 8806 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
9059 8807 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
9060 8808
9061 8809 if (un->un_errstats == NULL) {
9062 8810 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
9063 8811 "sd_create_errstats: Failed kstat_create\n");
9064 8812 return;
9065 8813 }
9066 8814
9067 8815 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9068 8816 kstat_named_init(&stp->sd_softerrs, "Soft Errors",
9069 8817 KSTAT_DATA_UINT32);
9070 8818 kstat_named_init(&stp->sd_harderrs, "Hard Errors",
9071 8819 KSTAT_DATA_UINT32);
9072 8820 kstat_named_init(&stp->sd_transerrs, "Transport Errors",
9073 8821 KSTAT_DATA_UINT32);
9074 8822 kstat_named_init(&stp->sd_vid, "Vendor",
9075 8823 KSTAT_DATA_CHAR);
9076 8824 kstat_named_init(&stp->sd_pid, "Product",
9077 8825 KSTAT_DATA_CHAR);
9078 8826 kstat_named_init(&stp->sd_revision, "Revision",
9079 8827 KSTAT_DATA_CHAR);
9080 8828 kstat_named_init(&stp->sd_serial, "Serial No",
9081 8829 KSTAT_DATA_CHAR);
9082 8830 kstat_named_init(&stp->sd_capacity, "Size",
9083 8831 KSTAT_DATA_ULONGLONG);
9084 8832 kstat_named_init(&stp->sd_rq_media_err, "Media Error",
9085 8833 KSTAT_DATA_UINT32);
9086 8834 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
9087 8835 KSTAT_DATA_UINT32);
9088 8836 kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
9089 8837 KSTAT_DATA_UINT32);
9090 8838 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
9091 8839 KSTAT_DATA_UINT32);
9092 8840 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
9093 8841 KSTAT_DATA_UINT32);
9094 8842 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
9095 8843 KSTAT_DATA_UINT32);
9096 8844
9097 8845 un->un_errstats->ks_private = un;
9098 8846 un->un_errstats->ks_update = nulldev;
9099 8847
9100 8848 kstat_install(un->un_errstats);
9101 8849 }
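
Because these are named kstats, they are visible from userland through
libkstat (or the kstat(1M) command). A small reader for instance 0, matching
the "sderr"/"sd0,err" names constructed by the snprintf calls above; this is a
standalone userland sketch, built with -lkstat, not driver code:

	#include <kstat.h>
	#include <stdio.h>

	int
	main(void)
	{
		kstat_ctl_t *kc = kstat_open();
		kstat_t *ksp;
		kstat_named_t *kn;

		if (kc == NULL)
			return (1);
		ksp = kstat_lookup(kc, "sderr", 0, "sd0,err");
		if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1 &&
		    (kn = kstat_data_lookup(ksp, "Hard Errors")) != NULL)
			(void) printf("hard errors: %u\n", kn->value.ui32);
		(void) kstat_close(kc);
		return (0);
	}
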
9102 8850
9103 8851
9104 8852 /*
9105 8853 * Function: sd_set_errstats
9106 8854 *
9107 8855 * Description: This routine sets the value of the vendor id, product id,
9108 8856 * revision, serial number, and capacity device error stats.
9109 8857 *
9110 8858 * Note: During attach the stats are instantiated first so they are
9111 8859 * available for attach-time routines that utilize the driver
9112 8860 * iopath to send commands to the device. The stats are initialized
9113 8861 * separately so data obtained during some attach-time routines is
9114 8862 * available. (4362483)
9115 8863 *
9116 8864 * Arguments: un - driver soft state (unit) structure
9117 8865 *
9118 8866 * Context: Kernel thread context
9119 8867 */
9120 8868
9121 8869 static void
9122 8870 sd_set_errstats(struct sd_lun *un)
9123 8871 {
9124 8872 struct sd_errstats *stp;
9125 8873 char *sn;
9126 8874
9127 8875 ASSERT(un != NULL);
9128 8876 ASSERT(un->un_errstats != NULL);
9129 8877 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9130 8878 ASSERT(stp != NULL);
9131 8879 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9132 8880 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9133 8881 (void) strncpy(stp->sd_revision.value.c,
9134 8882 un->un_sd->sd_inq->inq_revision, 4);
9135 8883
9136 8884 /*
9137 8885 * All the errstats are persistent across detach/attach,
9138 8886 	 * so reset all the errstats here in case of hot
9139 8887 	 * replacement of disk drives, except for unchanged
9140 8888 	 * Sun-qualified drives.
9141 8889 */
9142 8890 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9143 8891 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9144 8892 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9145 8893 stp->sd_softerrs.value.ui32 = 0;
9146 8894 stp->sd_harderrs.value.ui32 = 0;
9147 8895 stp->sd_transerrs.value.ui32 = 0;
9148 8896 stp->sd_rq_media_err.value.ui32 = 0;
9149 8897 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9150 8898 stp->sd_rq_nodev_err.value.ui32 = 0;
9151 8899 stp->sd_rq_recov_err.value.ui32 = 0;
9152 8900 stp->sd_rq_illrq_err.value.ui32 = 0;
9153 8901 stp->sd_rq_pfa_err.value.ui32 = 0;
9154 8902 }
9155 8903
9156 8904 /*
9157 8905 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9158 8906 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9159 8907 * (4376302))
9160 8908 */
9161 8909 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9162 8910 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9163 8911 sizeof (SD_INQUIRY(un)->inq_serial));
9164 8912 } else {
9165 8913 /*
9166 8914 * Set the "Serial No" kstat for non-Sun qualified drives
9167 8915 */
9168 8916 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9169 8917 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9170 8918 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9171 8919 (void) strlcpy(stp->sd_serial.value.c, sn,
9172 8920 sizeof (stp->sd_serial.value.c));
9173 8921 ddi_prop_free(sn);
9174 8922 }
9175 8923 }
9176 8924
9177 8925 if (un->un_f_blockcount_is_valid != TRUE) {
9178 8926 /*
9179 8927 * Set capacity error stat to 0 for no media. This ensures
9180 8928 * a valid capacity is displayed in response to 'iostat -E'
9181 8929 * when no media is present in the device.
9182 8930 */
9183 8931 stp->sd_capacity.value.ui64 = 0;
9184 8932 } else {
9185 8933 /*
9186 8934 * Multiply un_blockcount by un->un_sys_blocksize to get
9187 8935 * capacity.
9188 8936 *
9189 8937 * Note: for non-512 blocksize devices "un_blockcount" has been
9190 8938 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9191 8939 * (un_tgt_blocksize / un->un_sys_blocksize).
9192 8940 */
9193 8941 stp->sd_capacity.value.ui64 = (uint64_t)
9194 8942 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9195 8943 }
9196 8944 }
9197 8945
9198 8946
9199 8947 /*
9200 8948 * Function: sd_set_pstats
9201 8949 *
9202 8950 * Description: This routine instantiates and initializes the partition
9203 8951 * stats for each partition with more than zero blocks.
9204 8952 * (4363169)
9205 8953 *
9206 8954 * Arguments: un - driver soft state (unit) structure
9207 8955 *
9208 8956 * Context: Kernel thread context
9209 8957 */
9210 8958
9211 8959 static void
9212 8960 sd_set_pstats(struct sd_lun *un)
9213 8961 {
[ 186 lines elided ]
9214 8962 char kstatname[KSTAT_STRLEN];
9215 8963 int instance;
9216 8964 int i;
9217 8965 diskaddr_t nblks = 0;
9218 8966 char *partname = NULL;
9219 8967
9220 8968 ASSERT(un != NULL);
9221 8969
9222 8970 instance = ddi_get_instance(SD_DEVINFO(un));
9223 8971
9224 - /* Note:x86: is this a VTOC8/VTOC16 difference? */
8972 + /* XXX is this a VTOC8/VTOC16 difference? */
9225 8973 for (i = 0; i < NSDMAP; i++) {
9226 -
9227 8974 if (cmlb_partinfo(un->un_cmlbhandle, i,
9228 8975 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9229 8976 continue;
9230 8977 mutex_enter(SD_MUTEX(un));
9231 8978
9232 8979 if ((un->un_pstats[i] == NULL) &&
9233 8980 (nblks != 0)) {
9234 8981
9235 8982 (void) snprintf(kstatname, sizeof (kstatname),
9236 8983 "%s%d,%s", sd_label, instance,
9237 8984 partname);
9238 8985
9239 8986 un->un_pstats[i] = kstat_create(sd_label,
9240 8987 instance, kstatname, "partition", KSTAT_TYPE_IO,
[ 4 lines elided ]
9241 8988 1, KSTAT_FLAG_PERSISTENT);
9242 8989 if (un->un_pstats[i] != NULL) {
9243 8990 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9244 8991 kstat_install(un->un_pstats[i]);
9245 8992 }
9246 8993 }
9247 8994 mutex_exit(SD_MUTEX(un));
9248 8995 }
9249 8996 }
9250 8997
9251 -
9252 -#if (defined(__fibre))
9253 8998 /*
9254 - * Function: sd_init_event_callbacks
9255 - *
9256 - * Description: This routine initializes the insertion and removal event
9257 - * callbacks. (fibre only)
9258 - *
9259 - * Arguments: un - driver soft state (unit) structure
9260 - *
9261 - * Context: Kernel thread context
9262 - */
9263 -
9264 -static void
9265 -sd_init_event_callbacks(struct sd_lun *un)
9266 -{
9267 - ASSERT(un != NULL);
9268 -
9269 - if ((un->un_insert_event == NULL) &&
9270 - (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9271 - &un->un_insert_event) == DDI_SUCCESS)) {
9272 - /*
9273 - * Add the callback for an insertion event
9274 - */
9275 - (void) ddi_add_event_handler(SD_DEVINFO(un),
9276 - un->un_insert_event, sd_event_callback, (void *)un,
9277 - &(un->un_insert_cb_id));
9278 - }
9279 -
9280 - if ((un->un_remove_event == NULL) &&
9281 - (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9282 - &un->un_remove_event) == DDI_SUCCESS)) {
9283 - /*
9284 - * Add the callback for a removal event
9285 - */
9286 - (void) ddi_add_event_handler(SD_DEVINFO(un),
9287 - un->un_remove_event, sd_event_callback, (void *)un,
9288 - &(un->un_remove_cb_id));
9289 - }
9290 -}
9291 -
9292 -
9293 -/*
9294 - * Function: sd_event_callback
9295 - *
9296 - * Description: This routine handles insert/remove events (photon). The
9297 - * state is changed to OFFLINE which can be used to supress
9298 - * error msgs. (fibre only)
9299 - *
9300 - * Arguments: un - driver soft state (unit) structure
9301 - *
9302 - * Context: Callout thread context
9303 - */
9304 -/* ARGSUSED */
9305 -static void
9306 -sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9307 - void *bus_impldata)
9308 -{
9309 - struct sd_lun *un = (struct sd_lun *)arg;
9310 -
9311 - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9312 - if (event == un->un_insert_event) {
9313 - SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9314 - mutex_enter(SD_MUTEX(un));
9315 - if (un->un_state == SD_STATE_OFFLINE) {
9316 - if (un->un_last_state != SD_STATE_SUSPENDED) {
9317 - un->un_state = un->un_last_state;
9318 - } else {
9319 - /*
9320 - * We have gone through SUSPEND/RESUME while
9321 - * we were offline. Restore the last state
9322 - */
9323 - un->un_state = un->un_save_state;
9324 - }
9325 - }
9326 - mutex_exit(SD_MUTEX(un));
9327 -
9328 - _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9329 - } else if (event == un->un_remove_event) {
9330 - SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9331 - mutex_enter(SD_MUTEX(un));
9332 - /*
9333 - * We need to handle an event callback that occurs during
9334 - * the suspend operation, since we don't prevent it.
9335 - */
9336 - if (un->un_state != SD_STATE_OFFLINE) {
9337 - if (un->un_state != SD_STATE_SUSPENDED) {
9338 - New_state(un, SD_STATE_OFFLINE);
9339 - } else {
9340 - un->un_last_state = SD_STATE_OFFLINE;
9341 - }
9342 - }
9343 - mutex_exit(SD_MUTEX(un));
9344 - } else {
9345 - scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9346 - "!Unknown event\n");
9347 - }
9348 -
9349 -}
9350 -#endif
9351 -
9352 -/*
9353 8999 * Values related to caching mode page depending on whether the unit is ATAPI.
9354 9000 */
9355 9001 #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
9356 9002 CDB_GROUP1 : CDB_GROUP0)
9357 9003 #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
9358 9004 MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH)
9359 9005 /*
9360 9006  * Use mode_cache_scsi3 to ensure we get all of the mode sense data; otherwise
9361 9007 * the mode select will fail (mode_cache_scsi3 is a superset of mode_caching).
9362 9008 */
9363 9009 #define SDC_BUFLEN(un) (SDC_HDRLEN(un) + MODE_BLK_DESC_LENGTH + \
9364 9010 sizeof (struct mode_cache_scsi3))
9365 9011
9366 9012 static int
9367 9013 sd_get_caching_mode_page(sd_ssc_t *ssc, uchar_t page_control, uchar_t **header,
9368 9014 int *bdlen)
9369 9015 {
9370 9016 struct sd_lun *un = ssc->ssc_un;
9371 9017 struct mode_caching *mode_caching_page;
9372 9018 size_t buflen = SDC_BUFLEN(un);
9373 9019 int hdrlen = SDC_HDRLEN(un);
9374 9020 int rval;
9375 9021
9376 9022 /*
9377 9023 	 * Do a test unit ready; otherwise a mode sense may not work if this
9378 9024 * is the first command sent to the device after boot.
9379 9025 */
9380 9026 if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0)
9381 9027 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9382 9028
9383 9029 /*
9384 9030 * Allocate memory for the retrieved mode page and its headers. Set
9385 9031 * a pointer to the page itself.
9386 9032 */
9387 9033 *header = kmem_zalloc(buflen, KM_SLEEP);
9388 9034
9389 9035 /* Get the information from the device */
9390 9036 rval = sd_send_scsi_MODE_SENSE(ssc, SDC_CDB_GROUP(un), *header, buflen,
9391 9037 page_control | MODEPAGE_CACHING, SD_PATH_DIRECT);
9392 9038 if (rval != 0) {
9393 9039 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, "%s: Mode Sense Failed\n",
9394 9040 __func__);
9395 9041 goto mode_sense_failed;
9396 9042 }
9397 9043
9398 9044 /*
9399 9045 * Determine size of Block Descriptors in order to locate
9400 9046 	 * the mode page data. ATAPI devices return 0, while SCSI devices
9401 9047 * should return MODE_BLK_DESC_LENGTH.
9402 9048 */
9403 9049 if (un->un_f_cfg_is_atapi == TRUE) {
9404 9050 struct mode_header_grp2 *mhp =
9405 9051 (struct mode_header_grp2 *)(*header);
9406 9052 *bdlen = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9407 9053 } else {
9408 9054 *bdlen = ((struct mode_header *)(*header))->bdesc_length;
9409 9055 }
9410 9056
9411 9057 if (*bdlen > MODE_BLK_DESC_LENGTH) {
9412 9058 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
9413 9059 "%s: Mode Sense returned invalid block descriptor length\n",
9414 9060 __func__);
9415 9061 rval = EIO;
9416 9062 goto mode_sense_failed;
9417 9063 }
9418 9064
9419 9065 mode_caching_page = (struct mode_caching *)(*header + hdrlen + *bdlen);
9420 9066 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9421 9067 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
9422 9068 "%s: Mode Sense caching page code mismatch %d\n",
9423 9069 __func__, mode_caching_page->mode_page.code);
9424 9070 rval = EIO;
9425 9071 }
9426 9072
9427 9073 mode_sense_failed:
9428 9074 if (rval != 0) {
9429 9075 kmem_free(*header, buflen);
9430 9076 *header = NULL;
9431 9077 *bdlen = 0;
9432 9078 }
9433 9079 return (rval);
9434 9080 }
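
Every caller locates the caching page with *header + hdrlen + *bdlen; the
layout that arithmetic assumes is sketched below (SCSI units normally report
bdlen == MODE_BLK_DESC_LENGTH, i.e. 8, while ATAPI units use the group-2
header and report 0):

	/*
	 *  *header        *header + hdrlen      *header + hdrlen + *bdlen
	 *     |                  |                         |
	 *     v                  v                         v
	 *     +------------------+-------------------------+--------------+
	 *     | mode header      | block descriptor(s)     | caching page |
	 *     +------------------+-------------------------+--------------+
	 */
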
9435 9081
9436 9082 /*
9437 9083 * Function: sd_cache_control()
9438 9084 *
9439 9085 * Description: This routine is the driver entry point for setting
9440 9086 * read and write caching by modifying the WCE (write cache
9441 9087 * enable) and RCD (read cache disable) bits of mode
9442 9088 * page 8 (MODEPAGE_CACHING).
9443 9089 *
9444 9090 * Arguments: ssc - ssc contains pointer to driver soft state
9445 9091 * (unit) structure for this target.
9446 9092 * rcd_flag - flag for controlling the read cache
9447 9093 * wce_flag - flag for controlling the write cache
9448 9094 *
9449 9095 * Return Code: EIO
9450 9096 * code returned by sd_send_scsi_MODE_SENSE and
9451 9097 * sd_send_scsi_MODE_SELECT
9452 9098 *
9453 9099 * Context: Kernel Thread
9454 9100 */
9455 9101
9456 9102 static int
9457 9103 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
9458 9104 {
9459 9105 struct sd_lun *un = ssc->ssc_un;
9460 9106 struct mode_caching *mode_caching_page;
9461 9107 uchar_t *header;
9462 9108 size_t buflen = SDC_BUFLEN(un);
9463 9109 int hdrlen = SDC_HDRLEN(un);
9464 9110 int bdlen;
9465 9111 int rval;
9466 9112
9467 9113 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen);
9468 9114 switch (rval) {
9469 9115 case 0:
9470 9116 /* Check the relevant bits on successful mode sense */
9471 9117 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9472 9118 bdlen);
9473 9119 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
9474 9120 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
9475 9121 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
9476 9122 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {
9477 9123 size_t sbuflen;
9478 9124 uchar_t save_pg;
9479 9125
9480 9126 /*
9481 9127 * Construct select buffer length based on the
9482 9128 * length of the sense data returned.
9483 9129 */
9484 9130 sbuflen = hdrlen + bdlen + sizeof (struct mode_page) +
9485 9131 (int)mode_caching_page->mode_page.length;
9486 9132
9487 9133 /* Set the caching bits as requested */
9488 9134 if (rcd_flag == SD_CACHE_ENABLE)
9489 9135 mode_caching_page->rcd = 0;
9490 9136 else if (rcd_flag == SD_CACHE_DISABLE)
9491 9137 mode_caching_page->rcd = 1;
9492 9138
9493 9139 if (wce_flag == SD_CACHE_ENABLE)
9494 9140 mode_caching_page->wce = 1;
9495 9141 else if (wce_flag == SD_CACHE_DISABLE)
9496 9142 mode_caching_page->wce = 0;
9497 9143
9498 9144 /*
9499 9145 * Save the page if the mode sense says the
9500 9146 * drive supports it.
9501 9147 */
9502 9148 save_pg = mode_caching_page->mode_page.ps ?
9503 9149 SD_SAVE_PAGE : SD_DONTSAVE_PAGE;
9504 9150
9505 9151 /* Clear reserved bits before mode select */
9506 9152 mode_caching_page->mode_page.ps = 0;
9507 9153
9508 9154 /*
9509 9155 * Clear out mode header for mode select.
9510 9156 * The rest of the retrieved page will be reused.
9511 9157 */
9512 9158 bzero(header, hdrlen);
9513 9159
9514 9160 if (un->un_f_cfg_is_atapi == TRUE) {
9515 9161 struct mode_header_grp2 *mhp =
9516 9162 (struct mode_header_grp2 *)header;
9517 9163 mhp->bdesc_length_hi = bdlen >> 8;
9518 9164 mhp->bdesc_length_lo = (uchar_t)bdlen & 0xff;
9519 9165 } else {
9520 9166 ((struct mode_header *)header)->bdesc_length =
9521 9167 bdlen;
9522 9168 }
9523 9169
9524 9170 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9525 9171
9526 9172 /* Issue mode select to change the cache settings */
9527 9173 rval = sd_send_scsi_MODE_SELECT(ssc, SDC_CDB_GROUP(un),
9528 9174 header, sbuflen, save_pg, SD_PATH_DIRECT);
9529 9175 }
9530 9176 kmem_free(header, buflen);
9531 9177 break;
9532 9178 case EIO:
9533 9179 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9534 9180 break;
9535 9181 default:
9536 9182 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9537 9183 break;
9538 9184 }
9539 9185
9540 9186 return (rval);
9541 9187 }
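
A worked example of the sbuflen computation above for the common non-ATAPI
case (the 0x12-byte caching-page length is typical but device-reported, so
treat it as illustrative):

	/*
	 * sbuflen = hdrlen + bdlen + sizeof (struct mode_page) + page length
	 *         =   4    +   8   +            2              +     18
	 *         = 32 bytes
	 */
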
9542 9188
9543 9189
9544 9190 /*
9545 9191 * Function: sd_get_write_cache_enabled()
9546 9192 *
9547 9193 * Description: This routine is the driver entry point for determining if write
9548 9194 * caching is enabled. It examines the WCE (write cache enable)
9549 9195 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field
9550 9196 * bits set to MODEPAGE_CURRENT.
9551 9197 *
9552 9198 * Arguments: ssc - ssc contains pointer to driver soft state
9553 9199 * (unit) structure for this target.
9554 9200 * is_enabled - pointer to int where write cache enabled state
9555 9201 * is returned (non-zero -> write cache enabled)
9556 9202 *
9557 9203 * Return Code: EIO
9558 9204 * code returned by sd_send_scsi_MODE_SENSE
9559 9205 *
9560 9206 * Context: Kernel Thread
9561 9207 *
9562 9208 * NOTE: If ioctl is added to disable write cache, this sequence should
9563 9209 * be followed so that no locking is required for accesses to
9564 9210 * un->un_f_write_cache_enabled:
9565 9211 * do mode select to clear wce
9566 9212 * do synchronize cache to flush cache
9567 9213 * set un->un_f_write_cache_enabled = FALSE
9568 9214 *
9569 9215 * Conversely, an ioctl to enable the write cache should be done
9570 9216 * in this order:
9571 9217 * set un->un_f_write_cache_enabled = TRUE
9572 9218 * do mode select to set wce
9573 9219 */
9574 9220
9575 9221 static int
9576 9222 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
9577 9223 {
9578 9224 struct sd_lun *un = ssc->ssc_un;
9579 9225 struct mode_caching *mode_caching_page;
9580 9226 uchar_t *header;
9581 9227 size_t buflen = SDC_BUFLEN(un);
9582 9228 int hdrlen = SDC_HDRLEN(un);
9583 9229 int bdlen;
9584 9230 int rval;
9585 9231
9586 9232 /* In case of error, flag as enabled */
9587 9233 *is_enabled = TRUE;
9588 9234
9589 9235 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen);
9590 9236 switch (rval) {
9591 9237 case 0:
9592 9238 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9593 9239 bdlen);
9594 9240 *is_enabled = mode_caching_page->wce;
9595 9241 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9596 9242 kmem_free(header, buflen);
9597 9243 break;
9598 9244 case EIO: {
9599 9245 /*
9600 9246 		 * Some disks do not support Mode Sense(6), so we
9601 9247 * should ignore this kind of error (sense key is
9602 9248 * 0x5 - illegal request).
9603 9249 */
9604 9250 uint8_t *sensep;
9605 9251 int senlen;
9606 9252
9607 9253 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
9608 9254 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
9609 9255 ssc->ssc_uscsi_cmd->uscsi_rqresid);
9610 9256
9611 9257 if (senlen > 0 &&
9612 9258 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
9613 9259 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
9614 9260 } else {
9615 9261 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9616 9262 }
9617 9263 break;
9618 9264 }
9619 9265 default:
9620 9266 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9621 9267 break;
9622 9268 }
9623 9269
9624 9270 return (rval);
9625 9271 }
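
A minimal sketch of a write-cache-disable path that follows the NOTE's
ordering (error handling elided; SD_CACHE_NOCHANGE leaves the read cache
setting alone, and the NULL dk_callback makes the flush synchronous — both
assumptions about routines defined elsewhere in this file):

	if (sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_DISABLE) == 0 &&
	    sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL) == 0)
		un->un_f_write_cache_enabled = FALSE;
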
9626 9272
9627 9273 /*
9628 9274 * Function: sd_get_write_cache_changeable()
9629 9275 *
9630 9276 * Description: This routine is the driver entry point for determining if write
9631 9277 * caching is changeable. It examines the WCE (write cache enable)
9632 9278 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field
9633 9279 * bits set to MODEPAGE_CHANGEABLE.
9634 9280 *
9635 9281 * Arguments: ssc - ssc contains pointer to driver soft state
9636 9282 * (unit) structure for this target.
9637 9283 * is_changeable - pointer to int where write cache changeable
9638 9284 * state is returned (non-zero -> write cache
9639 9285 * changeable)
9640 9286 *
9641 9287 * Context: Kernel Thread
9642 9288 */
9643 9289
9644 9290 static void
9645 9291 sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable)
9646 9292 {
9647 9293 struct sd_lun *un = ssc->ssc_un;
9648 9294 struct mode_caching *mode_caching_page;
9649 9295 uchar_t *header;
9650 9296 size_t buflen = SDC_BUFLEN(un);
9651 9297 int hdrlen = SDC_HDRLEN(un);
9652 9298 int bdlen;
9653 9299 int rval;
9654 9300
9655 9301 	/* In case of error, flag as changeable */
9656 9302 *is_changeable = TRUE;
9657 9303
9658 9304 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CHANGEABLE, &header,
9659 9305 &bdlen);
9660 9306 switch (rval) {
9661 9307 case 0:
9662 9308 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9663 9309 bdlen);
9664 9310 *is_changeable = mode_caching_page->wce;
9665 9311 kmem_free(header, buflen);
9666 9312 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9667 9313 break;
9668 9314 case EIO:
9669 9315 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9670 9316 break;
9671 9317 default:
9672 9318 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9673 9319 break;
9674 9320 }
9675 9321 }
9676 9322
9677 9323 /*
9678 9324 * Function: sd_get_nv_sup()
9679 9325 *
9680 9326 * Description: This routine is the driver entry point for
9681 9327 * determining whether non-volatile cache is supported. This
9682 9328 * determination process works as follows:
9683 9329 *
9684 9330 * 1. sd first queries sd.conf on whether
9685 9331 * suppress_cache_flush bit is set for this device.
9686 9332 *
9687 9333  *		    2. if not set there, sd queries the internal disk table.
9688 9334 *
9689 9335  *		    3. if either sd.conf or the internal disk table specifies
9690 9336  *		    that cache flush be suppressed, we don't bother checking
9691 9337  *		    the NV_SUP bit.
9692 9338 *
9693 9339 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9694 9340 * the optional INQUIRY VPD page 0x86. If the device
9695 9341 * supports VPD page 0x86, sd examines the NV_SUP
9696 9342 * (non-volatile cache support) bit in the INQUIRY VPD page
9697 9343 * 0x86:
9698 9344  *		    o If NV_SUP bit is set, sd assumes the device has a
9699 9345  *		      non-volatile cache and sets
9700 9346  *		      un_f_sync_nv_supported to TRUE.
9701 9347  *		    o Otherwise the cache is not non-volatile, and
9702 9348  *		      un_f_sync_nv_supported is set to FALSE.
9703 9349 *
9704 9350 * Arguments: un - driver soft state (unit) structure
9705 9351 *
9706 9352 * Return Code:
9707 9353 *
9708 9354 * Context: Kernel Thread
9709 9355 */
9710 9356
9711 9357 static void
9712 9358 sd_get_nv_sup(sd_ssc_t *ssc)
9713 9359 {
9714 9360 int rval = 0;
9715 9361 uchar_t *inq86 = NULL;
9716 9362 size_t inq86_len = MAX_INQUIRY_SIZE;
9717 9363 size_t inq86_resid = 0;
9718 9364 struct dk_callback *dkc;
9719 9365 struct sd_lun *un;
9720 9366
9721 9367 ASSERT(ssc != NULL);
9722 9368 un = ssc->ssc_un;
9723 9369 ASSERT(un != NULL);
9724 9370
9725 9371 mutex_enter(SD_MUTEX(un));
9726 9372
9727 9373 /*
9728 9374 * Be conservative on the device's support of
9729 9375 * SYNC_NV bit: un_f_sync_nv_supported is
9730 9376 * initialized to be false.
9731 9377 */
9732 9378 un->un_f_sync_nv_supported = FALSE;
9733 9379
9734 9380 /*
9735 9381 * If either sd.conf or internal disk table
9736 9382 * specifies cache flush be suppressed, then
9737 9383 * we don't bother checking NV_SUP bit.
9738 9384 */
9739 9385 if (un->un_f_suppress_cache_flush == TRUE) {
9740 9386 mutex_exit(SD_MUTEX(un));
9741 9387 return;
9742 9388 }
9743 9389
9744 9390 if (sd_check_vpd_page_support(ssc) == 0 &&
[ 382 lines elided ]
9745 9391 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9746 9392 mutex_exit(SD_MUTEX(un));
9747 9393 /* collect page 86 data if available */
9748 9394 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9749 9395
9750 9396 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9751 9397 0x01, 0x86, &inq86_resid);
9752 9398
9753 9399 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9754 9400 SD_TRACE(SD_LOG_COMMON, un,
9755 - "sd_get_nv_sup: \
9756 - successfully get VPD page: %x \
9757 - PAGE LENGTH: %x BYTE 6: %x\n",
9401 + "sd_get_nv_sup: successfully get VPD page: %x "
9402 + "PAGE LENGTH: %x BYTE 6: %x\n",
9758 9403 inq86[1], inq86[3], inq86[6]);
9759 9404
9760 9405 mutex_enter(SD_MUTEX(un));
9761 9406 /*
9762 9407 			 * Check the NV_SUP bit: only if the device
9763 9408 			 * reports it as 1 will un_f_sync_nv_supported
9764 9409 			 * be set to TRUE.
9765 9410 */
9766 9411 if (inq86[6] & SD_VPD_NV_SUP) {
9767 9412 un->un_f_sync_nv_supported = TRUE;
9768 9413 }
9769 9414 mutex_exit(SD_MUTEX(un));
9770 9415 } else if (rval != 0) {
9771 9416 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9772 9417 }
9773 9418
9774 9419 kmem_free(inq86, inq86_len);
9775 9420 } else {
9776 9421 mutex_exit(SD_MUTEX(un));
9777 9422 }
9778 9423
9779 9424 /*
9780 9425 * Send a SYNC CACHE command to check whether
9781 9426 	 * the SYNC_NV bit is supported; by this point
9782 9427 	 * un_f_sync_nv_supported should hold the correct value.
9783 9428 */
9784 9429 mutex_enter(SD_MUTEX(un));
9785 9430 if (un->un_f_sync_nv_supported) {
9786 9431 mutex_exit(SD_MUTEX(un));
9787 9432 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9788 9433 dkc->dkc_flag = FLUSH_VOLATILE;
9789 9434 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9790 9435
9791 9436 /*
9792 9437 * Send a TEST UNIT READY command to the device. This should
9793 9438 * clear any outstanding UNIT ATTENTION that may be present.
9794 9439 */
9795 9440 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9796 9441 if (rval != 0)
9797 9442 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9798 9443
9799 9444 kmem_free(dkc, sizeof (struct dk_callback));
9800 9445 } else {
9801 9446 mutex_exit(SD_MUTEX(un));
9802 9447 }
9803 9448
9804 9449 	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: "
9805 9450 	    "un_f_suppress_cache_flush is set to %d\n",
9806 9451 un->un_f_suppress_cache_flush);
9807 9452 }
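Compressed to its net effect on the soft state, the logic above amounts to the following sketch (editorial; inq86 stands for the fetched VPD page 0x86 buffer, and the other names are taken from the function itself):

	/* Sketch: how un_f_sync_nv_supported ends up set. */
	un->un_f_sync_nv_supported = FALSE;		/* conservative default */
	if (!un->un_f_suppress_cache_flush &&		/* sd.conf / disk table */
	    (un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) &&
	    (inq86[6] & SD_VPD_NV_SUP))			/* VPD 0x86, byte 6 */
		un->un_f_sync_nv_supported = TRUE;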
9808 9453
9809 9454 /*
9810 9455 * Function: sd_make_device
9811 9456 *
9812 9457 * Description: Utility routine to return the Solaris device number from
9813 9458 * the data in the device's dev_info structure.
9814 9459 *
9815 9460 * Return Code: The Solaris device number
9816 9461 *
9817 9462 * Context: Any
9818 9463 */
9819 9464
9820 9465 static dev_t
9821 9466 sd_make_device(dev_info_t *devi)
9822 9467 {
9823 9468 return (makedevice(ddi_driver_major(devi),
9824 9469 ddi_get_instance(devi) << SDUNIT_SHIFT));
9825 9470 }
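A sketch of the resulting encoding, assuming the SDUNIT/SDPART minor-number macros from sddef.h that this file uses elsewhere (e.g. in sdopen and sdread):

	/* Sketch: sd_make_device() round-trips through SDUNIT()/SDPART(). */
	dev_t dev = sd_make_device(devi);	/* partition bits are zero */
	ASSERT(SDUNIT(dev) == ddi_get_instance(devi));
	ASSERT(SDPART(dev) == 0);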
9826 9471
9827 9472
9828 9473 /*
9829 9474 * Function: sd_pm_entry
9830 9475 *
9831 9476 * Description: Called at the start of a new command to manage power
9832 9477 * and busy status of a device. This includes determining whether
9833 9478 * the current power state of the device is sufficient for
9834 9479 * performing the command or whether it must be changed.
9835 9480 * The PM framework is notified appropriately.
9836 9481 * Only with a return status of DDI_SUCCESS will the
9837 9482 * component be busy to the framework.
9838 9483 *
9839 9484 * All callers of sd_pm_entry must check the return status
9840 9485  *		  and only call sd_pm_exit if it was DDI_SUCCESS. A status
9841 9486  *		  of DDI_FAILURE indicates the device failed to power up.
9842 9487  *		  In this case un_pm_count has been adjusted so the result
9843 9488  *		  on exit is still powered down, i.e. count is less than 0.
9844 9489 * Calling sd_pm_exit with this count value hits an ASSERT.
9845 9490 *
9846 9491 * Return Code: DDI_SUCCESS or DDI_FAILURE
9847 9492 *
9848 9493 * Context: Kernel thread context.
9849 9494 */
9850 9495
9851 9496 static int
9852 9497 sd_pm_entry(struct sd_lun *un)
9853 9498 {
9854 9499 int return_status = DDI_SUCCESS;
9855 9500
9856 9501 ASSERT(!mutex_owned(SD_MUTEX(un)));
9857 9502 ASSERT(!mutex_owned(&un->un_pm_mutex));
9858 9503
9859 9504 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9860 9505
9861 9506 if (un->un_f_pm_is_enabled == FALSE) {
9862 9507 SD_TRACE(SD_LOG_IO_PM, un,
9863 9508 "sd_pm_entry: exiting, PM not enabled\n");
9864 9509 return (return_status);
9865 9510 }
9866 9511
9867 9512 /*
9868 9513 * Just increment a counter if PM is enabled. On the transition from
9869 9514 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9870 9515 * the count with each IO and mark the device as idle when the count
9871 9516 * hits 0.
9872 9517 *
9873 9518 * If the count is less than 0 the device is powered down. If a powered
9874 9519 * down device is successfully powered up then the count must be
9875 9520 * incremented to reflect the power up. Note that it'll get incremented
9876 9521 * a second time to become busy.
9877 9522 *
9878 9523 * Because the following has the potential to change the device state
9879 9524 * and must release the un_pm_mutex to do so, only one thread can be
9880 9525 * allowed through at a time.
9881 9526 */
9882 9527
9883 9528 mutex_enter(&un->un_pm_mutex);
9884 9529 while (un->un_pm_busy == TRUE) {
9885 9530 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9886 9531 }
9887 9532 un->un_pm_busy = TRUE;
9888 9533
9889 9534 if (un->un_pm_count < 1) {
9890 9535
9891 9536 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9892 9537
9893 9538 /*
9894 9539 * Indicate we are now busy so the framework won't attempt to
9895 9540 * power down the device. This call will only fail if either
9896 9541 * we passed a bad component number or the device has no
9897 9542 * components. Neither of these should ever happen.
9898 9543 */
9899 9544 mutex_exit(&un->un_pm_mutex);
9900 9545 return_status = pm_busy_component(SD_DEVINFO(un), 0);
9901 9546 ASSERT(return_status == DDI_SUCCESS);
9902 9547
9903 9548 mutex_enter(&un->un_pm_mutex);
9904 9549
9905 9550 if (un->un_pm_count < 0) {
9906 9551 mutex_exit(&un->un_pm_mutex);
9907 9552
9908 9553 SD_TRACE(SD_LOG_IO_PM, un,
9909 9554 "sd_pm_entry: power up component\n");
9910 9555
9911 9556 /*
9912 9557 * pm_raise_power will cause sdpower to be called
9913 9558 * which brings the device power level to the
9914 9559 			 * desired state. If successful, un_pm_count and
9915 9560 * un_power_level will be updated appropriately.
9916 9561 */
9917 9562 return_status = pm_raise_power(SD_DEVINFO(un), 0,
9918 9563 SD_PM_STATE_ACTIVE(un));
9919 9564
9920 9565 mutex_enter(&un->un_pm_mutex);
9921 9566
9922 9567 if (return_status != DDI_SUCCESS) {
9923 9568 /*
9924 9569 * Power up failed.
9925 9570 * Idle the device and adjust the count
9926 9571 * so the result on exit is that we're
9927 9572 				 * still powered down, i.e. count is less than 0.
9928 9573 */
9929 9574 SD_TRACE(SD_LOG_IO_PM, un,
9930 9575 "sd_pm_entry: power up failed,"
9931 9576 " idle the component\n");
9932 9577
9933 9578 (void) pm_idle_component(SD_DEVINFO(un), 0);
9934 9579 un->un_pm_count--;
9935 9580 } else {
9936 9581 /*
9937 9582 * Device is powered up, verify the
9938 9583 * count is non-negative.
9939 9584 * This is debug only.
9940 9585 */
9941 9586 ASSERT(un->un_pm_count == 0);
9942 9587 }
9943 9588 }
9944 9589
9945 9590 if (return_status == DDI_SUCCESS) {
9946 9591 /*
9947 9592 * For performance, now that the device has been tagged
9948 9593 * as busy, and it's known to be powered up, update the
9949 9594 * chain types to use jump tables that do not include
9950 9595 * pm. This significantly lowers the overhead and
9951 9596 * therefore improves performance.
9952 9597 */
9953 9598
9954 9599 mutex_exit(&un->un_pm_mutex);
9955 9600 mutex_enter(SD_MUTEX(un));
9956 9601 SD_TRACE(SD_LOG_IO_PM, un,
9957 9602 "sd_pm_entry: changing uscsi_chain_type from %d\n",
9958 9603 un->un_uscsi_chain_type);
9959 9604
9960 9605 if (un->un_f_non_devbsize_supported) {
9961 9606 un->un_buf_chain_type =
9962 9607 SD_CHAIN_INFO_RMMEDIA_NO_PM;
9963 9608 } else {
9964 9609 un->un_buf_chain_type =
9965 9610 SD_CHAIN_INFO_DISK_NO_PM;
9966 9611 }
9967 9612 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
9968 9613
9969 9614 SD_TRACE(SD_LOG_IO_PM, un,
9970 9615 " changed uscsi_chain_type to %d\n",
9971 9616 un->un_uscsi_chain_type);
9972 9617 mutex_exit(SD_MUTEX(un));
9973 9618 mutex_enter(&un->un_pm_mutex);
9974 9619
9975 9620 if (un->un_pm_idle_timeid == NULL) {
9976 9621 /* 300 ms. */
9977 9622 un->un_pm_idle_timeid =
9978 9623 timeout(sd_pm_idletimeout_handler, un,
9979 9624 (drv_usectohz((clock_t)300000)));
9980 9625 /*
9981 9626 * Include an extra call to busy which keeps the
9982 9627 				 * device busy with respect to the PM layer
9983 9628 * until the timer fires, at which time it'll
9984 9629 * get the extra idle call.
9985 9630 */
9986 9631 (void) pm_busy_component(SD_DEVINFO(un), 0);
9987 9632 }
9988 9633 }
9989 9634 }
9990 9635 un->un_pm_busy = FALSE;
9991 9636 /* Next... */
9992 9637 cv_signal(&un->un_pm_busy_cv);
9993 9638
9994 9639 un->un_pm_count++;
9995 9640
9996 9641 SD_TRACE(SD_LOG_IO_PM, un,
9997 9642 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
9998 9643
9999 9644 mutex_exit(&un->un_pm_mutex);
10000 9645
10001 9646 return (return_status);
10002 9647 }
10003 9648
10004 9649
10005 9650 /*
10006 9651 * Function: sd_pm_exit
10007 9652 *
10008 9653 * Description: Called at the completion of a command to manage busy
10009 9654 * status for the device. If the device becomes idle the
10010 9655 * PM framework is notified.
10011 9656 *
10012 9657 * Context: Kernel thread context
10013 9658 */
10014 9659
10015 9660 static void
10016 9661 sd_pm_exit(struct sd_lun *un)
10017 9662 {
10018 9663 ASSERT(!mutex_owned(SD_MUTEX(un)));
10019 9664 ASSERT(!mutex_owned(&un->un_pm_mutex));
10020 9665
10021 9666 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
10022 9667
10023 9668 /*
10024 9669 * After attach the following flag is only read, so don't
10025 9670 * take the penalty of acquiring a mutex for it.
10026 9671 */
10027 9672 if (un->un_f_pm_is_enabled == TRUE) {
10028 9673
10029 9674 mutex_enter(&un->un_pm_mutex);
10030 9675 un->un_pm_count--;
10031 9676
10032 9677 SD_TRACE(SD_LOG_IO_PM, un,
10033 9678 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
10034 9679
10035 9680 ASSERT(un->un_pm_count >= 0);
10036 9681 if (un->un_pm_count == 0) {
10037 9682 mutex_exit(&un->un_pm_mutex);
10038 9683
10039 9684 SD_TRACE(SD_LOG_IO_PM, un,
10040 9685 "sd_pm_exit: idle component\n");
10041 9686
10042 9687 (void) pm_idle_component(SD_DEVINFO(un), 0);
10043 9688
10044 9689 } else {
10045 9690 mutex_exit(&un->un_pm_mutex);
10046 9691 }
10047 9692 }
10048 9693
10049 9694 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
10050 9695 }
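Taken together, the two routines above define the caller contract spelled out in the sd_pm_entry() header comment; a minimal sketch of the canonical pairing:

	/* Sketch: every sd_pm_entry() success must be matched by sd_pm_exit(). */
	if (sd_pm_entry(un) != DDI_SUCCESS) {
		return (EIO);	/* power-up failed; do NOT call sd_pm_exit() */
	}
	/* ... issue the command; the component stays busy to the PM framework ... */
	sd_pm_exit(un);		/* may mark the component idle again */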
10051 9696
10052 9697
10053 9698 /*
10054 9699 * Function: sdopen
10055 9700 *
10056 9701 * Description: Driver's open(9e) entry point function.
10057 9702 *
10058 9703  * Arguments:	dev_p	  - pointer to device number
10059 9704 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10060 9705 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10061 9706 * cred_p - user credential pointer
10062 9707 *
10063 9708 * Return Code: EINVAL
10064 9709 * ENXIO
10065 9710 * EIO
10066 9711 * EROFS
10067 9712 * EBUSY
10068 9713 *
10069 9714 * Context: Kernel thread context
10070 9715 */
10071 9716 /* ARGSUSED */
10072 9717 static int
10073 9718 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10074 9719 {
10075 9720 struct sd_lun *un;
10076 9721 int nodelay;
10077 9722 int part;
10078 9723 uint64_t partmask;
10079 9724 int instance;
10080 9725 dev_t dev;
10081 9726 int rval = EIO;
10082 9727 diskaddr_t nblks = 0;
10083 9728 diskaddr_t label_cap;
10084 9729
10085 9730 /* Validate the open type */
10086 9731 if (otyp >= OTYPCNT) {
10087 9732 return (EINVAL);
10088 9733 }
[ 321 lines elided ]
10089 9734
10090 9735 dev = *dev_p;
10091 9736 instance = SDUNIT(dev);
10092 9737 mutex_enter(&sd_detach_mutex);
10093 9738
10094 9739 /*
10095 9740 * Fail the open if there is no softstate for the instance, or
10096 9741 * if another thread somewhere is trying to detach the instance.
10097 9742 */
10098 9743 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10099 - (un->un_detach_count != 0)) {
9744 + un->un_detach_count != 0 || DEVI_IS_GONE(SD_DEVINFO(un))) {
10100 9745 mutex_exit(&sd_detach_mutex);
10101 9746 /*
10102 - * The probe cache only needs to be cleared when open (9e) fails
10103 - * with ENXIO (4238046).
9747 + * The probe cache only needs to be cleared when open (9E) fails
9748 + * with ENXIO.
10104 9749 */
10105 - /*
10106 - * un-conditionally clearing probe cache is ok with
10107 - * separate sd/ssd binaries
10108 - * x86 platform can be an issue with both parallel
10109 - * and fibre in 1 binary
10110 - */
10111 9750 sd_scsi_clear_probe_cache();
10112 9751 return (ENXIO);
10113 9752 }
10114 9753
10115 9754 /*
10116 9755 * The un_layer_count is to prevent another thread in specfs from
10117 9756 * trying to detach the instance, which can happen when we are
10118 9757 * called from a higher-layer driver instead of thru specfs.
10119 9758 * This will not be needed when DDI provides a layered driver
10120 9759 * interface that allows specfs to know that an instance is in
10121 9760 * use by a layered driver & should not be detached.
10122 9761 *
10123 9762 * Note: the semantics for layered driver opens are exactly one
10124 9763 * close for every open.
10125 9764 */
10126 9765 if (otyp == OTYP_LYR) {
10127 9766 un->un_layer_count++;
10128 9767 }
10129 9768
10130 9769 /*
10131 9770 * Keep a count of the current # of opens in progress. This is because
10132 9771 * some layered drivers try to call us as a regular open. This can
10133 9772 * cause problems that we cannot prevent, however by keeping this count
10134 9773 * we can at least keep our open and detach routines from racing against
10135 9774 * each other under such conditions.
10136 9775 */
10137 9776 un->un_opens_in_progress++;
10138 9777 mutex_exit(&sd_detach_mutex);
10139 9778
10140 9779 nodelay = (flag & (FNDELAY | FNONBLOCK));
10141 9780 part = SDPART(dev);
10142 9781 partmask = 1 << part;
10143 9782
10144 9783 /*
10145 9784 * We use a semaphore here in order to serialize
10146 9785 * open and close requests on the device.
10147 9786 */
10148 9787 sema_p(&un->un_semoclose);
10149 9788
10150 9789 mutex_enter(SD_MUTEX(un));
[ 30 lines elided ]
10151 9790
10152 9791 /*
10153 9792 * All device accesses go thru sdstrategy() where we check
10154 9793 * on suspend status but there could be a scsi_poll command,
10155 9794 * which bypasses sdstrategy(), so we need to check pm
10156 9795 * status.
10157 9796 */
10158 9797
10159 9798 if (!nodelay) {
10160 9799 while ((un->un_state == SD_STATE_SUSPENDED) ||
10161 - (un->un_state == SD_STATE_PM_CHANGING)) {
9800 + (un->un_state == SD_STATE_PM_CHANGING) ||
9801 + (un->un_state == SD_STATE_ATTACHING)) {
10162 9802 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10163 9803 }
10164 -
10165 9804 mutex_exit(SD_MUTEX(un));
10166 9805 if (sd_pm_entry(un) != DDI_SUCCESS) {
10167 9806 rval = EIO;
10168 9807 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10169 9808 "sdopen: sd_pm_entry failed\n");
10170 9809 goto open_failed_with_pm;
10171 9810 }
10172 9811 mutex_enter(SD_MUTEX(un));
9812 + } else if (un->un_state == SD_STATE_ATTACH_FAILED) {
9813 + mutex_exit(SD_MUTEX(un));
9814 + rval = EIO;
9815 + SD_ERROR(SD_LOG_OPEN_CLOSE, un,
9816 + "sdopen: attach failed, can't open\n");
9817 + goto open_failed_not_attached;
10173 9818 }
10174 9819
10175 9820 /* check for previous exclusive open */
10176 9821 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10177 9822 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10178 9823 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10179 9824 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10180 9825
10181 9826 if (un->un_exclopen & (partmask)) {
10182 9827 goto excl_open_fail;
10183 9828 }
10184 9829
10185 9830 if (flag & FEXCL) {
10186 9831 int i;
10187 9832 if (un->un_ocmap.lyropen[part]) {
10188 9833 goto excl_open_fail;
10189 9834 }
10190 9835 for (i = 0; i < (OTYPCNT - 1); i++) {
10191 9836 if (un->un_ocmap.regopen[i] & (partmask)) {
10192 9837 goto excl_open_fail;
10193 9838 }
10194 9839 }
10195 9840 }
10196 9841
10197 9842 /*
10198 9843 * Check the write permission if this is a removable media device,
10199 9844 * NDELAY has not been set, and writable permission is requested.
10200 9845 *
10201 9846 * Note: If NDELAY was set and this is write-protected media the WRITE
10202 9847 * attempt will fail with EIO as part of the I/O processing. This is a
10203 9848 * more permissive implementation that allows the open to succeed and
10204 9849 * WRITE attempts to fail when appropriate.
10205 9850 */
10206 9851 if (un->un_f_chk_wp_open) {
10207 9852 if ((flag & FWRITE) && (!nodelay)) {
10208 9853 mutex_exit(SD_MUTEX(un));
10209 9854 /*
10210 9855 			 * Defer the write-permission check for a writable
10211 9856 			 * DVD drive until sdstrategy; do not fail the open
10212 9857 			 * even if FWRITE is set, since writability depends
10213 9858 			 * on the media, and the media can change after the
10214 9859 			 * call to open().
10215 9860 */
10216 9861 if (un->un_f_dvdram_writable_device == FALSE) {
10217 9862 if (ISCD(un) || sr_check_wp(dev)) {
10218 9863 rval = EROFS;
10219 9864 mutex_enter(SD_MUTEX(un));
10220 9865 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10221 9866 "write to cd or write protected media\n");
10222 9867 goto open_fail;
10223 9868 }
10224 9869 }
10225 9870 mutex_enter(SD_MUTEX(un));
10226 9871 }
10227 9872 }
10228 9873
10229 9874 /*
10230 9875 * If opening in NDELAY/NONBLOCK mode, just return.
10231 9876 * Check if disk is ready and has a valid geometry later.
10232 9877 */
10233 9878 if (!nodelay) {
10234 9879 sd_ssc_t *ssc;
10235 9880
10236 9881 mutex_exit(SD_MUTEX(un));
10237 9882 ssc = sd_ssc_init(un);
10238 9883 rval = sd_ready_and_valid(ssc, part);
10239 9884 sd_ssc_fini(ssc);
10240 9885 mutex_enter(SD_MUTEX(un));
10241 9886 /*
10242 9887 * Fail if device is not ready or if the number of disk
10243 9888 		 * blocks is zero or negative for non-CD devices.
10244 9889 */
10245 9890
10246 9891 nblks = 0;
10247 9892
10248 9893 if (rval == SD_READY_VALID && (!ISCD(un))) {
10249 9894 /* if cmlb_partinfo fails, nblks remains 0 */
10250 9895 mutex_exit(SD_MUTEX(un));
10251 9896 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10252 9897 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
[ 70 lines elided ]
10253 9898 mutex_enter(SD_MUTEX(un));
10254 9899 }
10255 9900
10256 9901 if ((rval != SD_READY_VALID) ||
10257 9902 (!ISCD(un) && nblks <= 0)) {
10258 9903 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10259 9904 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10260 9905 "device not ready or invalid disk block value\n");
10261 9906 goto open_fail;
10262 9907 }
10263 -#if defined(__i386) || defined(__amd64)
10264 9908 } else {
10265 9909 uchar_t *cp;
10266 9910 /*
10267 9911 * x86 requires special nodelay handling, so that p0 is
10268 9912 * always defined and accessible.
10269 9913 * Invalidate geometry only if device is not already open.
10270 9914 */
10271 9915 cp = &un->un_ocmap.chkd[0];
10272 9916 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10273 9917 if (*cp != (uchar_t)0) {
10274 9918 break;
10275 9919 }
10276 9920 cp++;
10277 9921 }
10278 9922 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10279 9923 mutex_exit(SD_MUTEX(un));
10280 9924 cmlb_invalidate(un->un_cmlbhandle,
10281 9925 (void *)SD_PATH_DIRECT);
10282 9926 mutex_enter(SD_MUTEX(un));
10283 9927 }
10284 -
10285 -#endif
10286 9928 }
10287 9929
10288 9930 if (otyp == OTYP_LYR) {
10289 9931 un->un_ocmap.lyropen[part]++;
10290 9932 } else {
10291 9933 un->un_ocmap.regopen[otyp] |= partmask;
10292 9934 }
10293 9935
10294 9936 /* Set up open and exclusive open flags */
10295 9937 if (flag & FEXCL) {
10296 9938 un->un_exclopen |= (partmask);
10297 9939 }
10298 9940
10299 9941 /*
10300 9942 * If the lun is EFI labeled and lun capacity is greater than the
10301 9943 * capacity contained in the label, log a sys-event to notify the
10302 9944 * interested module.
10303 9945 * To avoid an infinite loop of logging sys-event, we only log the
10304 9946 * event when the lun is not opened in NDELAY mode. The event handler
10305 9947 * should open the lun in NDELAY mode.
10306 9948 */
10307 9949 if (!nodelay) {
10308 9950 mutex_exit(SD_MUTEX(un));
10309 9951 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
10310 9952 (void*)SD_PATH_DIRECT) == 0) {
10311 9953 mutex_enter(SD_MUTEX(un));
10312 9954 if (un->un_f_blockcount_is_valid &&
10313 9955 un->un_blockcount > label_cap &&
10314 9956 un->un_f_expnevent == B_FALSE) {
10315 9957 un->un_f_expnevent = B_TRUE;
10316 9958 mutex_exit(SD_MUTEX(un));
10317 9959 sd_log_lun_expansion_event(un,
10318 9960 (nodelay ? KM_NOSLEEP : KM_SLEEP));
[ 23 lines elided ]
10319 9961 mutex_enter(SD_MUTEX(un));
10320 9962 }
10321 9963 } else {
10322 9964 mutex_enter(SD_MUTEX(un));
10323 9965 }
10324 9966 }
10325 9967
10326 9968 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10327 9969 "open of part %d type %d\n", part, otyp);
10328 9970
9971 + /*
9972 + * If we made it here, the disk is alive.
9973 + * Make sure it is set to normal state.
9974 + */
9975 + New_state(un, SD_STATE_NORMAL);
9976 +
10329 9977 mutex_exit(SD_MUTEX(un));
10330 9978 if (!nodelay) {
10331 9979 sd_pm_exit(un);
10332 9980 }
10333 9981
10334 9982 sema_v(&un->un_semoclose);
10335 9983
10336 9984 mutex_enter(&sd_detach_mutex);
10337 9985 un->un_opens_in_progress--;
10338 9986 mutex_exit(&sd_detach_mutex);
10339 9987
10340 9988 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
10341 9989 return (DDI_SUCCESS);
10342 9990
10343 9991 excl_open_fail:
10344 9992 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
10345 9993 rval = EBUSY;
10346 9994
[ 8 lines elided ]
10347 9995 open_fail:
10348 9996 mutex_exit(SD_MUTEX(un));
10349 9997
10350 9998 /*
10351 9999 * On a failed open we must exit the pm management.
10352 10000 */
10353 10001 if (!nodelay) {
10354 10002 sd_pm_exit(un);
10355 10003 }
10356 10004 open_failed_with_pm:
10005 +open_failed_not_attached:
10357 10006 sema_v(&un->un_semoclose);
10358 10007
10359 10008 mutex_enter(&sd_detach_mutex);
10360 10009 un->un_opens_in_progress--;
10361 10010 if (otyp == OTYP_LYR) {
10362 10011 un->un_layer_count--;
10363 10012 }
10364 10013 mutex_exit(&sd_detach_mutex);
10365 10014
10366 10015 return (rval);
10367 10016 }
10368 10017
10369 10018
10370 10019 /*
10371 10020 * Function: sdclose
10372 10021 *
10373 10022 * Description: Driver's close(9e) entry point function.
10374 10023 *
10375 10024 * Arguments: dev - device number
10376 10025 * flag - file status flag, informational only
10377 10026 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10378 10027 * cred_p - user credential pointer
10379 10028 *
10380 10029 * Return Code: ENXIO
10381 10030 *
10382 10031 * Context: Kernel thread context
10383 10032 */
10384 10033 /* ARGSUSED */
10385 10034 static int
10386 10035 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10387 10036 {
10388 10037 struct sd_lun *un;
[ 22 lines elided ]
10389 10038 uchar_t *cp;
10390 10039 int part;
10391 10040 int nodelay;
10392 10041 int rval = 0;
10393 10042
10394 10043 /* Validate the open type */
10395 10044 if (otyp >= OTYPCNT) {
10396 10045 return (ENXIO);
10397 10046 }
10398 10047
10048 + /* Hold the detach mutex to allow close to complete */
10049 + mutex_enter(&sd_detach_mutex);
10050 +
10399 10051 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10052 + mutex_exit(&sd_detach_mutex);
10400 10053 return (ENXIO);
10401 10054 }
10402 10055
10403 10056 part = SDPART(dev);
10404 10057 nodelay = flag & (FNDELAY | FNONBLOCK);
10405 10058
10406 10059 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10407 10060 "sdclose: close of part %d type %d\n", part, otyp);
10408 10061
10409 10062 /*
10410 10063 * We use a semaphore here in order to serialize
10411 10064 * open and close requests on the device.
10412 10065 */
10413 10066 sema_p(&un->un_semoclose);
10414 10067
10415 10068 mutex_enter(SD_MUTEX(un));
10416 10069
10417 - /* Don't proceed if power is being changed. */
10418 - while (un->un_state == SD_STATE_PM_CHANGING) {
10070 + /* Don't proceed if power is being changed or we're still attaching. */
10071 + while ((un->un_state == SD_STATE_PM_CHANGING) ||
10072 + (un->un_state == SD_STATE_ATTACHING)) {
10419 10073 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10420 10074 }
10421 10075
10422 10076 if (un->un_exclopen & (1 << part)) {
10423 10077 un->un_exclopen &= ~(1 << part);
10424 10078 }
10425 10079
10426 10080 /* Update the open partition map */
10427 10081 if (otyp == OTYP_LYR) {
10428 10082 un->un_ocmap.lyropen[part] -= 1;
10429 10083 } else {
10430 10084 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10431 10085 }
10432 10086
10433 10087 cp = &un->un_ocmap.chkd[0];
10434 10088 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10435 10089 		if (*cp != (uchar_t)0) {
10436 10090 break;
10437 10091 }
10438 10092 cp++;
10439 10093 }
10440 10094
10441 10095 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10442 10096 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10443 10097
10444 10098 /*
10445 10099 		 * We avoid persistence upon the last close, and set
10446 10100 * the throttle back to the maximum.
10447 10101 */
10448 10102 un->un_throttle = un->un_saved_throttle;
[ 20 lines elided ]
10449 10103
10450 10104 if (un->un_state == SD_STATE_OFFLINE) {
10451 10105 if (un->un_f_is_fibre == FALSE) {
10452 10106 scsi_log(SD_DEVINFO(un), sd_label,
10453 10107 CE_WARN, "offline\n");
10454 10108 }
10455 10109 mutex_exit(SD_MUTEX(un));
10456 10110 cmlb_invalidate(un->un_cmlbhandle,
10457 10111 (void *)SD_PATH_DIRECT);
10458 10112 mutex_enter(SD_MUTEX(un));
10459 -
10460 - } else {
10113 + } else if (un->un_state != SD_STATE_ATTACH_FAILED) {
10461 10114 /*
10462 10115 * Flush any outstanding writes in NVRAM cache.
10463 10116 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10464 10117 * cmd, it may not work for non-Pluto devices.
10465 10118 * SYNCHRONIZE CACHE is not required for removables,
10466 10119 * except DVD-RAM drives.
10467 10120 *
10468 10121 * Also note: because SYNCHRONIZE CACHE is currently
10469 10122 * the only command issued here that requires the
10470 10123 * drive be powered up, only do the power up before
10471 10124 * sending the Sync Cache command. If additional
10472 10125 * commands are added which require a powered up
10473 10126 * drive, the following sequence may have to change.
10474 - *
10475 - * And finally, note that parallel SCSI on SPARC
10476 - * only issues a Sync Cache to DVD-RAM, a newly
10477 - * supported device.
10478 10127 */
10479 -#if defined(__i386) || defined(__amd64)
10480 - if ((un->un_f_sync_cache_supported &&
10128 + if (!DEVI_IS_GONE(SD_DEVINFO(un)) &&
10129 + ((un->un_f_sync_cache_supported &&
10481 10130 un->un_f_sync_cache_required) ||
10482 - un->un_f_dvdram_writable_device == TRUE) {
10483 -#else
10484 - if (un->un_f_dvdram_writable_device == TRUE) {
10485 -#endif
10131 + un->un_f_dvdram_writable_device == TRUE)) {
10486 10132 mutex_exit(SD_MUTEX(un));
10487 10133 if (sd_pm_entry(un) == DDI_SUCCESS) {
10488 10134 rval =
10489 10135 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10490 10136 NULL);
10491 10137 /* ignore error if not supported */
10492 10138 if (rval == ENOTSUP) {
10493 10139 rval = 0;
10494 10140 } else if (rval != 0) {
10495 10141 rval = EIO;
10496 10142 }
10497 10143 sd_pm_exit(un);
10498 10144 } else {
10499 10145 rval = EIO;
10500 10146 }
10501 10147 mutex_enter(SD_MUTEX(un));
10502 10148 }
10503 10149
10504 10150 /*
10505 10151 			 * For devices which support DOOR_LOCK, send an ALLOW
10506 10152 * MEDIA REMOVAL command, but don't get upset if it
10507 10153 * fails. We need to raise the power of the drive before
10508 10154 * we can call sd_send_scsi_DOORLOCK()
10509 10155 */
10510 10156 if (un->un_f_doorlock_supported) {
10511 10157 mutex_exit(SD_MUTEX(un));
10512 10158 if (sd_pm_entry(un) == DDI_SUCCESS) {
10513 10159 sd_ssc_t *ssc;
10514 10160
10515 10161 ssc = sd_ssc_init(un);
10516 10162 rval = sd_send_scsi_DOORLOCK(ssc,
10517 10163 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10518 10164 if (rval != 0)
10519 10165 sd_ssc_assessment(ssc,
10520 10166 SD_FMT_IGNORE);
10521 10167 sd_ssc_fini(ssc);
10522 10168
10523 10169 sd_pm_exit(un);
10524 10170 if (ISCD(un) && (rval != 0) &&
[ 29 lines elided ]
10525 10171 (nodelay != 0)) {
10526 10172 rval = ENXIO;
10527 10173 }
10528 10174 } else {
10529 10175 rval = EIO;
10530 10176 }
10531 10177 mutex_enter(SD_MUTEX(un));
10532 10178 }
10533 10179
10534 10180 /*
10181 +			 * Pardon a device that is currently in the failfast
10182 +			 * active state, so that it does not bias a future open.
10183 + */
10184 + un->un_failfast_state = SD_FAILFAST_INACTIVE;
10185 +
10186 + /*
10535 10187 * If a device has removable media, invalidate all
10536 10188 * parameters related to media, such as geometry,
10537 10189 * blocksize, and blockcount.
10538 10190 */
10539 10191 if (un->un_f_has_removable_media) {
10540 10192 sr_ejected(un);
10541 10193 }
10542 10194
10543 10195 /*
10544 10196 * Destroy the cache (if it exists) which was
10545 10197 * allocated for the write maps since this is
10546 10198 * the last close for this media.
10547 10199 */
10548 10200 if (un->un_wm_cache) {
10549 10201 /*
10550 10202 				 * Check if there are pending commands;
10551 10203 				 * if there are, give a warning and
10552 10204 				 * do not destroy the cache.
10553 10205 */
10554 10206 if (un->un_ncmds_in_driver > 0) {
10555 10207 scsi_log(SD_DEVINFO(un),
10556 10208 sd_label, CE_WARN,
10557 10209 "Unable to clean up memory "
10558 10210 "because of pending I/O\n");
10559 10211 } else {
10560 10212 kmem_cache_destroy(
[ 16 lines elided ]
10561 10213 un->un_wm_cache);
10562 10214 un->un_wm_cache = NULL;
10563 10215 }
10564 10216 }
10565 10217 }
10566 10218 }
10567 10219
10568 10220 mutex_exit(SD_MUTEX(un));
10569 10221 sema_v(&un->un_semoclose);
10570 10222
10571 - if (otyp == OTYP_LYR) {
10572 - mutex_enter(&sd_detach_mutex);
10573 - /*
10574 - * The detach routine may run when the layer count
10575 - * drops to zero.
10576 - */
10223 + if (otyp == OTYP_LYR)
10577 10224 un->un_layer_count--;
10578 - mutex_exit(&sd_detach_mutex);
10579 - }
10580 10225
10226 + mutex_exit(&sd_detach_mutex);
10227 +
10581 10228 return (rval);
10582 10229 }
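As the layered-open comments in sdopen() note, the OTYP_LYR bookkeeping assumes opens and closes arrive strictly paired. Conceptually (a sketch; layered consumers actually reach these entry points through the DDI, not by direct call):

	/* Sketch: one close per open keeps un_layer_count balanced. */
	dev_t dev = sd_make_device(devi);	/* from the target's dev_info */
	if (sdopen(&dev, FREAD, OTYP_LYR, kcred) == 0) {	/* un_layer_count++ */
		/* ... layered I/O ... */
		(void) sdclose(dev, FREAD, OTYP_LYR, kcred);	/* un_layer_count-- */
	}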
10583 10230
10584 10231
10585 10232 /*
10586 10233 * Function: sd_ready_and_valid
10587 10234 *
10588 10235 * Description: Test if device is ready and has a valid geometry.
10589 10236 *
10590 10237  * Arguments:	ssc	- ssc contains pointer to driver soft state (un)
10591 10238  *		part	- partition number to validate
10592 10239 *
10593 10240 * Return Code: SD_READY_VALID ready and valid label
10594 10241 * SD_NOT_READY_VALID not ready, no label
10595 10242 * SD_RESERVED_BY_OTHERS reservation conflict
10596 10243 *
10597 10244 * Context: Never called at interrupt context.
10598 10245 */
10599 10246
10600 10247 static int
10601 10248 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10602 10249 {
10603 10250 struct sd_errstats *stp;
10604 10251 uint64_t capacity;
10605 10252 uint_t lbasize;
10606 10253 int rval = SD_READY_VALID;
10607 10254 char name_str[48];
10608 10255 boolean_t is_valid;
10609 10256 struct sd_lun *un;
10610 10257 int status;
10611 10258
10612 10259 ASSERT(ssc != NULL);
10613 10260 un = ssc->ssc_un;
10614 10261 ASSERT(un != NULL);
10615 10262 ASSERT(!mutex_owned(SD_MUTEX(un)));
10616 10263
10617 10264 mutex_enter(SD_MUTEX(un));
10618 10265 /*
10619 10266 * If a device has removable media, we must check if media is
10620 10267 * ready when checking if this device is ready and valid.
10621 10268 */
10622 10269 if (un->un_f_has_removable_media) {
10623 10270 mutex_exit(SD_MUTEX(un));
10624 10271 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10625 10272
10626 10273 if (status != 0) {
10627 10274 rval = SD_NOT_READY_VALID;
10628 10275 mutex_enter(SD_MUTEX(un));
10629 10276
10630 10277 			/* Ignore all failed status for removable media */
10631 10278 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10632 10279
10633 10280 goto done;
10634 10281 }
10635 10282
10636 10283 is_valid = SD_IS_VALID_LABEL(un);
10637 10284 mutex_enter(SD_MUTEX(un));
10638 10285 if (!is_valid ||
10639 10286 (un->un_f_blockcount_is_valid == FALSE) ||
10640 10287 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10641 10288
10642 10289 /* capacity has to be read every open. */
10643 10290 mutex_exit(SD_MUTEX(un));
10644 10291 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10645 10292 &lbasize, SD_PATH_DIRECT);
10646 10293
10647 10294 if (status != 0) {
10648 10295 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10649 10296
10650 10297 cmlb_invalidate(un->un_cmlbhandle,
10651 10298 (void *)SD_PATH_DIRECT);
10652 10299 mutex_enter(SD_MUTEX(un));
10653 10300 rval = SD_NOT_READY_VALID;
10654 10301
10655 10302 goto done;
10656 10303 } else {
10657 10304 mutex_enter(SD_MUTEX(un));
10658 10305 sd_update_block_info(un, lbasize, capacity);
10659 10306 }
10660 10307 }
10661 10308
10662 10309 /*
10663 10310 * Check if the media in the device is writable or not.
10664 10311 */
10665 10312 if (!is_valid && ISCD(un)) {
[ 75 lines elided ]
10666 10313 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10667 10314 }
10668 10315
10669 10316 } else {
10670 10317 /*
10671 10318 * Do a test unit ready to clear any unit attention from non-cd
10672 10319 * devices.
10673 10320 */
10674 10321 mutex_exit(SD_MUTEX(un));
10675 10322
10676 - status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10323 + status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
10677 10324 if (status != 0) {
10678 10325 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10679 10326 }
10680 10327
10681 10328 mutex_enter(SD_MUTEX(un));
10682 10329 }
10683 10330
10684 10331
10685 10332 /*
10686 10333 	 * If this is a non-512-byte-block device, allocate space for
10687 10334 	 * the wmap cache. This is done here because this routine is
10688 10335 	 * called every time the media changes, and the block size is
10689 10336 	 * a function of the media rather than the device.
10690 10337 */
10691 10338 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10692 10339 un->un_f_non_devbsize_supported) &&
10693 10340 un->un_tgt_blocksize != DEV_BSIZE) ||
10694 10341 un->un_f_enable_rmw) {
10695 10342 if (!(un->un_wm_cache)) {
10696 10343 (void) snprintf(name_str, sizeof (name_str),
10697 10344 "%s%d_cache",
10698 10345 ddi_driver_name(SD_DEVINFO(un)),
10699 10346 ddi_get_instance(SD_DEVINFO(un)));
10700 10347 un->un_wm_cache = kmem_cache_create(
10701 10348 name_str, sizeof (struct sd_w_map),
10702 10349 8, sd_wm_cache_constructor,
10703 10350 sd_wm_cache_destructor, NULL,
10704 10351 (void *)un, NULL, 0);
10705 10352 if (!(un->un_wm_cache)) {
10706 10353 rval = ENOMEM;
10707 10354 goto done;
10708 10355 }
10709 10356 }
10710 10357 }
10711 10358
10712 10359 if (un->un_state == SD_STATE_NORMAL) {
10713 10360 /*
10714 10361 * If the target is not yet ready here (defined by a TUR
10715 10362 * failure), invalidate the geometry and print an 'offline'
10716 10363 * message. This is a legacy message, as the state of the
10717 10364 * target is not actually changed to SD_STATE_OFFLINE.
10718 10365 *
10719 10366 * If the TUR fails for EACCES (Reservation Conflict),
10720 10367 * SD_RESERVED_BY_OTHERS will be returned to indicate
10721 10368 * reservation conflict. If the TUR fails for other
10722 10369 * reasons, SD_NOT_READY_VALID will be returned.
10723 10370 */
10724 10371 int err;
10725 10372
10726 10373 mutex_exit(SD_MUTEX(un));
10727 10374 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10728 10375 mutex_enter(SD_MUTEX(un));
10729 10376
10730 10377 if (err != 0) {
10731 10378 mutex_exit(SD_MUTEX(un));
10732 10379 cmlb_invalidate(un->un_cmlbhandle,
10733 10380 (void *)SD_PATH_DIRECT);
10734 10381 mutex_enter(SD_MUTEX(un));
10735 10382 if (err == EACCES) {
10736 10383 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10737 10384 "reservation conflict\n");
10738 10385 rval = SD_RESERVED_BY_OTHERS;
10739 10386 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10740 10387 } else {
10741 10388 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10742 10389 "drive offline\n");
10743 10390 rval = SD_NOT_READY_VALID;
10744 10391 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
10745 10392 }
10746 10393 goto done;
10747 10394 }
10748 10395 }
10749 10396
10750 10397 if (un->un_f_format_in_progress == FALSE) {
10751 10398 mutex_exit(SD_MUTEX(un));
10752 10399
10753 10400 (void) cmlb_validate(un->un_cmlbhandle, 0,
10754 10401 (void *)SD_PATH_DIRECT);
10755 10402 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
10756 10403 NULL, (void *) SD_PATH_DIRECT) != 0) {
10757 10404 rval = SD_NOT_READY_VALID;
10758 10405 mutex_enter(SD_MUTEX(un));
10759 10406
10760 10407 goto done;
10761 10408 }
10762 10409 if (un->un_f_pkstats_enabled) {
10763 10410 sd_set_pstats(un);
10764 10411 SD_TRACE(SD_LOG_IO_PARTITION, un,
10765 10412 "sd_ready_and_valid: un:0x%p pstats created and "
10766 10413 "set\n", un);
10767 10414 }
10768 10415 mutex_enter(SD_MUTEX(un));
10769 10416 }
10770 10417
10771 10418 /*
10772 10419 	 * If this device supports the DOOR_LOCK command, try to send
10773 10420 * this command to PREVENT MEDIA REMOVAL, but don't get upset
10774 10421 * if it fails. For a CD, however, it is an error
10775 10422 */
10776 10423 if (un->un_f_doorlock_supported) {
10777 10424 mutex_exit(SD_MUTEX(un));
10778 10425 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
10779 10426 SD_PATH_DIRECT);
10780 10427
10781 10428 if ((status != 0) && ISCD(un)) {
10782 10429 rval = SD_NOT_READY_VALID;
10783 10430 mutex_enter(SD_MUTEX(un));
10784 10431
10785 10432 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10786 10433
10787 10434 goto done;
10788 10435 } else if (status != 0)
10789 10436 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10790 10437 mutex_enter(SD_MUTEX(un));
10791 10438 }
10792 10439
10793 10440 /* The state has changed, inform the media watch routines */
10794 10441 un->un_mediastate = DKIO_INSERTED;
10795 10442 cv_broadcast(&un->un_state_cv);
10796 10443 rval = SD_READY_VALID;
10797 10444
10798 10445 done:
10799 10446
10800 10447 /*
10801 10448 	 * Initialize the capacity kstat value if there was no media
10802 10449 	 * previously (capacity kstat is 0) and media has now been
10803 10450 	 * inserted (un_blockcount > 0).
10804 10451 */
10805 10452 if (un->un_errstats != NULL) {
10806 10453 stp = (struct sd_errstats *)un->un_errstats->ks_data;
10807 10454 if ((stp->sd_capacity.value.ui64 == 0) &&
10808 10455 (un->un_f_blockcount_is_valid == TRUE)) {
10809 10456 stp->sd_capacity.value.ui64 =
10810 10457 (uint64_t)((uint64_t)un->un_blockcount *
10811 10458 un->un_sys_blocksize);
10812 10459 }
10813 10460 }
10814 10461
10815 10462 mutex_exit(SD_MUTEX(un));
10816 10463 return (rval);
10817 10464 }
10818 10465
10819 10466
10820 10467 /*
10821 10468 * Function: sdmin
10822 10469 *
10823 10470 * Description: Routine to limit the size of a data transfer. Used in
10824 10471 * conjunction with physio(9F).
10825 10472 *
10826 10473 * Arguments: bp - pointer to the indicated buf(9S) struct.
10827 10474 *
10828 10475 * Context: Kernel thread context.
10829 10476 */
10830 10477
10831 10478 static void
10832 10479 sdmin(struct buf *bp)
10833 10480 {
10834 10481 struct sd_lun *un;
10835 10482 int instance;
10836 10483
10837 10484 instance = SDUNIT(bp->b_edev);
10838 10485
10839 10486 un = ddi_get_soft_state(sd_state, instance);
10840 10487 ASSERT(un != NULL);
10841 10488
10842 10489 /*
10843 10490 * We depend on buf breakup to restrict
10844 10491 * IO size if it is enabled.
10845 10492 */
10846 10493 if (un->un_buf_breakup_supported) {
10847 10494 return;
10848 10495 }
10849 10496
10850 10497 if (bp->b_bcount > un->un_max_xfer_size) {
10851 10498 bp->b_bcount = un->un_max_xfer_size;
10852 10499 }
10853 10500 }
10854 10501
10855 10502
10856 10503 /*
10857 10504 * Function: sdread
10858 10505 *
10859 10506 * Description: Driver's read(9e) entry point function.
10860 10507 *
10861 10508 * Arguments: dev - device number
10862 10509 * uio - structure pointer describing where data is to be stored
10863 10510 * in user's space
10864 10511 * cred_p - user credential pointer
10865 10512 *
10866 10513 * Return Code: ENXIO
10867 10514 * EIO
10868 10515 * EINVAL
10869 10516 * value returned by physio
10870 10517 *
10871 10518 * Context: Kernel thread context.
[ 185 lines elided ]
10872 10519 */
10873 10520 /* ARGSUSED */
10874 10521 static int
10875 10522 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10876 10523 {
10877 10524 struct sd_lun *un = NULL;
10878 10525 int secmask;
10879 10526 int err = 0;
10880 10527 sd_ssc_t *ssc;
10881 10528
10882 - if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10529 + if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
10530 + DEVI_IS_GONE(SD_DEVINFO(un)))
10883 10531 return (ENXIO);
10884 - }
10885 10532
10886 10533 ASSERT(!mutex_owned(SD_MUTEX(un)));
10887 10534
10535 + mutex_enter(SD_MUTEX(un));
10536 + while (un->un_state == SD_STATE_ATTACHING)
10537 + cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10888 10538
10539 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
10540 + mutex_exit(SD_MUTEX(un));
10541 + SD_ERROR(SD_LOG_READ_WRITE, un, "sdread: attach failed\n");
10542 + return (EIO);
10543 + }
10544 + mutex_exit(SD_MUTEX(un));
10545 +
10889 10546 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10890 10547 mutex_enter(SD_MUTEX(un));
10891 10548 /*
10892 10549 * Because the call to sd_ready_and_valid will issue I/O we
10893 10550 * must wait here if either the device is suspended or
10894 10551 		 * if its power level is changing.
10895 10552 */
10896 10553 while ((un->un_state == SD_STATE_SUSPENDED) ||
10897 10554 (un->un_state == SD_STATE_PM_CHANGING)) {
10898 10555 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10899 10556 }
10557 +
10558 + SD_BAIL_CHECK(un);
10900 10559 un->un_ncmds_in_driver++;
10901 10560 mutex_exit(SD_MUTEX(un));
10902 10561
10903 10562 /* Initialize sd_ssc_t for internal uscsi commands */
10904 10563 ssc = sd_ssc_init(un);
10905 10564 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10906 10565 err = EIO;
10907 10566 } else {
10908 10567 err = 0;
10909 10568 }
10910 10569 sd_ssc_fini(ssc);
10911 10570
10912 10571 mutex_enter(SD_MUTEX(un));
10913 10572 un->un_ncmds_in_driver--;
10573 + if (un->un_f_detach_waiting)
10574 + cv_signal(&un->un_detach_cv);
10914 10575 ASSERT(un->un_ncmds_in_driver >= 0);
10915 10576 mutex_exit(SD_MUTEX(un));
10916 10577 if (err != 0)
10917 10578 return (err);
10918 10579 }
10919 10580
10920 10581 /*
10921 10582 * Read requests are restricted to multiples of the system block size.
10922 10583 */
10923 10584 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
10924 10585 !un->un_f_enable_rmw)
10925 10586 secmask = un->un_tgt_blocksize - 1;
10926 10587 else
10927 10588 secmask = DEV_BSIZE - 1;
10928 10589
10929 10590 if (uio->uio_loffset & ((offset_t)(secmask))) {
10930 10591 SD_ERROR(SD_LOG_READ_WRITE, un,
10931 10592 "sdread: file offset not modulo %d\n",
10932 10593 secmask + 1);
10933 10594 err = EINVAL;
10934 10595 } else if (uio->uio_iov->iov_len & (secmask)) {
10935 10596 SD_ERROR(SD_LOG_READ_WRITE, un,
10936 10597 "sdread: transfer length not modulo %d\n",
10937 10598 secmask + 1);
10938 10599 err = EINVAL;
10939 10600 } else {
10940 10601 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10941 10602 }
10942 10603
10943 10604 return (err);
10944 10605 }
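The same secmask test recurs in sdwrite(), sdaread(), and sdawrite() below. It works because the block size is a power of two, so masking with (blocksize - 1) computes the remainder without a division; for example:

	/* Sketch: with un_tgt_blocksize == 512, secmask == 0x1ff. */
	/*   1024 & 0x1ff == 0   -> 512-aligned, request proceeds      */
	/*   1000 & 0x1ff == 488 -> misaligned, sdread returns EINVAL  */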
10945 10606
10946 10607
10947 10608 /*
10948 10609 * Function: sdwrite
10949 10610 *
10950 10611 * Description: Driver's write(9e) entry point function.
10951 10612 *
10952 10613 * Arguments: dev - device number
10953 10614 * uio - structure pointer describing where data is stored in
10954 10615 * user's space
10955 10616 * cred_p - user credential pointer
10956 10617 *
10957 10618 * Return Code: ENXIO
10958 10619 * EIO
10959 10620 * EINVAL
10960 10621 * value returned by physio
10961 10622 *
10962 10623 * Context: Kernel thread context.
[ 39 lines elided ]
10963 10624 */
10964 10625 /* ARGSUSED */
10965 10626 static int
10966 10627 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10967 10628 {
10968 10629 struct sd_lun *un = NULL;
10969 10630 int secmask;
10970 10631 int err = 0;
10971 10632 sd_ssc_t *ssc;
10972 10633
10973 - if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10634 + if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
10635 + DEVI_IS_GONE(SD_DEVINFO(un)))
10974 10636 return (ENXIO);
10975 - }
10976 10637
10977 10638 ASSERT(!mutex_owned(SD_MUTEX(un)));
10978 10639
10640 + mutex_enter(SD_MUTEX(un));
10641 + while (un->un_state == SD_STATE_ATTACHING)
10642 + cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10643 +
10644 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
10645 + mutex_exit(SD_MUTEX(un));
10646 + SD_ERROR(SD_LOG_READ_WRITE, un, "sdwrite: attach failed\n");
10647 + return (EIO);
10648 + }
10649 + mutex_exit(SD_MUTEX(un));
10650 +
10979 10651 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10980 10652 mutex_enter(SD_MUTEX(un));
10981 10653 /*
10982 10654 * Because the call to sd_ready_and_valid will issue I/O we
10983 10655 * must wait here if either the device is suspended or
10984 10656 		 * if its power level is changing.
10985 10657 */
10986 10658 while ((un->un_state == SD_STATE_SUSPENDED) ||
10987 10659 (un->un_state == SD_STATE_PM_CHANGING)) {
10988 10660 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10989 10661 }
10662 +
10663 + SD_BAIL_CHECK(un);
10990 10664 un->un_ncmds_in_driver++;
10991 10665 mutex_exit(SD_MUTEX(un));
10992 10666
10993 10667 /* Initialize sd_ssc_t for internal uscsi commands */
10994 10668 ssc = sd_ssc_init(un);
10995 10669 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10996 10670 err = EIO;
10997 10671 } else {
10998 10672 err = 0;
10999 10673 }
11000 10674 sd_ssc_fini(ssc);
11001 10675
11002 10676 mutex_enter(SD_MUTEX(un));
11003 10677 un->un_ncmds_in_driver--;
11004 10678 ASSERT(un->un_ncmds_in_driver >= 0);
10679 + if (un->un_f_detach_waiting)
10680 + cv_signal(&un->un_detach_cv);
11005 10681 mutex_exit(SD_MUTEX(un));
11006 10682 if (err != 0)
11007 10683 return (err);
11008 10684 }
11009 10685
11010 10686 /*
11011 10687 * Write requests are restricted to multiples of the system block size.
11012 10688 */
11013 10689 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11014 10690 !un->un_f_enable_rmw)
11015 10691 secmask = un->un_tgt_blocksize - 1;
11016 10692 else
11017 10693 secmask = DEV_BSIZE - 1;
11018 10694
11019 10695 if (uio->uio_loffset & ((offset_t)(secmask))) {
11020 10696 SD_ERROR(SD_LOG_READ_WRITE, un,
11021 10697 "sdwrite: file offset not modulo %d\n",
11022 10698 secmask + 1);
11023 10699 err = EINVAL;
11024 10700 } else if (uio->uio_iov->iov_len & (secmask)) {
11025 10701 SD_ERROR(SD_LOG_READ_WRITE, un,
11026 10702 "sdwrite: transfer length not modulo %d\n",
11027 10703 secmask + 1);
11028 10704 err = EINVAL;
11029 10705 } else {
11030 10706 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11031 10707 }
11032 10708
11033 10709 return (err);
11034 10710 }
11035 10711
11036 10712
11037 10713 /*
11038 10714 * Function: sdaread
11039 10715 *
11040 10716 * Description: Driver's aread(9e) entry point function.
11041 10717 *
11042 10718 * Arguments: dev - device number
11043 10719 * aio - structure pointer describing where data is to be stored
11044 10720 * cred_p - user credential pointer
11045 10721 *
11046 10722 * Return Code: ENXIO
11047 10723 * EIO
11048 10724 * EINVAL
11049 10725 * value returned by aphysio
11050 10726 *
11051 10727 * Context: Kernel thread context.
11052 10728 */
[ 38 lines elided ]
11053 10729 /* ARGSUSED */
11054 10730 static int
11055 10731 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11056 10732 {
11057 10733 struct sd_lun *un = NULL;
11058 10734 struct uio *uio = aio->aio_uio;
11059 10735 int secmask;
11060 10736 int err = 0;
11061 10737 sd_ssc_t *ssc;
11062 10738
11063 - if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10739 + if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
10740 + DEVI_IS_GONE(SD_DEVINFO(un)))
11064 10741 return (ENXIO);
11065 - }
11066 10742
11067 10743 ASSERT(!mutex_owned(SD_MUTEX(un)));
11068 10744
10745 + mutex_enter(SD_MUTEX(un));
10746 + while (un->un_state == SD_STATE_ATTACHING)
10747 + cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10748 +
10749 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
10750 + mutex_exit(SD_MUTEX(un));
10751 + SD_ERROR(SD_LOG_READ_WRITE, un, "sdaread: attach failed\n");
10752 + return (EIO);
10753 + }
10754 + mutex_exit(SD_MUTEX(un));
10755 +
11069 10756 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11070 10757 mutex_enter(SD_MUTEX(un));
11071 10758 /*
11072 10759 * Because the call to sd_ready_and_valid will issue I/O we
11073 10760 * must wait here if either the device is suspended or
11074 10761 		 * if its power level is changing.
11075 10762 */
11076 10763 while ((un->un_state == SD_STATE_SUSPENDED) ||
11077 10764 (un->un_state == SD_STATE_PM_CHANGING)) {
11078 10765 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11079 10766 }
10767 +
10768 + SD_BAIL_CHECK(un);
11080 10769 un->un_ncmds_in_driver++;
11081 10770 mutex_exit(SD_MUTEX(un));
11082 10771
11083 10772 /* Initialize sd_ssc_t for internal uscsi commands */
11084 10773 ssc = sd_ssc_init(un);
11085 10774 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11086 10775 err = EIO;
11087 10776 } else {
11088 10777 err = 0;
11089 10778 }
11090 10779 sd_ssc_fini(ssc);
11091 10780
11092 10781 mutex_enter(SD_MUTEX(un));
11093 10782 un->un_ncmds_in_driver--;
11094 10783 ASSERT(un->un_ncmds_in_driver >= 0);
10784 + if (un->un_f_detach_waiting)
10785 + cv_signal(&un->un_detach_cv);
11095 10786 mutex_exit(SD_MUTEX(un));
11096 10787 if (err != 0)
11097 10788 return (err);
11098 10789 }
11099 10790
11100 10791 /*
11101 10792 * Read requests are restricted to multiples of the system block size.
11102 10793 */
11103 10794 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11104 10795 !un->un_f_enable_rmw)
11105 10796 secmask = un->un_tgt_blocksize - 1;
11106 10797 else
11107 10798 secmask = DEV_BSIZE - 1;
11108 10799
11109 10800 if (uio->uio_loffset & ((offset_t)(secmask))) {
11110 10801 SD_ERROR(SD_LOG_READ_WRITE, un,
11111 10802 "sdaread: file offset not modulo %d\n",
11112 10803 secmask + 1);
11113 10804 err = EINVAL;
11114 10805 } else if (uio->uio_iov->iov_len & (secmask)) {
11115 10806 SD_ERROR(SD_LOG_READ_WRITE, un,
11116 10807 "sdaread: transfer length not modulo %d\n",
11117 10808 secmask + 1);
11118 10809 err = EINVAL;
11119 10810 } else {
11120 10811 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11121 10812 }
11122 10813
11123 10814 return (err);
11124 10815 }
11125 10816
11126 10817
11127 10818 /*
11128 10819 * Function: sdawrite
11129 10820 *
11130 10821 * Description: Driver's awrite(9e) entry point function.
11131 10822 *
11132 10823 * Arguments: dev - device number
11133 10824 * aio - structure pointer describing where data is stored
11134 10825 * cred_p - user credential pointer
11135 10826 *
11136 10827 * Return Code: ENXIO
11137 10828 * EIO
11138 10829 * EINVAL
11139 10830 * value returned by aphysio
11140 10831 *
11141 10832 * Context: Kernel thread context.
11142 10833 */
[ 38 lines elided ]
11143 10834 /* ARGSUSED */
11144 10835 static int
11145 10836 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11146 10837 {
11147 10838 struct sd_lun *un = NULL;
11148 10839 struct uio *uio = aio->aio_uio;
11149 10840 int secmask;
11150 10841 int err = 0;
11151 10842 sd_ssc_t *ssc;
11152 10843
11153 - if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10844 + if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
10845 + DEVI_IS_GONE(SD_DEVINFO(un)))
11154 10846 return (ENXIO);
11155 - }
11156 10847
11157 10848 ASSERT(!mutex_owned(SD_MUTEX(un)));
11158 10849
10850 + mutex_enter(SD_MUTEX(un));
10851 + while (un->un_state == SD_STATE_ATTACHING)
10852 + cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10853 +
10854 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
10855 + mutex_exit(SD_MUTEX(un));
10856 + SD_ERROR(SD_LOG_READ_WRITE, un,
10857 + "sdawrite: attach failed\n");
10858 + return (EIO);
10859 + }
10860 + mutex_exit(SD_MUTEX(un));
10861 +
11159 10862 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11160 10863 mutex_enter(SD_MUTEX(un));
11161 10864 /*
11162 10865 * Because the call to sd_ready_and_valid will issue I/O we
11163 10866 * must wait here if either the device is suspended or
11164 10867 	 * if its power level is changing.
11165 10868 */
11166 10869 while ((un->un_state == SD_STATE_SUSPENDED) ||
11167 10870 (un->un_state == SD_STATE_PM_CHANGING)) {
11168 10871 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11169 10872 }
10873 +
10874 + SD_BAIL_CHECK(un);
11170 10875 un->un_ncmds_in_driver++;
11171 10876 mutex_exit(SD_MUTEX(un));
11172 10877
11173 10878 /* Initialize sd_ssc_t for internal uscsi commands */
11174 10879 ssc = sd_ssc_init(un);
11175 10880 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11176 10881 err = EIO;
11177 10882 } else {
11178 10883 err = 0;
11179 10884 }
11180 10885 sd_ssc_fini(ssc);
11181 10886
11182 10887 mutex_enter(SD_MUTEX(un));
11183 10888 un->un_ncmds_in_driver--;
11184 10889 ASSERT(un->un_ncmds_in_driver >= 0);
10890 + if (un->un_f_detach_waiting)
10891 + cv_signal(&un->un_detach_cv);
11185 10892 mutex_exit(SD_MUTEX(un));
11186 10893 if (err != 0)
11187 10894 return (err);
11188 10895 }
11189 10896
11190 10897 /*
11191 10898 * Write requests are restricted to multiples of the system block size.
11192 10899 */
11193 10900 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11194 10901 !un->un_f_enable_rmw)
11195 10902 secmask = un->un_tgt_blocksize - 1;
11196 10903 else
11197 10904 secmask = DEV_BSIZE - 1;
11198 10905
11199 10906 if (uio->uio_loffset & ((offset_t)(secmask))) {
11200 10907 SD_ERROR(SD_LOG_READ_WRITE, un,
11201 10908 "sdawrite: file offset not modulo %d\n",
11202 10909 secmask + 1);
11203 10910 err = EINVAL;
11204 10911 } else if (uio->uio_iov->iov_len & (secmask)) {
11205 10912 SD_ERROR(SD_LOG_READ_WRITE, un,
11206 10913 "sdawrite: transfer length not modulo %d\n",
11207 10914 secmask + 1);
11208 10915 err = EINVAL;
11209 10916 } else {
11210 10917 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
11211 10918 }
11212 10919
11213 10920 return (err);
11214 10921 }
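
For reference: the offset/length tests in sdaread() and sdawrite() depend on
the block size being a power of two, so that size - 1 works as a bit mask. A
minimal sketch of the same check (check_aligned and blksz are illustrative
names, not from sd):

    #include <sys/types.h>

    /*
     * For a power-of-two blksz, (x & (blksz - 1)) is nonzero exactly
     * when x is not a multiple of blksz; this is the "not modulo"
     * condition the error messages above report.
     */
    static int
    check_aligned(offset_t off, size_t len, uint_t blksz)
    {
            uint_t mask = blksz - 1;        /* e.g. 512 -> 0x1ff */

            return (((off & mask) == 0) && ((len & mask) == 0));
    }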
11215 10922
11216 10923
11217 10924
11218 10925
11219 10926
11220 10927 /*
11221 10928 * Driver IO processing follows the following sequence:
11222 10929 *
11223 10930 * sdioctl(9E) sdstrategy(9E) biodone(9F)
11224 10931 * | | ^
11225 10932 * v v |
11226 10933 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+
11227 10934 * | | | |
11228 10935 * v | | |
11229 10936 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
11230 10937 * | | ^ ^
11231 10938 * v v | |
11232 10939 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | |
11233 10940 * | | | |
11234 10941 * +---+ | +------------+ +-------+
11235 10942 * | | | |
11236 10943 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11237 10944 * | v | |
11238 10945 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() |
11239 10946 * | | ^ |
11240 10947 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11241 10948 * | v | |
11242 10949 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() |
11243 10950 * | | ^ |
11244 10951 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11245 10952 * | v | |
11246 10953 * | sd_checksum_iostart() sd_checksum_iodone() |
11247 10954 * | | ^ |
11248 10955 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+
11249 10956 * | v | |
11250 10957 * | sd_pm_iostart() sd_pm_iodone() |
11251 10958 * | | ^ |
11252 10959 * | | | |
11253 10960 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+
11254 10961 * | ^
11255 10962 * v |
11256 10963 * sd_core_iostart() |
11257 10964 * | |
11258 10965 * | +------>(*destroypkt)()
11259 10966 * +-> sd_start_cmds() <-+ | |
11260 10967 * | | | v
11261 10968 * | | | scsi_destroy_pkt(9F)
11262 10969 * | | |
11263 10970 * +->(*initpkt)() +- sdintr()
11264 10971 * | | | |
11265 10972 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx()
11266 10973 * | +-> scsi_setup_cdb(9F) |
11267 10974 * | |
11268 10975 * +--> scsi_transport(9F) |
11269 10976 * | |
11270 10977 * +----> SCSA ---->+
11271 10978 *
11272 10979 *
11273 10980 * This code is based upon the following presumptions:
11274 10981 *
11275 10982 * - iostart and iodone functions operate on buf(9S) structures. These
11276 10983 * functions perform the necessary operations on the buf(9S) and pass
11277 10984 * them along to the next function in the chain by using the macros
11278 10985 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
11279 10986 * (for iodone side functions).
11280 10987 *
11281 10988 * - The iostart side functions may sleep. The iodone side functions
11282 10989 * are called under interrupt context and may NOT sleep. Therefore
11283 10990 * iodone side functions also may not call iostart side functions.
11284 10991 * (NOTE: iostart side functions should NOT sleep for memory, as
11285 10992 * this could result in deadlock.)
11286 10993 *
11287 10994 * - An iostart side function may call its corresponding iodone side
11288 10995 * function directly (if necessary).
11289 10996 *
11290 10997 * - In the event of an error, an iostart side function can return a buf(9S)
11291 10998 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
11292 10999 * b_error in the usual way of course).
11293 11000 *
11294 11001 * - The taskq mechanism may be used by the iodone side functions to dispatch
11295 11002 * requests to the iostart side functions. The iostart side functions in
11296 11003 * this case would be called under the context of a taskq thread, so it's
11297 11004 * OK for them to block/sleep/spin in this case.
11298 11005 *
11299 11006 * - iostart side functions may allocate "shadow" buf(9S) structs and
11300 11007 * pass them along to the next function in the chain. The corresponding
11301 11008 * iodone side functions must coalesce the "shadow" bufs and return
11302 11009 * the "original" buf to the next higher layer.
11303 11010 *
11304 11011 * - The b_private field of the buf(9S) struct holds a pointer to
11305 11012 * an sd_xbuf struct, which contains information needed to
11306 11013 * construct the scsi_pkt for the command.
11307 11014 *
11308 11015 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11309 11016 * layer must acquire & release the SD_MUTEX(un) as needed.
11310 11017 */
11311 11018
11312 11019
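
The SD_NEXT_IOSTART()/SD_NEXT_IODONE() macros in the diagram above step a buf
through per-layer function tables. A simplified sketch of how such a
table-driven chain can be wired, to illustrate the pattern only (these are not
the driver's actual tables or macro definitions):

    struct buf;
    struct sd_lun;

    typedef void (*iostart_fn_t)(int index, struct sd_lun *un,
        struct buf *bp);

    static void demo_mapblockaddr_iostart(int, struct sd_lun *,
        struct buf *);
    static void demo_core_iostart(int, struct sd_lun *, struct buf *);

    /* One entry per layer; the last entry terminates the chain. */
    static iostart_fn_t demo_iostart_chain[] = {
            demo_mapblockaddr_iostart,      /* index 0 */
            demo_core_iostart,              /* index 1 */
    };

    /* Hand the buf to the next layer in the table. */
    #define DEMO_NEXT_IOSTART(index, un, bp) \
            ((*(demo_iostart_chain[(index) + 1]))((index) + 1, (un), (bp)))

    static void
    demo_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
    {
            /* ...translate the block address for this layer... */
            DEMO_NEXT_IOSTART(index, un, bp);
    }

    /* ARGSUSED */
    static void
    demo_core_iostart(int index, struct sd_lun *un, struct buf *bp)
    {
            /* ...build the scsi_pkt and call scsi_transport(9F)... */
    }

Each xbuf records its chain indexes (xb_chain_iostart/xb_chain_iodone), which
is what lets the iodone side walk the corresponding table back up.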
11313 11020 /*
11314 - * Create taskq for all targets in the system. This is created at
11315 - * _init(9E) and destroyed at _fini(9E).
11316 - *
11317 - * Note: here we set the minalloc to a reasonably high number to ensure that
11318 - * we will have an adequate supply of task entries available at interrupt time.
11319 - * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11320 - * sd_create_taskq(). Since we do not want to sleep for allocations at
11321 - * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11322 - * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11323 - * requests any one instant in time.
11324 - */
11325 -#define SD_TASKQ_NUMTHREADS 8
11326 -#define SD_TASKQ_MINALLOC 256
11327 -#define SD_TASKQ_MAXALLOC 256
11328 -
11329 -static taskq_t *sd_tq = NULL;
11330 -_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11331 -
11332 -static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11333 -static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11334 -
11335 -/*
11336 - * The following task queue is being created for the write part of
11337 - * read-modify-write of non-512 block size devices.
11338 - * Limit the number of threads to 1 for now. This number has been chosen
11339 - * considering the fact that it applies only to dvd ram drives/MO drives
11340 - * currently. Performance for which is not main criteria at this stage.
11341 - * Note: It needs to be explored if we can use a single taskq in future
11342 - */
11343 -#define SD_WMR_TASKQ_NUMTHREADS 1
11344 -static taskq_t *sd_wmr_tq = NULL;
11345 -_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11346 -
11347 -/*
11348 11021 * Function: sd_taskq_create
11349 11022 *
11350 11023 * Description: Create taskq thread(s) and preallocate task entries
11351 11024 *
11352 11025 * Return Code: Returns a pointer to the allocated taskq_t.
11353 11026 *
11354 11027 * Context: Can sleep. Requires blockable context.
11355 11028 *
11356 11029 * Notes: - The taskq() facility currently is NOT part of the DDI.
11357 11030 	 *		  (definitely NOT recommended for 3rd-party drivers!) :-)
11358 11031 * - taskq_create() will block for memory, also it will panic
11359 11032 * if it cannot create the requested number of threads.
11360 11033 * - Currently taskq_create() creates threads that cannot be
11361 11034 * swapped.
11362 11035 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11363 11036 * supply of taskq entries at interrupt time (ie, so that we
11364 11037 * do not have to sleep for memory)
11365 11038 */
11366 11039
11367 11040 static void
11368 11041 sd_taskq_create(void)
11369 11042 {
11370 11043 char taskq_name[TASKQ_NAMELEN];
11371 11044
11372 11045 ASSERT(sd_tq == NULL);
11373 11046 ASSERT(sd_wmr_tq == NULL);
11374 11047
11375 11048 (void) snprintf(taskq_name, sizeof (taskq_name),
11376 11049 "%s_drv_taskq", sd_label);
11377 11050 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11378 11051 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11379 11052 TASKQ_PREPOPULATE));
11380 11053
11381 11054 (void) snprintf(taskq_name, sizeof (taskq_name),
11382 11055 "%s_rmw_taskq", sd_label);
11383 11056 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11384 11057 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11385 11058 TASKQ_PREPOPULATE));
11386 11059 }
11387 11060
11388 11061
11389 11062 /*
11390 11063 * Function: sd_taskq_delete
11391 11064 *
11392 11065 * Description: Complementary cleanup routine for sd_taskq_create().
11393 11066 *
11394 11067 * Context: Kernel thread context.
11395 11068 */
11396 11069
11397 11070 static void
11398 11071 sd_taskq_delete(void)
11399 11072 {
11400 11073 ASSERT(sd_tq != NULL);
11401 11074 ASSERT(sd_wmr_tq != NULL);
11402 11075 taskq_destroy(sd_tq);
11403 11076 taskq_destroy(sd_wmr_tq);
11404 11077 sd_tq = NULL;
11405 11078 sd_wmr_tq = NULL;
11406 11079 }
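
The create/destroy pair above follows the standard kernel taskq lifecycle. A
minimal usage sketch under the same flags (the demo_* names are illustrative;
TASKQID_INVALID is the dispatch-failure value on current illumos, where older
code compared against 0):

    #include <sys/taskq.h>
    #include <sys/systm.h>

    static taskq_t *demo_tq;

    static void
    demo_task(void *arg)
    {
            /* Runs later in taskq-thread context; may block. */
    }

    static void
    demo_taskq_lifecycle(void)
    {
            demo_tq = taskq_create("demo_tq", 8, minclsyspri,
                256, 256, TASKQ_PREPOPULATE);

            /*
             * TQ_NOSLEEP: fail rather than block when no preallocated
             * entry is available, which is what makes dispatch safe
             * from interrupt context.
             */
            if (taskq_dispatch(demo_tq, demo_task, NULL, TQ_NOSLEEP) ==
                TASKQID_INVALID) {
                    /* dispatch failed; fail the command */
            }

            taskq_destroy(demo_tq);         /* waits for pending tasks */
    }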
11407 11080
11408 11081
11409 11082 /*
11410 11083 * Function: sdstrategy
11411 11084 *
11412 11085 * Description: Driver's strategy (9E) entry point function.
11413 11086 *
11414 11087 * Arguments: bp - pointer to buf(9S)
11415 11088 *
11416 11089 * Return Code: Always returns zero
11417 11090 *
11418 11091 * Context: Kernel thread context.
11419 11092 */
11420 11093
11421 11094 static int
11422 11095 sdstrategy(struct buf *bp)
11423 11096 {
11424 11097 struct sd_lun *un;
11098 + int error = EIO;
11425 11099
11426 - un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11427 - if (un == NULL) {
11428 - bioerror(bp, EIO);
11429 - bp->b_resid = bp->b_bcount;
11430 - biodone(bp);
11431 - return (0);
11432 - }
11100 + if ((un = ddi_get_soft_state(sd_state,
11101 + SD_GET_INSTANCE_FROM_BUF(bp))) == NULL)
11102 + goto fail;
11433 11103
11434 - /* As was done in the past, fail new cmds. if state is dumping. */
11435 - if (un->un_state == SD_STATE_DUMPING) {
11436 - bioerror(bp, ENXIO);
11437 - bp->b_resid = bp->b_bcount;
11438 - biodone(bp);
11439 - return (0);
11104 + /* Fail new cmds if state is dumping or device is gone */
11105 + if (un->un_state == SD_STATE_DUMPING ||
11106 + DEVI_IS_GONE(SD_DEVINFO(un))) {
11107 + error = ENXIO;
11108 + goto fail;
11440 11109 }
11441 11110
11442 11111 ASSERT(!mutex_owned(SD_MUTEX(un)));
11443 11112
11444 11113 /*
11445 11114 * Commands may sneak in while we released the mutex in
11446 11115 * DDI_SUSPEND, we should block new commands. However, old
11447 11116 	 * DDI_SUSPEND, so we should block new commands. However, old
11448 11117 * still be allowed to drain.
11449 11118 */
11450 11119 mutex_enter(SD_MUTEX(un));
11451 11120 /*
11452 11121 * Must wait here if either the device is suspended or
11453 11122 	 * if its power level is changing.
11454 11123 */
11455 11124 while ((un->un_state == SD_STATE_SUSPENDED) ||
11456 - (un->un_state == SD_STATE_PM_CHANGING)) {
11125 + (un->un_state == SD_STATE_PM_CHANGING) ||
11126 + (un->un_state == SD_STATE_ATTACHING)) {
11457 11127 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11458 11128 }
11459 11129
11130 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
11131 + mutex_exit(SD_MUTEX(un));
11132 + SD_ERROR(SD_LOG_READ_WRITE, un,
11133 + "sdstrategy: attach failed\n");
11134 + goto fail;
11135 + }
11136 + if (un->un_detach_count != 0) {
11137 + mutex_exit(SD_MUTEX(un));
11138 + goto fail;
11139 + }
11140 +
11460 11141 un->un_ncmds_in_driver++;
11461 11142
11462 11143 /*
11463 11144 * atapi: Since we are running the CD for now in PIO mode we need to
11464 11145 * call bp_mapin here to avoid bp_mapin called interrupt context under
11465 11146 	 * call bp_mapin here to avoid bp_mapin being called in interrupt
11466 11147 */
11467 11148 if (un->un_f_cfg_is_atapi == TRUE) {
11468 11149 mutex_exit(SD_MUTEX(un));
11469 11150 bp_mapin(bp);
11470 11151 mutex_enter(SD_MUTEX(un));
11471 11152 }
11472 11153 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11473 11154 un->un_ncmds_in_driver);
11474 11155
11475 11156 if (bp->b_flags & B_WRITE)
11476 11157 un->un_f_sync_cache_required = TRUE;
11477 11158
11478 11159 mutex_exit(SD_MUTEX(un));
11479 11160
11480 11161 /*
11481 11162 * This will (eventually) allocate the sd_xbuf area and
11482 11163 * call sd_xbuf_strategy(). We just want to return the
11483 11164 * result of ddi_xbuf_qstrategy so that we have an opt-
11484 11165 * imized tail call which saves us a stack frame.
11485 11166 */
11486 11167 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
11168 +
11169 +fail:
11170 + bioerror(bp, error);
11171 + bp->b_resid = bp->b_bcount;
11172 + biodone(bp);
11173 + return (0);
11487 11174 }
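
Note that a strategy(9E) routine reports failure through the buf rather than
through its return value, which is why the consolidated fail: path above
completes the buf and still returns 0. The pattern in isolation:

            bioerror(bp, error);            /* set B_ERROR and b_error */
            bp->b_resid = bp->b_bcount;     /* nothing was transferred */
            biodone(bp);                    /* release the buf to its owner */
            return (0);                     /* strategy(9E) returns 0 regardless */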
11488 11175
11489 -
11490 11176 /*
11491 11177 * Function: sd_xbuf_strategy
11492 11178 *
11493 11179 * Description: Function for initiating IO operations via the
11494 11180 * ddi_xbuf_qstrategy() mechanism.
11495 11181 *
11496 11182 * Context: Kernel thread context.
11497 11183 */
11498 11184
11499 11185 static void
11500 11186 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11501 11187 {
11502 11188 struct sd_lun *un = arg;
11503 11189
11504 11190 ASSERT(bp != NULL);
11505 11191 ASSERT(xp != NULL);
11506 11192 ASSERT(un != NULL);
11507 11193 ASSERT(!mutex_owned(SD_MUTEX(un)));
11508 11194
11509 11195 /*
11510 11196 * Initialize the fields in the xbuf and save a pointer to the
11511 11197 * xbuf in bp->b_private.
11512 11198 */
11513 11199 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11514 11200
11515 11201 /* Send the buf down the iostart chain */
11516 11202 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11517 11203 }
11518 11204
11519 11205
11520 11206 /*
11521 11207 * Function: sd_xbuf_init
11522 11208 *
11523 11209 * Description: Prepare the given sd_xbuf struct for use.
11524 11210 *
11525 11211 * Arguments: un - ptr to softstate
11526 11212 * bp - ptr to associated buf(9S)
11527 11213 * xp - ptr to associated sd_xbuf
11528 11214 * chain_type - IO chain type to use:
11529 11215 * SD_CHAIN_NULL
11530 11216 * SD_CHAIN_BUFIO
11531 11217 * SD_CHAIN_USCSI
11532 11218 * SD_CHAIN_DIRECT
11533 11219 * SD_CHAIN_DIRECT_PRIORITY
11534 11220 * pktinfop - ptr to private data struct for scsi_pkt(9S)
11535 11221 * initialization; may be NULL if none.
11536 11222 *
11537 11223 * Context: Kernel thread context
11538 11224 */
11539 11225
11540 11226 static void
11541 11227 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
11542 11228 uchar_t chain_type, void *pktinfop)
11543 11229 {
11544 11230 int index;
11545 11231
11546 11232 ASSERT(un != NULL);
11547 11233 ASSERT(bp != NULL);
11548 11234 ASSERT(xp != NULL);
11549 11235
11550 11236 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
11551 11237 bp, chain_type);
11552 11238
11553 11239 xp->xb_un = un;
11554 11240 xp->xb_pktp = NULL;
11555 11241 xp->xb_pktinfo = pktinfop;
11556 11242 xp->xb_private = bp->b_private;
11557 11243 xp->xb_blkno = (daddr_t)bp->b_blkno;
11558 11244
11559 11245 /*
11560 11246 * Set up the iostart and iodone chain indexes in the xbuf, based
11561 11247 * upon the specified chain type to use.
11562 11248 */
11563 11249 switch (chain_type) {
11564 11250 case SD_CHAIN_NULL:
11565 11251 /*
11566 11252 * Fall thru to just use the values for the buf type, even
11567 11253 * tho for the NULL chain these values will never be used.
11568 11254 */
11569 11255 /* FALLTHRU */
11570 11256 case SD_CHAIN_BUFIO:
11571 11257 index = un->un_buf_chain_type;
11572 11258 if ((!un->un_f_has_removable_media) &&
11573 11259 (un->un_tgt_blocksize != 0) &&
11574 11260 (un->un_tgt_blocksize != DEV_BSIZE ||
11575 11261 un->un_f_enable_rmw)) {
11576 11262 int secmask = 0, blknomask = 0;
11577 11263 if (un->un_f_enable_rmw) {
11578 11264 blknomask =
11579 11265 (un->un_phy_blocksize / DEV_BSIZE) - 1;
11580 11266 secmask = un->un_phy_blocksize - 1;
11581 11267 } else {
11582 11268 blknomask =
11583 11269 (un->un_tgt_blocksize / DEV_BSIZE) - 1;
11584 11270 secmask = un->un_tgt_blocksize - 1;
11585 11271 }
11586 11272
11587 11273 if ((bp->b_lblkno & (blknomask)) ||
11588 11274 (bp->b_bcount & (secmask))) {
11589 11275 if ((un->un_f_rmw_type !=
11590 11276 SD_RMW_TYPE_RETURN_ERROR) ||
11591 11277 un->un_f_enable_rmw) {
11592 11278 if (un->un_f_pm_is_enabled == FALSE)
11593 11279 index =
11594 11280 SD_CHAIN_INFO_MSS_DSK_NO_PM;
11595 11281 else
11596 11282 index =
11597 11283 SD_CHAIN_INFO_MSS_DISK;
11598 11284 }
11599 11285 }
11600 11286 }
11601 11287 break;
11602 11288 case SD_CHAIN_USCSI:
11603 11289 index = un->un_uscsi_chain_type;
11604 11290 break;
11605 11291 case SD_CHAIN_DIRECT:
11606 11292 index = un->un_direct_chain_type;
11607 11293 break;
11608 11294 case SD_CHAIN_DIRECT_PRIORITY:
11609 11295 index = un->un_priority_chain_type;
11610 11296 break;
11611 11297 default:
11612 11298 /* We're really broken if we ever get here... */
11613 11299 panic("sd_xbuf_init: illegal chain type!");
11614 11300 /*NOTREACHED*/
11615 11301 }
11616 11302
11617 11303 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11618 11304 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11619 11305
11620 11306 /*
11621 11307 * It might be a bit easier to simply bzero the entire xbuf above,
11622 11308 * but it turns out that since we init a fair number of members anyway,
11623 11309 	 * we save a fair number of cycles by doing explicit assignment of zero.
11624 11310 */
11625 11311 xp->xb_pkt_flags = 0;
11626 11312 xp->xb_dma_resid = 0;
11627 11313 xp->xb_retry_count = 0;
11628 11314 xp->xb_victim_retry_count = 0;
11629 11315 xp->xb_ua_retry_count = 0;
11630 11316 xp->xb_nr_retry_count = 0;
11631 11317 xp->xb_sense_bp = NULL;
11632 11318 xp->xb_sense_status = 0;
11633 11319 xp->xb_sense_state = 0;
11634 11320 xp->xb_sense_resid = 0;
11635 11321 xp->xb_ena = 0;
11636 11322
11637 11323 bp->b_private = xp;
11638 11324 bp->b_flags &= ~(B_DONE | B_ERROR);
11639 11325 bp->b_resid = 0;
11640 11326 bp->av_forw = NULL;
11641 11327 bp->av_back = NULL;
11642 11328 bioerror(bp, 0);
11643 11329
11644 11330 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11645 11331 }
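
A worked instance of the mask arithmetic above, assuming a hypothetical
4096-byte target block size with DEV_BSIZE of 512:

            /*
             * blknomask = (4096 / 512) - 1 = 7      (b_lblkno granularity)
             * secmask   = 4096 - 1         = 4095   (b_bcount granularity)
             *
             * b_lblkno = 16, b_bcount = 8192: (16 & 7) == 0 and
             * (8192 & 4095) == 0, so the buf stays on the normal chain.
             * b_lblkno = 17 or b_bcount = 4608 fails the test and is
             * routed to the read-modify-write (MSS) chain instead.
             */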
11646 11332
11647 11333
11648 11334 /*
11649 11335 * Function: sd_uscsi_strategy
11650 11336 *
11651 11337 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11652 11338 *
11653 11339 * Arguments: bp - buf struct ptr
11654 11340 *
11655 11341 * Return Code: Always returns 0
11656 11342 *
11657 11343 * Context: Kernel thread context
11658 11344 */
11659 11345
11660 11346 static int
11661 11347 sd_uscsi_strategy(struct buf *bp)
11662 11348 {
11663 11349 struct sd_lun *un;
11664 11350 struct sd_uscsi_info *uip;
11665 11351 struct sd_xbuf *xp;
11666 11352 uchar_t chain_type;
11667 11353 uchar_t cmd;
11668 11354
11669 11355 ASSERT(bp != NULL);
11670 11356
11671 11357 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11672 - if (un == NULL) {
11358 + if (un == NULL || DEVI_IS_GONE(SD_DEVINFO(un))) {
11673 11359 bioerror(bp, EIO);
11674 11360 bp->b_resid = bp->b_bcount;
11675 11361 biodone(bp);
11676 11362 return (0);
11677 11363 }
11678 11364
11679 11365 ASSERT(!mutex_owned(SD_MUTEX(un)));
11680 11366
11681 11367 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11682 11368
11683 11369 /*
11684 11370 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11685 11371 */
11686 11372 ASSERT(bp->b_private != NULL);
11687 11373 uip = (struct sd_uscsi_info *)bp->b_private;
11688 11374 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];
11689 11375
11690 11376 mutex_enter(SD_MUTEX(un));
11691 11377 /*
11692 11378 * atapi: Since we are running the CD for now in PIO mode we need to
11693 11379 	 * call bp_mapin here to avoid bp_mapin being called in interrupt
11694 11380 * the HBA's init_pkt routine.
11695 11381 */
11696 11382 if (un->un_f_cfg_is_atapi == TRUE) {
11697 11383 mutex_exit(SD_MUTEX(un));
11698 11384 bp_mapin(bp);
11699 11385 mutex_enter(SD_MUTEX(un));
11700 11386 }
11701 11387 un->un_ncmds_in_driver++;
11702 11388 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11703 11389 un->un_ncmds_in_driver);
11704 11390
11705 11391 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11706 11392 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11707 11393 un->un_f_sync_cache_required = TRUE;
11708 11394
11395 + if (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_USCSI) {
11396 + /*
11397 + * If there are outstanding commands, treat all
11398 + * USCSI commands as if they have B_FAILFAST set.
11399 + */
11400 + if (un->un_ncmds_in_driver != 1)
11401 + bp->b_flags |= B_FAILFAST;
11402 + }
11403 +
11709 11404 mutex_exit(SD_MUTEX(un));
11710 11405
11711 11406 switch (uip->ui_flags) {
11712 11407 case SD_PATH_DIRECT:
11713 11408 chain_type = SD_CHAIN_DIRECT;
11714 11409 break;
11715 11410 case SD_PATH_DIRECT_PRIORITY:
11716 11411 chain_type = SD_CHAIN_DIRECT_PRIORITY;
11717 11412 break;
11718 11413 default:
11719 11414 chain_type = SD_CHAIN_USCSI;
11720 11415 break;
11721 11416 }
11722 11417
11723 11418 /*
11724 11419 * We may allocate extra buf for external USCSI commands. If the
11725 11420 	 * application asks for more than 20 bytes of sense data via USCSI,
11726 11421 	 * the SCSA layer will allocate a 252-byte sense buffer for it.
11727 11422 */
11728 11423 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11729 11424 SENSE_LENGTH) {
11730 11425 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11731 11426 MAX_SENSE_LENGTH, KM_SLEEP);
11732 11427 } else {
11733 11428 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11734 11429 }
11735 11430
11736 11431 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11737 11432
11738 11433 /* Use the index obtained within xbuf_init */
11739 11434 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11740 11435
11741 11436 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11742 11437
11743 11438 return (0);
11744 11439 }
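
The xbuf is over-allocated above when the caller asks for more sense data than
the default; sd_uscsi_iodone() (later in this file) must compute the identical
size for kmem_free(), since kmem requires the freed size to match the
allocated one. A sketch of the shared sizing rule (demo_xbuf_size is
illustrative, not a helper in sd):

            /* Per the comment above: default sense 20 bytes, maximum 252. */
            static size_t
            demo_xbuf_size(uint32_t uscsi_rqlen)
            {
                    if (uscsi_rqlen > SENSE_LENGTH) {
                            return (sizeof (struct sd_xbuf) -
                                SENSE_LENGTH + MAX_SENSE_LENGTH);
                    }
                    return (sizeof (struct sd_xbuf));
            }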
11745 11440
11746 11441 /*
11747 11442 * Function: sd_send_scsi_cmd
11748 11443 *
11749 11444 * Description: Runs a USCSI command for user (when called thru sdioctl),
11750 11445 * or for the driver
11751 11446 *
11752 11447 * Arguments: dev - the dev_t for the device
11753 11448 * incmd - ptr to a valid uscsi_cmd struct
11754 11449 * flag - bit flag, indicating open settings, 32/64 bit type
11755 11450 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11756 11451 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11757 11452 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11758 11453 * to use the USCSI "direct" chain and bypass the normal
11759 11454 * command waitq.
11760 11455 *
11761 11456 * Return Code: 0 - successful completion of the given command
11762 11457 * EIO - scsi_uscsi_handle_command() failed
11763 11458 * ENXIO - soft state not found for specified dev
11764 11459 * EINVAL
11765 11460 * EFAULT - copyin/copyout error
11766 11461 * return code of scsi_uscsi_handle_command():
11767 11462 * EIO
11768 11463 * ENXIO
11769 11464 * EACCES
11770 11465 *
11771 11466 * Context: Waits for command to complete. Can sleep.
11772 11467 */
11773 11468
11774 11469 static int
11775 11470 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11776 11471 enum uio_seg dataspace, int path_flag)
11777 11472 {
11778 11473 struct sd_lun *un;
11779 11474 sd_ssc_t *ssc;
11780 11475 int rval;
11781 11476
11782 11477 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11783 - if (un == NULL) {
11478 + if (un == NULL || DEVI_IS_GONE(SD_DEVINFO(un)))
11784 11479 return (ENXIO);
11785 - }
11786 11480
11787 11481 /*
11788 11482 * Using sd_ssc_send to handle uscsi cmd
11789 11483 */
11790 11484 ssc = sd_ssc_init(un);
11791 11485 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11792 11486 sd_ssc_fini(ssc);
11793 11487
11794 11488 return (rval);
11795 11489 }
11796 11490
11797 11491 /*
11798 11492 * Function: sd_ssc_init
11799 11493 *
11800 11494 * Description: Uscsi end-user call this function to initialize necessary
11801 11495 * fields, such as uscsi_cmd and sd_uscsi_info struct.
11802 11496 *
11803 11497 * The return value of sd_send_scsi_cmd will be treated as a
11804 11498 	 *		fault in various conditions. Even if it is not zero, some
11805 11499 	 *		callers may ignore the return value. That is to say, we
11806 11500 	 *		cannot make an accurate assessment in sdintr, since if a
11807 11501 	 *		command fails in sdintr it does not mean the caller of
11808 11502 * sd_send_scsi_cmd will treat it as a real failure.
11809 11503 *
11810 11504 * To avoid printing too many error logs for a failed uscsi
11811 11505 	 *		packet that the caller may not treat as a failure, the
11812 11506 	 *		sd driver keeps silent when handling all uscsi commands.
11813 11507 *
11814 11508 * During detach->attach and attach-open, for some types of
11815 11509 * problems, the driver should be providing information about
11816 11510 	 *		the problem encountered. Devices use USCSI_SILENT, which
11817 11511 * suppresses all driver information. The result is that no
11818 11512 * information about the problem is available. Being
11819 11513 * completely silent during this time is inappropriate. The
11820 11514 * driver needs a more selective filter than USCSI_SILENT, so
11821 11515 * that information related to faults is provided.
11822 11516 *
11823 11517 	 *		To make an accurate assessment, the caller of
11824 11518 	 *		sd_send_scsi_USCSI_CMD should take ownership and
11825 11519 	 *		gather the information necessary to print error messages.
11826 11520 *
11827 11521 	 *		If we want to print the necessary info of a uscsi command, we need to
11828 11522 * keep the uscsi_cmd and sd_uscsi_info till we can make the
11829 11523 * assessment. We use sd_ssc_init to alloc necessary
11830 11524 	 *		structs for sending a uscsi command and we are also
11831 11525 	 *		responsible for freeing the memory by calling
11832 11526 * sd_ssc_fini.
11833 11527 *
11834 11528 	 *		The calling sequence will look like:
11835 11529 * sd_ssc_init->
11836 11530 *
11837 11531 * ...
11838 11532 *
11839 11533 * sd_send_scsi_USCSI_CMD->
11840 11534 * sd_ssc_send-> - - - sdintr
11841 11535 * ...
11842 11536 *
11843 11537 * if we think the return value should be treated as a
11844 11538 	 *		failure, we make the assessment here and print the
11845 11539 	 *		necessary info by retrieving uscsi_cmd and sd_uscsi_info
11846 11540 *
11847 11541 * ...
11848 11542 *
11849 11543 * sd_ssc_fini
11850 11544 *
11851 11545 *
11852 11546 * Arguments: un - pointer to driver soft state (unit) structure for this
11853 11547 * target.
11854 11548 *
11855 11549 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11856 11550 * uscsi_cmd and sd_uscsi_info.
11857 11551 	 *		NULL - if memory for the sd_ssc_t struct cannot be allocated
11858 11552 *
11859 11553 * Context: Kernel Thread.
11860 11554 */
11861 11555 static sd_ssc_t *
11862 11556 sd_ssc_init(struct sd_lun *un)
11863 11557 {
11864 11558 sd_ssc_t *ssc;
11865 11559 struct uscsi_cmd *ucmdp;
11866 11560 struct sd_uscsi_info *uip;
11867 11561
11868 11562 ASSERT(un != NULL);
11869 11563 ASSERT(!mutex_owned(SD_MUTEX(un)));
11870 11564
11871 11565 /*
11872 11566 * Allocate sd_ssc_t structure
11873 11567 */
11874 11568 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11875 11569
11876 11570 /*
11877 11571 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11878 11572 */
11879 11573 ucmdp = scsi_uscsi_alloc();
11880 11574
11881 11575 /*
11882 11576 * Allocate sd_uscsi_info structure
11883 11577 */
11884 11578 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11885 11579
11886 11580 ssc->ssc_uscsi_cmd = ucmdp;
11887 11581 ssc->ssc_uscsi_info = uip;
11888 11582 ssc->ssc_un = un;
11889 11583
11890 11584 return (ssc);
11891 11585 }
11892 11586
11893 11587 /*
11894 11588 * Function: sd_ssc_fini
11895 11589 *
11896 11590 	 * Description: Free the sd_ssc_t and the structures hanging off it.
11897 11591 *
11898 11592 * Arguments: ssc - struct pointer of sd_ssc_t.
11899 11593 */
11900 11594 static void
11901 11595 sd_ssc_fini(sd_ssc_t *ssc)
11902 11596 {
11903 11597 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11904 11598
11905 11599 if (ssc->ssc_uscsi_info != NULL) {
11906 11600 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11907 11601 ssc->ssc_uscsi_info = NULL;
11908 11602 }
11909 11603
11910 11604 kmem_free(ssc, sizeof (sd_ssc_t));
11911 11605 ssc = NULL;
11912 11606 }
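
Putting the pieces together, the lifecycle described in the sd_ssc_init()
comment looks roughly like the following caller pattern; a sketch only,
assuming an already-built uscsi_cmd and that FKIOCTL is the appropriate flag
for an in-kernel caller:

            static int
            demo_uscsi_roundtrip(struct sd_lun *un, struct uscsi_cmd *ucmd)
            {
                    sd_ssc_t *ssc;
                    int rval;

                    ssc = sd_ssc_init(un);
                    rval = sd_ssc_send(ssc, ucmd, FKIOCTL, UIO_SYSSPACE,
                        SD_PATH_DIRECT);
                    /* Every send must be answered by one assessment. */
                    sd_ssc_assessment(ssc, (rval == 0) ?
                        SD_FMT_STANDARD : SD_FMT_STATUS_CHECK);
                    sd_ssc_fini(ssc);
                    return (rval);
            }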
11913 11607
11914 11608 /*
11915 11609 * Function: sd_ssc_send
11916 11610 *
11917 11611 * Description: Runs a USCSI command for user when called through sdioctl,
11918 11612 * or for the driver.
11919 11613 *
11920 11614 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
11921 11615 * sd_uscsi_info in.
11922 11616 * incmd - ptr to a valid uscsi_cmd struct
11923 11617 * flag - bit flag, indicating open settings, 32/64 bit type
11924 11618 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11925 11619 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11926 11620 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11927 11621 * to use the USCSI "direct" chain and bypass the normal
11928 11622 * command waitq.
11929 11623 *
11930 11624 * Return Code: 0 - successful completion of the given command
11931 11625 * EIO - scsi_uscsi_handle_command() failed
11932 11626 * ENXIO - soft state not found for specified dev
11933 11627 * ECANCELED - command cancelled due to low power
11934 11628 * EINVAL
11935 11629 * EFAULT - copyin/copyout error
11936 11630 * return code of scsi_uscsi_handle_command():
11937 11631 * EIO
11938 11632 * ENXIO
11939 11633 * EACCES
11940 11634 *
11941 11635 * Context: Kernel Thread;
11942 11636 * Waits for command to complete. Can sleep.
11943 11637 */
11944 11638 static int
11945 11639 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11946 11640 enum uio_seg dataspace, int path_flag)
11947 11641 {
11948 11642 struct sd_uscsi_info *uip;
11949 11643 struct uscsi_cmd *uscmd;
11950 11644 struct sd_lun *un;
11951 11645 dev_t dev;
11646 + dev_info_t *dip = SD_DEVINFO(ssc->ssc_un);
11952 11647
11953 11648 int format = 0;
11954 11649 int rval;
11955 11650
11956 11651 ASSERT(ssc != NULL);
11957 11652 un = ssc->ssc_un;
11958 11653 ASSERT(un != NULL);
11959 11654 uscmd = ssc->ssc_uscsi_cmd;
11960 11655 ASSERT(uscmd != NULL);
11961 11656 ASSERT(!mutex_owned(SD_MUTEX(un)));
11962 11657 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11963 11658 /*
11964 11659 * If enter here, it indicates that the previous uscsi
11965 11660 * command has not been processed by sd_ssc_assessment.
11966 11661 * This is violating our rules of FMA telemetry processing.
11967 11662 * We should print out this message and the last undisposed
11968 11663 * uscsi command.
11969 11664 */
11970 11665 if (uscmd->uscsi_cdb != NULL) {
11971 11666 SD_INFO(SD_LOG_SDTEST, un,
11972 11667 "sd_ssc_send is missing the alternative "
11973 11668 "sd_ssc_assessment when running command 0x%x.\n",
11974 11669 uscmd->uscsi_cdb[0]);
11975 11670 }
11976 11671 /*
11977 11672 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
11978 11673 * the initial status.
11979 11674 */
11980 11675 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
11981 11676 }
11982 11677
11983 11678 /*
11984 11679 * We need to make sure sd_ssc_send will have sd_ssc_assessment
11985 11680 * followed to avoid missing FMA telemetries.
11986 11681 */
11987 11682 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
11988 11683
11989 11684 /*
11990 11685 * if USCSI_PMFAILFAST is set and un is in low power, fail the
11991 11686 * command immediately.
11992 11687 */
11993 11688 mutex_enter(SD_MUTEX(un));
11994 11689 mutex_enter(&un->un_pm_mutex);
11690 +
11995 11691 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
11996 11692 SD_DEVICE_IS_IN_LOW_POWER(un)) {
11997 11693 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:"
11998 11694 "un:0x%p is in low power\n", un);
11999 11695 mutex_exit(&un->un_pm_mutex);
12000 11696 mutex_exit(SD_MUTEX(un));
12001 11697 return (ECANCELED);
12002 11698 }
12003 11699 mutex_exit(&un->un_pm_mutex);
12004 11700 mutex_exit(SD_MUTEX(un));
12005 11701
12006 11702 #ifdef SDDEBUG
12007 11703 switch (dataspace) {
12008 11704 case UIO_USERSPACE:
12009 11705 SD_TRACE(SD_LOG_IO, un,
12010 11706 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
12011 11707 break;
12012 11708 case UIO_SYSSPACE:
12013 11709 SD_TRACE(SD_LOG_IO, un,
12014 11710 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
12015 11711 break;
12016 11712 default:
12017 11713 SD_TRACE(SD_LOG_IO, un,
12018 11714 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
12019 11715 break;
12020 11716 }
12021 11717 #endif
12022 11718
12023 11719 rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
12024 11720 SD_ADDRESS(un), &uscmd);
12025 11721 if (rval != 0) {
12026 11722 		SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: "
12027 11723 		    "scsi_uscsi_copyin failed\n");
12028 11724 return (rval);
12029 11725 }
12030 11726
12031 11727 if ((uscmd->uscsi_cdb != NULL) &&
12032 11728 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
12033 11729 mutex_enter(SD_MUTEX(un));
12034 11730 un->un_f_format_in_progress = TRUE;
12035 11731 mutex_exit(SD_MUTEX(un));
12036 11732 format = 1;
12037 11733 }
12038 11734
12039 11735 /*
12040 11736 * Allocate an sd_uscsi_info struct and fill it with the info
12041 11737 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
12042 11738 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
12043 11739 * since we allocate the buf here in this function, we do not
12044 11740 * need to preserve the prior contents of b_private.
12045 11741 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
12046 11742 */
12047 11743 uip = ssc->ssc_uscsi_info;
12048 11744 uip->ui_flags = path_flag;
12049 11745 uip->ui_cmdp = uscmd;
12050 11746
12051 11747 /*
12052 11748 * Commands sent with priority are intended for error recovery
12053 11749 * situations, and do not have retries performed.
12054 11750 */
12055 11751 if (path_flag == SD_PATH_DIRECT_PRIORITY) {
12056 11752 uscmd->uscsi_flags |= USCSI_DIAGNOSE;
12057 11753 }
12058 11754 uscmd->uscsi_flags &= ~USCSI_NOINTR;
12059 11755
12060 11756 dev = SD_GET_DEV(un);
12061 11757 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
12062 11758 sd_uscsi_strategy, NULL, uip);
11759 + if (DEVI_IS_GONE(dip)) {
11760 + cmn_err(CE_WARN, "%s-%d: device is gone!", __func__, __LINE__);
11761 + return (ENXIO);
11762 + }
12063 11763
12064 11764 /*
12065 11765 * mark ssc_flags right after handle_cmd to make sure
12066 11766 * the uscsi has been sent
12067 11767 */
12068 11768 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;
12069 11769
12070 11770 #ifdef SDDEBUG
12071 11771 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12072 11772 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
12073 11773 uscmd->uscsi_status, uscmd->uscsi_resid);
12074 11774 if (uscmd->uscsi_bufaddr != NULL) {
12075 11775 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12076 11776 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
12077 11777 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
12078 11778 if (dataspace == UIO_SYSSPACE) {
12079 11779 SD_DUMP_MEMORY(un, SD_LOG_IO,
12080 11780 "data", (uchar_t *)uscmd->uscsi_bufaddr,
12081 11781 uscmd->uscsi_buflen, SD_LOG_HEX);
12082 11782 }
12083 11783 }
12084 11784 #endif
12085 11785
12086 11786 if (format == 1) {
12087 11787 mutex_enter(SD_MUTEX(un));
12088 11788 un->un_f_format_in_progress = FALSE;
12089 11789 mutex_exit(SD_MUTEX(un));
12090 11790 }
12091 11791
12092 11792 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd);
12093 11793
12094 11794 return (rval);
12095 11795 }
12096 11796
12097 11797 /*
12098 11798 * Function: sd_ssc_print
12099 11799 *
12100 11800 * Description: Print information available to the console.
12101 11801 *
12102 11802 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12103 11803 * sd_uscsi_info in.
12104 11804 * sd_severity - log level.
12105 11805 * Context: Kernel thread or interrupt context.
12106 11806 */
12107 11807 static void
12108 11808 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12109 11809 {
12110 11810 struct uscsi_cmd *ucmdp;
12111 11811 struct scsi_device *devp;
12112 11812 dev_info_t *devinfo;
12113 11813 uchar_t *sensep;
12114 11814 int senlen;
12115 11815 union scsi_cdb *cdbp;
12116 11816 uchar_t com;
12117 11817 extern struct scsi_key_strings scsi_cmds[];
12118 11818
12119 11819 ASSERT(ssc != NULL);
12120 11820 ASSERT(ssc->ssc_un != NULL);
12121 11821
12122 11822 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12123 11823 return;
12124 11824 ucmdp = ssc->ssc_uscsi_cmd;
12125 11825 devp = SD_SCSI_DEVP(ssc->ssc_un);
12126 11826 devinfo = SD_DEVINFO(ssc->ssc_un);
12127 11827 ASSERT(ucmdp != NULL);
12128 11828 ASSERT(devp != NULL);
12129 11829 ASSERT(devinfo != NULL);
12130 11830 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12131 11831 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12132 11832 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12133 11833
12134 11834 /* In certain case (like DOORLOCK), the cdb could be NULL. */
12135 11835 if (cdbp == NULL)
12136 11836 return;
12137 11837 /* We don't print log if no sense data available. */
12138 11838 if (senlen == 0)
12139 11839 sensep = NULL;
12140 11840 com = cdbp->scc_cmd;
12141 11841 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12142 11842 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12143 11843 }
12144 11844
12145 11845 /*
12146 11846 * Function: sd_ssc_assessment
12147 11847 *
12148 11848 * Description: We use this function to make an assessment at the point
12149 11849 * where SD driver may encounter a potential error.
12150 11850 *
12151 11851 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12152 11852 * sd_uscsi_info in.
12153 11853 * tp_assess - a hint of strategy for ereport posting.
12154 11854 * Possible values of tp_assess include:
12155 11855 * SD_FMT_IGNORE - we don't post any ereport because we're
12156 11856 * sure that it is ok to ignore the underlying problems.
12157 11857 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
12158 11858 * but it might be not correct to ignore the underlying hardware
12159 11859 * error.
12160 11860 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12161 11861 * payload driver-assessment of value "fail" or
12162 11862 * "fatal"(depending on what information we have here). This
12163 11863 * assessment value is usually set when SD driver think there
12164 11864 * is a potential error occurred(Typically, when return value
12165 11865 * of the SCSI command is EIO).
12166 11866 * SD_FMT_STANDARD - we will post an ereport with the payload
12167 11867 * driver-assessment of value "info". This assessment value is
12168 11868 * set when the SCSI command returned successfully and with
12169 11869 * sense data sent back.
12170 11870 *
12171 11871 * Context: Kernel thread.
12172 11872 */
12173 11873 static void
12174 11874 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12175 11875 {
12176 11876 int senlen = 0;
12177 11877 struct uscsi_cmd *ucmdp = NULL;
12178 11878 struct sd_lun *un;
12179 11879
12180 11880 ASSERT(ssc != NULL);
12181 11881 un = ssc->ssc_un;
12182 11882 ASSERT(un != NULL);
12183 11883 ucmdp = ssc->ssc_uscsi_cmd;
12184 11884 ASSERT(ucmdp != NULL);
12185 11885
12186 11886 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12187 11887 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12188 11888 } else {
12189 11889 /*
12190 11890 		 * If we enter here, it indicates that we have a wrong
12191 11891 * calling sequence of sd_ssc_send and sd_ssc_assessment,
12192 11892 * both of which should be called in a pair in case of
12193 11893 * loss of FMA telemetries.
12194 11894 */
12195 11895 if (ucmdp->uscsi_cdb != NULL) {
12196 11896 SD_INFO(SD_LOG_SDTEST, un,
12197 11897 "sd_ssc_assessment is missing the "
12198 11898 "alternative sd_ssc_send when running 0x%x, "
12199 11899 "or there are superfluous sd_ssc_assessment for "
12200 11900 "the same sd_ssc_send.\n",
12201 11901 ucmdp->uscsi_cdb[0]);
12202 11902 }
12203 11903 /*
12204 11904 * Set the ssc_flags to the initial value to avoid passing
12205 11905 * down dirty flags to the following sd_ssc_send function.
12206 11906 */
12207 11907 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12208 11908 return;
12209 11909 }
12210 11910
12211 11911 /*
12212 11912 * Only handle an issued command which is waiting for assessment.
12213 11913 * A command which is not issued will not have
12214 11914 	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12215 11915 */
12216 11916 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12217 11917 sd_ssc_print(ssc, SCSI_ERR_INFO);
12218 11918 return;
12219 11919 } else {
12220 11920 /*
12221 11921 * For an issued command, we should clear this flag in
12222 11922 		 * order to make the sd_ssc_t structure reusable across
12223 11923 * multiple uscsi commands.
12224 11924 */
12225 11925 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12226 11926 }
12227 11927
12228 11928 /*
12229 11929 * We will not deal with non-retryable(flag USCSI_DIAGNOSE set)
12230 11930 * commands here. And we should clear the ssc_flags before return.
12231 11931 */
12232 11932 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12233 11933 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12234 11934 return;
12235 11935 }
12236 11936
12237 11937 switch (tp_assess) {
12238 11938 case SD_FMT_IGNORE:
12239 11939 case SD_FMT_IGNORE_COMPROMISE:
12240 11940 break;
12241 11941 case SD_FMT_STATUS_CHECK:
12242 11942 /*
12243 11943 * For a failed command(including the succeeded command
12244 11944 * with invalid data sent back).
12245 11945 */
12246 11946 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12247 11947 break;
12248 11948 case SD_FMT_STANDARD:
12249 11949 /*
12250 11950 * Always for the succeeded commands probably with sense
12251 11951 * data sent back.
12252 11952 * Limitation:
12253 11953 * We can only handle a succeeded command with sense
12254 11954 * data sent back when auto-request-sense is enabled.
12255 11955 */
12256 11956 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12257 11957 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12258 11958 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12259 11959 (un->un_f_arq_enabled == TRUE) &&
12260 11960 senlen > 0 &&
12261 11961 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12262 11962 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12263 11963 }
12264 11964 break;
12265 11965 default:
12266 11966 /*
12267 11967 * Should not have other type of assessment.
12268 11968 */
12269 11969 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12270 11970 "sd_ssc_assessment got wrong "
12271 11971 "sd_type_assessment %d.\n", tp_assess);
12272 11972 break;
12273 11973 }
12274 11974 /*
12275 11975 * Clear up the ssc_flags before return.
12276 11976 */
12277 11977 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12278 11978 }
12279 11979
12280 11980 /*
12281 11981 * Function: sd_ssc_post
12282 11982 *
12283 11983 * Description: 1. read the driver property to get fm-scsi-log flag.
12284 11984 * 2. print log if fm_log_capable is non-zero.
12285 11985 * 3. call sd_ssc_ereport_post to post ereport if possible.
12286 11986 *
12287 11987 * Context: May be called from kernel thread or interrupt context.
12288 11988 */
12289 11989 static void
12290 11990 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12291 11991 {
12292 11992 struct sd_lun *un;
12293 11993 int sd_severity;
12294 11994
12295 11995 ASSERT(ssc != NULL);
12296 11996 un = ssc->ssc_un;
12297 11997 ASSERT(un != NULL);
12298 11998
12299 11999 /*
12300 12000 * We may enter here from sd_ssc_assessment(for USCSI command) or
12301 12001 * by directly called from sdintr context.
12302 12002 * We don't handle a non-disk drive(CD-ROM, removable media).
12303 12003 * Clear the ssc_flags before return in case we've set
12304 12004 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk
12305 12005 * driver.
12306 12006 */
12307 12007 if (ISCD(un) || un->un_f_has_removable_media) {
12308 12008 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12309 12009 return;
12310 12010 }
12311 12011
12312 12012 switch (sd_assess) {
12313 12013 case SD_FM_DRV_FATAL:
12314 12014 sd_severity = SCSI_ERR_FATAL;
12315 12015 break;
12316 12016 case SD_FM_DRV_RECOVERY:
12317 12017 sd_severity = SCSI_ERR_RECOVERED;
12318 12018 break;
12319 12019 case SD_FM_DRV_RETRY:
12320 12020 sd_severity = SCSI_ERR_RETRYABLE;
12321 12021 break;
12322 12022 case SD_FM_DRV_NOTICE:
12323 12023 sd_severity = SCSI_ERR_INFO;
12324 12024 break;
12325 12025 default:
12326 12026 sd_severity = SCSI_ERR_UNKNOWN;
12327 12027 }
12328 12028 /* print log */
12329 12029 sd_ssc_print(ssc, sd_severity);
12330 12030
12331 12031 /* always post ereport */
12332 12032 sd_ssc_ereport_post(ssc, sd_assess);
12333 12033 }
12334 12034
12335 12035 /*
12336 12036 * Function: sd_ssc_set_info
12337 12037 *
12338 12038 * Description: Mark ssc_flags and set ssc_info which would be the
12339 12039 * payload of uderr ereport. This function will cause
12340 12040 * sd_ssc_ereport_post to post uderr ereport only.
12341 12041 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI),
12342 12042 * the function will also call SD_ERROR or scsi_log for a
12343 12043 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12344 12044 *
12345 12045 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12346 12046 * sd_uscsi_info in.
12347 12047 * ssc_flags - indicate the sub-category of a uderr.
12348 12048 * comp - this argument is meaningful only when
12349 12049 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12350 12050 * values include:
12351 12051 * > 0, SD_ERROR is used with comp as the driver logging
12352 12052 * component;
12353 12053 * = 0, scsi-log is used to log error telemetries;
12354 12054 * < 0, no log available for this telemetry.
12355 12055 *
12356 12056 * Context: Kernel thread or interrupt context
12357 12057 */
12358 12058 static void
12359 12059 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12360 12060 {
12361 12061 va_list ap;
12362 12062
12363 12063 ASSERT(ssc != NULL);
12364 12064 ASSERT(ssc->ssc_un != NULL);
12365 12065
12366 12066 ssc->ssc_flags |= ssc_flags;
12367 12067 va_start(ap, fmt);
12368 12068 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12369 12069 va_end(ap);
12370 12070
12371 12071 /*
12372 12072 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12373 12073 * with invalid data sent back. For non-uscsi command, the
12374 12074 * following code will be bypassed.
12375 12075 */
12376 12076 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12377 12077 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12378 12078 /*
12379 12079 			 * If the error belongs to a certain component and we
12380 12080 * do not want it to show up on the console, we
12381 12081 * will use SD_ERROR, otherwise scsi_log is
12382 12082 * preferred.
12383 12083 */
12384 12084 if (comp > 0) {
12385 12085 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12386 12086 } else if (comp == 0) {
12387 12087 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12388 12088 CE_WARN, ssc->ssc_info);
12389 12089 }
12390 12090 }
12391 12091 }
12392 12092 }
12393 12093
12394 12094 /*
12395 12095 * Function: sd_buf_iodone
12396 12096 *
12397 12097 * Description: Frees the sd_xbuf & returns the buf to its originator.
12398 12098 *
12399 12099 * Context: May be called from interrupt context.
12400 12100 */
12401 12101 /* ARGSUSED */
12402 12102 static void
12403 12103 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12404 12104 {
12405 12105 struct sd_xbuf *xp;
12406 12106
12407 12107 ASSERT(un != NULL);
12408 12108 ASSERT(bp != NULL);
12409 12109 ASSERT(!mutex_owned(SD_MUTEX(un)));
12410 12110
12411 12111 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12412 12112
12413 12113 xp = SD_GET_XBUF(bp);
12414 12114 ASSERT(xp != NULL);
12415 12115
12416 12116 /* xbuf is gone after this */
12417 12117 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12418 12118 mutex_enter(SD_MUTEX(un));
12419 12119
12420 12120 /*
12421 12121 * Grab time when the cmd completed.
12422 12122 * This is used for determining if the system has been
12423 12123 * idle long enough to make it idle to the PM framework.
12424 12124 * This is for lowering the overhead, and therefore improving
12425 12125 * performance per I/O operation.
12426 12126 */
12427 12127 un->un_pm_idle_time = gethrtime();
12428 12128
12429 12129 un->un_ncmds_in_driver--;
12430 12130 ASSERT(un->un_ncmds_in_driver >= 0);
12131 + if (un->un_f_detach_waiting)
12132 + cv_signal(&un->un_detach_cv);
12431 12133 SD_INFO(SD_LOG_IO, un,
12432 12134 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12433 12135 un->un_ncmds_in_driver);
12434 12136
12435 12137 mutex_exit(SD_MUTEX(un));
12436 12138 }
12437 12139
12438 12140 biodone(bp); /* bp is gone after this */
12439 12141
12440 12142 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12441 12143 }
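
The un_f_detach_waiting/un_detach_cv signalling added here (and in the other
completion paths in this change) implies a drain loop on the detach side.
Presumably, and this is an assumption about code outside this hunk, the
consumer looks something like:

            /* Assumed detach-side drain; not shown in this webrev hunk. */
            mutex_enter(SD_MUTEX(un));
            un->un_f_detach_waiting = TRUE;
            while (un->un_ncmds_in_driver > 0)
                    cv_wait(&un->un_detach_cv, SD_MUTEX(un));
            un->un_f_detach_waiting = FALSE;
            mutex_exit(SD_MUTEX(un));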
12442 12144
12443 12145
12444 12146 /*
12445 12147 * Function: sd_uscsi_iodone
12446 12148 *
12447 12149 * Description: Frees the sd_xbuf & returns the buf to its originator.
12448 12150 *
12449 12151 * Context: May be called from interrupt context.
12450 12152 */
12451 12153 /* ARGSUSED */
12452 12154 static void
12453 12155 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12454 12156 {
12455 12157 struct sd_xbuf *xp;
12456 12158
12457 12159 ASSERT(un != NULL);
12458 12160 ASSERT(bp != NULL);
12459 12161
12460 12162 xp = SD_GET_XBUF(bp);
12461 12163 ASSERT(xp != NULL);
12462 12164 ASSERT(!mutex_owned(SD_MUTEX(un)));
12463 12165
12464 12166 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12465 12167
12466 12168 bp->b_private = xp->xb_private;
12467 12169
12468 12170 mutex_enter(SD_MUTEX(un));
12469 12171
12470 12172 /*
12471 12173 * Grab time when the cmd completed.
12472 12174 * This is used for determining if the system has been
12473 12175 * idle long enough to make it idle to the PM framework.
12474 12176 * This is for lowering the overhead, and therefore improving
12475 12177 * performance per I/O operation.
12476 12178 */
12477 12179 un->un_pm_idle_time = gethrtime();
12478 12180
12479 12181 un->un_ncmds_in_driver--;
12480 12182 ASSERT(un->un_ncmds_in_driver >= 0);
12183 + if (un->un_f_detach_waiting)
12184 + cv_signal(&un->un_detach_cv);
12481 12185 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12482 12186 un->un_ncmds_in_driver);
12483 12187
12484 12188 mutex_exit(SD_MUTEX(un));
12485 12189
12486 12190 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12487 12191 SENSE_LENGTH) {
12488 12192 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12489 12193 MAX_SENSE_LENGTH);
12490 12194 } else {
12491 12195 kmem_free(xp, sizeof (struct sd_xbuf));
12492 12196 }
12493 12197
12494 12198 biodone(bp);
12495 12199
12496 12200 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12497 12201 }
12498 12202
12499 12203
12500 12204 /*
12501 12205 * Function: sd_mapblockaddr_iostart
12502 12206 *
12503 12207 * Description: Verify request lies within the partition limits for
12504 12208 * the indicated minor device. Issue "overrun" buf if
12505 12209 * request would exceed partition range. Converts
12506 12210 * partition-relative block address to absolute.
12507 12211 *
12508 12212 * Upon exit of this function:
12509 12213 * 1.I/O is aligned
12510 12214 * xp->xb_blkno represents the absolute sector address
12511 12215 * 2.I/O is misaligned
12512 12216 * xp->xb_blkno represents the absolute logical block address
12513 12217 * based on DEV_BSIZE. The logical block address will be
12514 12218 * converted to physical sector address in sd_mapblocksize_\
12515 12219 * iostart.
12516 12220 * 3.I/O is misaligned but is aligned in "overrun" buf
12517 12221 * xp->xb_blkno represents the absolute logical block address
12518 12222 * based on DEV_BSIZE. The logical block address will be
12519 12223 * converted to physical sector address in sd_mapblocksize_\
12520 12224 * iostart. But no RMW will be issued in this case.
12521 12225 *
12522 12226 * Context: Can sleep
12523 12227 *
12524 12228 * Issues: This follows what the old code did, in terms of accessing
12525 12229 * some of the partition info in the unit struct without holding
12526 12230 	 *		the mutex. This is a general issue, if the partition info
12527 12231 * can be altered while IO is in progress... as soon as we send
12528 12232 * a buf, its partitioning can be invalid before it gets to the
12529 12233 * device. Probably the right fix is to move partitioning out
12530 12234 * of the driver entirely.
12531 12235 */
12532 12236
12533 12237 static void
12534 12238 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12535 12239 {
12536 12240 diskaddr_t nblocks; /* #blocks in the given partition */
12537 12241 daddr_t blocknum; /* Block number specified by the buf */
12538 12242 size_t requested_nblocks;
12539 12243 size_t available_nblocks;
12540 12244 int partition;
12541 12245 diskaddr_t partition_offset;
12542 12246 struct sd_xbuf *xp;
12543 12247 int secmask = 0, blknomask = 0;
12544 12248 ushort_t is_aligned = TRUE;
12545 12249
12546 12250 ASSERT(un != NULL);
12547 12251 ASSERT(bp != NULL);
12548 12252 ASSERT(!mutex_owned(SD_MUTEX(un)));
12549 12253
12550 12254 SD_TRACE(SD_LOG_IO_PARTITION, un,
12551 12255 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12552 12256
12553 12257 xp = SD_GET_XBUF(bp);
12554 12258 ASSERT(xp != NULL);
12555 12259
12556 12260 /*
12557 12261 * If the geometry is not indicated as valid, attempt to access
12558 12262 * the unit & verify the geometry/label. This can be the case for
12559 12263 	 * removable-media devices, or if the device was opened in
12560 12264 * NDELAY/NONBLOCK mode.
12561 12265 */
12562 12266 partition = SDPART(bp->b_edev);
12563 12267
12564 12268 if (!SD_IS_VALID_LABEL(un)) {
12565 12269 sd_ssc_t *ssc;
12566 12270 /*
12567 12271 * Initialize sd_ssc_t for internal uscsi commands
12568 12272 		 * To avoid a potential performance issue, we
12569 12273 		 * allocate memory only if the label is invalid
12570 12274 */
12571 12275 ssc = sd_ssc_init(un);
12572 12276
12573 12277 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12574 12278 /*
12575 12279 * For removable devices it is possible to start an
12576 12280 * I/O without a media by opening the device in nodelay
12577 12281 * mode. Also for writable CDs there can be many
12578 12282 * scenarios where there is no geometry yet but volume
12579 12283 * manager is trying to issue a read() just because
12580 12284 * it can see TOC on the CD. So do not print a message
12581 12285 * for removables.
12582 12286 */
12583 12287 if (!un->un_f_has_removable_media) {
12584 12288 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12585 12289 "i/o to invalid geometry\n");
12586 12290 }
12587 12291 bioerror(bp, EIO);
12588 12292 bp->b_resid = bp->b_bcount;
12589 12293 SD_BEGIN_IODONE(index, un, bp);
12590 12294
12591 12295 sd_ssc_fini(ssc);
12592 12296 return;
12593 12297 }
12594 12298 sd_ssc_fini(ssc);
12595 12299 }
12596 12300
12597 12301 nblocks = 0;
12598 12302 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12599 12303 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12600 12304
12601 12305 if (un->un_f_enable_rmw) {
12602 12306 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12603 12307 secmask = un->un_phy_blocksize - 1;
12604 12308 } else {
12605 12309 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12606 12310 secmask = un->un_tgt_blocksize - 1;
12607 12311 }
12608 12312
12609 12313 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12610 12314 is_aligned = FALSE;
12611 12315 }
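/*
 * Illustrative example (hypothetical values): on a drive with
 * un_phy_blocksize == 4096, RMW enabled and DEV_BSIZE == 512,
 * blknomask is (4096 / 512) - 1 == 7 and secmask is 4095. A request
 * with b_lblkno == 10 (10 & 7 != 0) or b_bcount == 1024
 * (1024 & 4095 != 0) is therefore treated as unaligned.
 */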
12612 12316
12613 12317 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12614 12318 /*
12615 12319 * If I/O is aligned, no need to involve RMW(Read Modify Write)
12616 12320 * Convert the logical block number to target's physical sector
12617 12321 * number.
12618 12322 */
12619 12323 if (is_aligned) {
12620 12324 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12621 12325 } else {
12622 12326 /*
12623 12327 * There is no RMW if we're just reading, so don't
12624 12328 * warn or error out because of it.
12625 12329 */
12626 12330 if (bp->b_flags & B_READ) {
12627 12331 /*EMPTY*/
12628 12332 } else if (!un->un_f_enable_rmw &&
12629 12333 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12630 12334 bp->b_flags |= B_ERROR;
12631 12335 goto error_exit;
12632 12336 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12633 12337 mutex_enter(SD_MUTEX(un));
12634 12338 if (!un->un_f_enable_rmw &&
12635 12339 un->un_rmw_msg_timeid == NULL) {
12636 12340 scsi_log(SD_DEVINFO(un), sd_label,
12637 12341 CE_WARN, "I/O request is not "
12638 12342 "aligned with %d disk sector size. "
12639 12343 "It is handled through Read Modify "
12640 12344 "Write but the performance is "
12641 12345 "very low.\n",
12642 12346 un->un_tgt_blocksize);
12643 12347 un->un_rmw_msg_timeid =
12644 12348 timeout(sd_rmw_msg_print_handler,
12645 12349 un, SD_RMW_MSG_PRINT_TIMEOUT);
12646 12350 } else {
12647 12351 un->un_rmw_incre_count++;
12648 12352 }
12649 12353 mutex_exit(SD_MUTEX(un));
12650 12354 }
12651 12355
12652 12356 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12653 12357 partition_offset = SD_TGT2SYSBLOCK(un,
12654 12358 partition_offset);
12655 12359 }
12656 12360 }
12657 12361
12658 12362 /*
12659 12363 * blocknum is the starting block number of the request. At this
12660 12364 * point it is still relative to the start of the minor device.
12661 12365 */
12662 12366 blocknum = xp->xb_blkno;
12663 12367
12664 12368 /*
12665 12369 * Legacy: If the starting block number is one past the last block
12666 12370 * in the partition, do not set B_ERROR in the buf; the request
* just completes with resid == bcount (much like a read(2) at EOF).
12667 12371 */
12668 12372 if (blocknum == nblocks) {
12669 12373 goto error_exit;
12670 12374 }
12671 12375
12672 12376 /*
12673 12377 * Confirm that the first block of the request lies within the
12674 12378 * partition limits. Also the requested number of bytes must be
12675 12379 * a multiple of the system block size.
12676 12380 */
12677 12381 if ((blocknum < 0) || (blocknum >= nblocks) ||
12678 12382 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12679 12383 bp->b_flags |= B_ERROR;
12680 12384 goto error_exit;
12681 12385 }
12682 12386
12683 12387 /*
12684 12388 * If the requested # blocks exceeds the available # blocks, that
12685 12389 * is an overrun of the partition.
12686 12390 */
12687 12391 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12688 12392 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12689 12393 } else {
12690 12394 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12691 12395 }
12692 12396
12693 12397 available_nblocks = (size_t)(nblocks - blocknum);
12694 12398 ASSERT(nblocks >= blocknum);
12695 12399
12696 12400 if (requested_nblocks > available_nblocks) {
12697 12401 size_t resid;
12698 12402
12699 12403 /*
12700 12404 * Allocate an "overrun" buf to allow the request to proceed
12701 12405 * for the amount of space available in the partition. The
12702 12406 * amount not transferred will be added into the b_resid
12703 12407 * when the operation is complete. The overrun buf
12704 12408 * replaces the original buf here, and the original buf
12705 12409 * is saved inside the overrun buf, for later use.
12706 12410 */
12707 12411 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12708 12412 resid = SD_TGTBLOCKS2BYTES(un,
12709 12413 (offset_t)(requested_nblocks - available_nblocks));
12710 12414 } else {
12711 12415 resid = SD_SYSBLOCKS2BYTES(
12712 12416 (offset_t)(requested_nblocks - available_nblocks));
12713 12417 }
12714 12418
12715 12419 size_t count = bp->b_bcount - resid;
12716 12420 /*
12717 12421 * Note: count is unsigned and thus can never be
12718 12422 * negative, so ASSERT that the original values are
12719 12423 * consistent.
12720 12424 */
12721 12425 ASSERT(bp->b_bcount >= resid);
12722 12426
12723 12427 bp = sd_bioclone_alloc(bp, count, blocknum,
12724 12428 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
12725 12429 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12726 12430 ASSERT(xp != NULL);
12727 12431 }
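/*
 * Hypothetical overrun example: with nblocks == 1000 and
 * blocknum == 996, a request for 8 blocks has only 4 blocks
 * available. The clone buf proceeds with the first 4 blocks' worth
 * of bytes (count == b_bcount - resid); the remaining 4 blocks'
 * worth shows up in the caller's b_resid on completion.
 */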
12728 12432
12729 12433 /* At this point there should be no residual for this buf. */
12730 12434 ASSERT(bp->b_resid == 0);
12731 12435
12732 12436 /* Convert the block number to an absolute address. */
12733 12437 xp->xb_blkno += partition_offset;
12734 12438
12735 12439 SD_NEXT_IOSTART(index, un, bp);
12736 12440
12737 12441 SD_TRACE(SD_LOG_IO_PARTITION, un,
12738 12442 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12739 12443
12740 12444 return;
12741 12445
12742 12446 error_exit:
12743 12447 bp->b_resid = bp->b_bcount;
12744 12448 SD_BEGIN_IODONE(index, un, bp);
12745 12449 SD_TRACE(SD_LOG_IO_PARTITION, un,
12746 12450 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12747 12451 }
12748 12452
12749 12453
12750 12454 /*
12751 12455 * Function: sd_mapblockaddr_iodone
12752 12456 *
12753 12457 * Description: Completion-side processing for partition management.
12754 12458 *
12755 12459 * Context: May be called under interrupt context
12756 12460 */
12757 12461
12758 12462 static void
12759 12463 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12760 12464 {
12761 12465 /* int partition; */ /* Not used, see below. */
12762 12466 ASSERT(un != NULL);
12763 12467 ASSERT(bp != NULL);
12764 12468 ASSERT(!mutex_owned(SD_MUTEX(un)));
12765 12469
12766 12470 SD_TRACE(SD_LOG_IO_PARTITION, un,
12767 12471 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12768 12472
12769 12473 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
12770 12474 /*
12771 12475 * We have an "overrun" buf to deal with...
12772 12476 */
12773 12477 struct sd_xbuf *xp;
12774 12478 struct buf *obp; /* ptr to the original buf */
12775 12479
12776 12480 xp = SD_GET_XBUF(bp);
12777 12481 ASSERT(xp != NULL);
12778 12482
12779 12483 /* Retrieve the pointer to the original buf */
12780 12484 obp = (struct buf *)xp->xb_private;
12781 12485 ASSERT(obp != NULL);
12782 12486
12783 12487 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12784 12488 bioerror(obp, bp->b_error);
12785 12489
12786 12490 sd_bioclone_free(bp);
12787 12491
12788 12492 /*
12789 12493 * Get back the original buf.
12790 12494 * Note that since the restoration of xb_blkno below
12791 12495 * was removed, the sd_xbuf is not needed.
12792 12496 */
12793 12497 bp = obp;
12794 12498 /*
12795 12499 * xp = SD_GET_XBUF(bp);
12796 12500 * ASSERT(xp != NULL);
12797 12501 */
12798 12502 }
12799 12503
12800 12504 /*
12801 12505 * Convert sd->xb_blkno back to a minor-device relative value.
12802 12506 * Note: this has been commented out, as it is not needed in the
12803 12507 * current implementation of the driver (this function is at the
12804 12508 * top of the layering chains, so the info would be discarded
12805 12509 * anyway) and it is in the "hot" IO path.
12806 12510 *
12807 12511 * partition = getminor(bp->b_edev) & SDPART_MASK;
12808 12512 * xp->xb_blkno -= un->un_offset[partition];
12809 12513 */
12810 12514
12811 12515 SD_NEXT_IODONE(index, un, bp);
12812 12516
12813 12517 SD_TRACE(SD_LOG_IO_PARTITION, un,
12814 12518 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12815 12519 }
12816 12520
12817 12521
12818 12522 /*
12819 12523 * Function: sd_mapblocksize_iostart
12820 12524 *
12821 12525 * Description: Convert between system block size (un->un_sys_blocksize)
12822 12526 * and target block size (un->un_tgt_blocksize).
12823 12527 *
12824 12528 * Context: Can sleep to allocate resources.
12825 12529 *
12826 12530 * Assumptions: A higher layer has already performed any partition validation,
12827 12531 * and converted the xp->xb_blkno to an absolute value relative
12828 12532 * to the start of the device.
12829 12533 *
12830 12534 * It is also assumed that the higher layer has implemented
12831 12535 * an "overrun" mechanism for the case where the request would
12832 12536 * read/write beyond the end of a partition. In this case we
12833 12537 * assume (and ASSERT) that bp->b_resid == 0.
12834 12538 *
12835 12539 * Note: The implementation for this routine assumes the target
12836 12540 * block size remains constant between allocation and transport.
12837 12541 */
12838 12542
12839 12543 static void
12840 12544 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12841 12545 {
12842 12546 struct sd_mapblocksize_info *bsp;
12843 12547 struct sd_xbuf *xp;
12844 12548 offset_t first_byte;
12845 12549 daddr_t start_block, end_block;
12846 12550 daddr_t request_bytes;
12847 12551 ushort_t is_aligned = FALSE;
12848 12552
12849 12553 ASSERT(un != NULL);
12850 12554 ASSERT(bp != NULL);
12851 12555 ASSERT(!mutex_owned(SD_MUTEX(un)));
12852 12556 ASSERT(bp->b_resid == 0);
12853 12557
12854 12558 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12855 12559 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12856 12560
12857 12561 /*
12858 12562 * For a non-writable CD, a write request is an error
12859 12563 */
12860 12564 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12861 12565 (un->un_f_mmc_writable_media == FALSE)) {
12862 12566 bioerror(bp, EIO);
12863 12567 bp->b_resid = bp->b_bcount;
12864 12568 SD_BEGIN_IODONE(index, un, bp);
12865 12569 return;
12866 12570 }
12867 12571
|
↓ open down ↓ |
377 lines elided |
↑ open up ↑ |
12868 12572 /*
12869 12573 * We do not need a shadow buf if the device is using
12870 12574 * un->un_sys_blocksize as its block size or if bcount == 0.
12871 12575 * In this case there is no layer-private data block allocated.
12872 12576 */
12873 12577 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12874 12578 (bp->b_bcount == 0)) {
12875 12579 goto done;
12876 12580 }
12877 12581
12878 -#if defined(__i386) || defined(__amd64)
12879 12582 /* We do not support non-block-aligned transfers for ROD devices */
12880 12583 ASSERT(!ISROD(un));
12881 -#endif
12882 12584
12883 12585 xp = SD_GET_XBUF(bp);
12884 12586 ASSERT(xp != NULL);
12885 12587
12886 12588 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12887 12589 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12888 12590 un->un_tgt_blocksize, DEV_BSIZE);
12889 12591 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12890 12592 "request start block:0x%x\n", xp->xb_blkno);
12891 12593 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12892 12594 "request len:0x%x\n", bp->b_bcount);
12893 12595
12894 12596 /*
12895 12597 * Allocate the layer-private data area for the mapblocksize layer.
12896 12598 * Layers are allowed to use the xp_private member of the sd_xbuf
12897 12599 * struct to store the pointer to their layer-private data block, but
12898 12600 * each layer also has the responsibility of restoring the prior
12899 12601 * contents of xb_private before returning the buf/xbuf to the
12900 12602 * higher layer that sent it.
12901 12603 *
12902 12604 * Here we save the prior contents of xp->xb_private into the
12903 12605 * bsp->mbs_oprivate field of our layer-private data area. This value
12904 12606 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12905 12607 * the layer-private area and returning the buf/xbuf to the layer
12906 12608 * that sent it.
12907 12609 *
12908 12610 * Note that here we use kmem_zalloc for the allocation as there are
12909 12611 * parts of the mapblocksize code that expect certain fields to be
12910 12612 * zero unless explicitly set to a required value.
12911 12613 */
12912 12614 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12913 12615 bsp->mbs_oprivate = xp->xb_private;
12914 12616 xp->xb_private = bsp;
12915 12617
12916 12618 /*
12917 12619 * This treats the data on the disk (target) as an array of bytes.
12918 12620 * first_byte is the byte offset, from the beginning of the device,
12919 12621 * to the location of the request. This is converted from a
12920 12622 * un->un_sys_blocksize block address to a byte offset, and then back
12921 12623 * to a block address based upon a un->un_tgt_blocksize block size.
12922 12624 *
12923 12625 * xp->xb_blkno should be absolute upon entry into this function,
12924 12626 * but it is based upon partitions that use the "system"
12925 12627 * block size. It must be adjusted to reflect the block size of
12926 12628 * the target.
12927 12629 *
12928 12630 * Note that end_block is actually the block that follows the last
12929 12631 * block of the request, but that's what is needed for the computation.
12930 12632 */
12931 12633 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12932 12634 if (un->un_f_enable_rmw) {
12933 12635 start_block = xp->xb_blkno =
12934 12636 (first_byte / un->un_phy_blocksize) *
12935 12637 (un->un_phy_blocksize / DEV_BSIZE);
12936 12638 end_block = ((first_byte + bp->b_bcount +
12937 12639 un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
12938 12640 (un->un_phy_blocksize / DEV_BSIZE);
12939 12641 } else {
12940 12642 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12941 12643 end_block = (first_byte + bp->b_bcount +
12942 12644 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
12943 12645 }
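/*
 * Worked example (hypothetical values, RMW disabled): with
 * un_tgt_blocksize == 2048, xb_blkno == 3 (in 512-byte system blocks)
 * and b_bcount == 1024, first_byte is 1536, start_block is 0, and
 * end_block is (1536 + 1024 + 2047) / 2048 == 2. The request thus
 * spans target blocks 0 and 1, and request_bytes below is 4096.
 */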
12944 12646
12945 12647 /* request_bytes is rounded up to a multiple of the target block size */
12946 12648 request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12947 12649
12948 12650 /*
12949 12651 * See if the starting address of the request and the request
12950 12652 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12951 12653 * then we do not need to allocate a shadow buf to handle the request.
12952 12654 */
12953 12655 if (un->un_f_enable_rmw) {
12954 12656 if (((first_byte % un->un_phy_blocksize) == 0) &&
12955 12657 ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
12956 12658 is_aligned = TRUE;
12957 12659 }
12958 12660 } else {
12959 12661 if (((first_byte % un->un_tgt_blocksize) == 0) &&
12960 12662 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12961 12663 is_aligned = TRUE;
12962 12664 }
12963 12665 }
12964 12666
12965 12667 if ((bp->b_flags & B_READ) == 0) {
12966 12668 /*
12967 12669 * Lock the range for a write operation. An aligned request is
12968 12670 * considered a simple write; otherwise the request must be a
12969 12671 * read-modify-write.
12970 12672 */
12971 12673 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
12972 12674 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
12973 12675 }
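/*
 * In the running example, an unaligned 1024-byte WRITE at system
 * block 3 locks target blocks 0..1 with SD_WTYPE_RMW, serializing
 * it against any other write that overlaps those blocks.
 */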
12974 12676
12975 12677 /*
12976 12678 * Alloc a shadow buf if the request is not aligned. Also, this is
12977 12679 * where the READ command is generated for a read-modify-write. (The
12978 12680 * write phase is deferred until after the read completes.)
12979 12681 */
12980 12682 if (is_aligned == FALSE) {
12981 12683
12982 12684 struct sd_mapblocksize_info *shadow_bsp;
12983 12685 struct sd_xbuf *shadow_xp;
12984 12686 struct buf *shadow_bp;
12985 12687
12986 12688 /*
12987 12689 * Allocate the shadow buf and its associated xbuf. Note that
12988 12690 * after this call the xb_blkno value in both the original
12989 12691 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
12990 12692 * same: absolute relative to the start of the device, and
12991 12693 * adjusted for the target block size. The b_blkno in the
12992 12694 * shadow buf will also be set to this value. We should never
12993 12695 * change b_blkno in the original bp however.
12994 12696 *
12995 12697 * Note also that the shadow buf will always need to be a
12996 12698 * READ command, regardless of whether the incoming command
12997 12699 * is a READ or a WRITE.
12998 12700 */
12999 12701 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13000 12702 xp->xb_blkno,
13001 12703 (int (*)(struct buf *)) sd_mapblocksize_iodone);
13002 12704
13003 12705 shadow_xp = SD_GET_XBUF(shadow_bp);
13004 12706
13005 12707 /*
13006 12708 * Allocate the layer-private data for the shadow buf.
13007 12709 * (No need to preserve xb_private in the shadow xbuf.)
13008 12710 */
13009 12711 shadow_xp->xb_private = shadow_bsp =
13010 12712 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13011 12713
13012 12714 /*
13013 12715 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13014 12716 * to figure out where the start of the user data is (based upon
13015 12717 * the system block size) in the data returned by the READ
13016 12718 * command (which will be based upon the target blocksize). Note
13017 12719 * that this is only really used if the request is unaligned.
13018 12720 */
13019 12721 if (un->un_f_enable_rmw) {
13020 12722 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13021 12723 ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
13022 12724 ASSERT((bsp->mbs_copy_offset >= 0) &&
13023 12725 (bsp->mbs_copy_offset < un->un_phy_blocksize));
13024 12726 } else {
13025 12727 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13026 12728 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
13027 12729 ASSERT((bsp->mbs_copy_offset >= 0) &&
13028 12730 (bsp->mbs_copy_offset < un->un_tgt_blocksize));
13029 12731 }
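/*
 * Continuing the running example: xb_blkno was rounded down to
 * target block 0, so mbs_copy_offset is 1536 - 0 == 1536, i.e. the
 * user's data begins 1536 bytes into the data that the shadow READ
 * will return.
 */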
13030 12732
13031 12733 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
13032 12734
13033 12735 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
13034 12736
13035 12737 /* Transfer the wmap (if any) to the shadow buf */
13036 12738 shadow_bsp->mbs_wmp = bsp->mbs_wmp;
13037 12739 bsp->mbs_wmp = NULL;
13038 12740
13039 12741 /*
13040 12742 * The shadow buf goes on from here in place of the
13041 12743 * original buf.
13042 12744 */
13043 12745 shadow_bsp->mbs_orig_bp = bp;
13044 12746 bp = shadow_bp;
13045 12747 }
13046 12748
13047 12749 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13048 12750 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
13049 12751 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13050 12752 "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13051 12753 request_bytes);
13052 12754 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13053 12755 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
13054 12756
13055 12757 done:
13056 12758 SD_NEXT_IOSTART(index, un, bp);
13057 12759
13058 12760 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13059 12761 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
13060 12762 }
13061 12763
13062 12764
13063 12765 /*
13064 12766 * Function: sd_mapblocksize_iodone
13065 12767 *
13066 12768 * Description: Completion side processing for block-size mapping.
13067 12769 *
13068 12770 * Context: May be called under interrupt context
13069 12771 */
13070 12772
13071 12773 static void
13072 12774 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
13073 12775 {
13074 12776 struct sd_mapblocksize_info *bsp;
13075 12777 struct sd_xbuf *xp;
13076 12778 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */
13077 12779 struct buf *orig_bp; /* ptr to the original buf */
13078 12780 offset_t shadow_end;
13079 12781 offset_t request_end;
13080 12782 offset_t shadow_start;
13081 12783 ssize_t copy_offset;
13082 12784 size_t copy_length;
13083 12785 size_t shortfall;
13084 12786 uint_t is_write; /* TRUE if this bp is a WRITE */
13085 12787 uint_t has_wmap; /* TRUE if this bp has a wmap */
13086 12788
13087 12789 ASSERT(un != NULL);
13088 12790 ASSERT(bp != NULL);
13089 12791
13090 12792 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13091 12793 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
13092 12794
13093 12795 /*
13094 12796 * There is no shadow buf or layer-private data if the target is
13095 12797 * using un->un_sys_blocksize as its block size or if bcount == 0.
13096 12798 */
13097 12799 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13098 12800 (bp->b_bcount == 0)) {
13099 12801 goto exit;
13100 12802 }
13101 12803
13102 12804 xp = SD_GET_XBUF(bp);
13103 12805 ASSERT(xp != NULL);
13104 12806
13105 12807 /* Retrieve the pointer to the layer-private data area from the xbuf. */
13106 12808 bsp = xp->xb_private;
13107 12809
13108 12810 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
13109 12811 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
13110 12812
13111 12813 if (is_write) {
13112 12814 /*
13113 12815 * For a WRITE request we must free up the block range that
13114 12816 * we have locked up. This holds regardless of whether this is
13115 12817 * an aligned write request or a read-modify-write request.
13116 12818 */
13117 12819 sd_range_unlock(un, bsp->mbs_wmp);
13118 12820 bsp->mbs_wmp = NULL;
13119 12821 }
13120 12822
13121 12823 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
13122 12824 /*
13123 12825 * An aligned read or write command will have no shadow buf;
13124 12826 * there is not much else to do with it.
13125 12827 */
13126 12828 goto done;
13127 12829 }
13128 12830
13129 12831 orig_bp = bsp->mbs_orig_bp;
13130 12832 ASSERT(orig_bp != NULL);
13131 12833 orig_xp = SD_GET_XBUF(orig_bp);
13132 12834 ASSERT(orig_xp != NULL);
13133 12835 ASSERT(!mutex_owned(SD_MUTEX(un)));
13134 12836
13135 12837 if (!is_write && has_wmap) {
13136 12838 /*
13137 12839 * A READ with a wmap means this is the READ phase of a
13138 12840 * read-modify-write. If an error occurred on the READ then
13139 12841 * we do not proceed with the WRITE phase or copy any data.
13140 12842 * Just release the write maps and return with an error.
13141 12843 */
13142 12844 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13143 12845 orig_bp->b_resid = orig_bp->b_bcount;
13144 12846 bioerror(orig_bp, bp->b_error);
13145 12847 sd_range_unlock(un, bsp->mbs_wmp);
13146 12848 goto freebuf_done;
13147 12849 }
13148 12850 }
13149 12851
13150 12852 /*
13151 12853 * Here is where we set up to copy the data from the shadow buf
13152 12854 * into the space associated with the original buf.
13153 12855 *
13154 12856 * To deal with the conversion between block sizes, these
13155 12857 * computations treat the data as an array of bytes, with the
13156 12858 * first byte (byte 0) corresponding to the first byte in the
13157 12859 * first block on the disk.
13158 12860 */
13159 12861
13160 12862 /*
13161 12863 * shadow_start and shadow_len indicate the location and size of
13162 12864 * the data returned with the shadow IO request.
13163 12865 */
13164 12866 if (un->un_f_enable_rmw) {
13165 12867 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13166 12868 } else {
13167 12869 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13168 12870 }
13169 12871 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13170 12872
13171 12873 /*
13172 12874 * copy_offset gives the offset (in bytes) from the start of the first
13173 12875 * block of the READ request to the beginning of the data. We retrieve
13174 12876 * this value from the layer-private data area, where it was saved
13175 12877 * by sd_mapblocksize_iostart(). copy_length gives the amount of
13176 12878 * data to be copied (in bytes).
13177 12879 */
13178 12880 copy_offset = bsp->mbs_copy_offset;
13179 12881 if (un->un_f_enable_rmw) {
13180 12882 ASSERT((copy_offset >= 0) &&
13181 12883 (copy_offset < un->un_phy_blocksize));
13182 12884 } else {
13183 12885 ASSERT((copy_offset >= 0) &&
13184 12886 (copy_offset < un->un_tgt_blocksize));
13185 12887 }
13186 12888
13187 12889 copy_length = orig_bp->b_bcount;
13188 12890 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13189 12891
13190 12892 /*
13191 12893 * Set up the resid and error fields of orig_bp as appropriate.
13192 12894 */
13193 12895 if (shadow_end >= request_end) {
13194 12896 /* We got all the requested data; set resid to zero */
13195 12897 orig_bp->b_resid = 0;
13196 12898 } else {
13197 12899 /*
13198 12900 * We failed to get enough data to fully satisfy the original
13199 12901 * request. Just copy back whatever data we got and set
13200 12902 * up the residual and error code as required.
13201 12903 *
13202 12904 * 'shortfall' is the amount by which the data received with the
13203 12905 * shadow buf has "fallen short" of the requested amount.
13204 12906 */
13205 12907 shortfall = (size_t)(request_end - shadow_end);
13206 12908
13207 12909 if (shortfall > orig_bp->b_bcount) {
13208 12910 /*
13209 12911 * We did not get enough data to even partially
13210 12912 * fulfill the original request. The residual is
13211 12913 * equal to the amount requested.
13212 12914 */
13213 12915 orig_bp->b_resid = orig_bp->b_bcount;
13214 12916 } else {
13215 12917 /*
13216 12918 * We did not get all the data that we requested
13217 12919 * from the device, but we will try to return what
13218 12920 * portion we did get.
13219 12921 */
13220 12922 orig_bp->b_resid = shortfall;
13221 12923 }
13222 12924 ASSERT(copy_length >= orig_bp->b_resid);
13223 12925 copy_length -= orig_bp->b_resid;
13224 12926 }
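/*
 * Shortfall example (hypothetical): if the shadow READ returns
 * 512 bytes less than request_end requires, orig_bp->b_resid is set
 * to 512 and copy_length shrinks by 512, so only the data actually
 * received is copied back to the caller.
 */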
13225 12927
13226 12928 /* Propagate the error code from the shadow buf to the original buf */
13227 12929 bioerror(orig_bp, bp->b_error);
13228 12930
13229 12931 if (is_write) {
13230 12932 goto freebuf_done; /* No data copying for a WRITE */
13231 12933 }
13232 12934
13233 12935 if (has_wmap) {
13234 12936 /*
13235 12937 * This is a READ command from the READ phase of a
13236 12938 * read-modify-write request. We have to copy the data given
13237 12939 * by the user OVER the data returned by the READ command,
13238 12940 * then convert the command from a READ to a WRITE and send
13239 12941 * it back to the target.
13240 12942 */
13241 12943 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
13242 12944 copy_length);
13243 12945
13244 12946 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
13245 12947
13246 12948 /*
13247 12949 * Dispatch the WRITE command to the taskq thread, which
13248 12950 * will in turn send the command to the target. When the
13249 12951 * WRITE command completes, we (sd_mapblocksize_iodone())
13250 12952 * will get called again as part of the iodone chain
13251 12953 * processing for it. Note that we will still be dealing
13252 12954 * with the shadow buf at that point.
13253 12955 */
13254 12956 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
13255 12957 KM_NOSLEEP) != 0) {
13256 12958 /*
13257 12959 * Dispatch was successful so we are done. Return
13258 12960 * without going any higher up the iodone chain. Do
13259 12961 * not free up any layer-private data until after the
13260 12962 * WRITE completes.
13261 12963 */
13262 12964 return;
13263 12965 }
13264 12966
13265 12967 /*
13266 12968 * Dispatch of the WRITE command failed; set up the error
13267 12969 * condition and send this IO back up the iodone chain.
13268 12970 */
13269 12971 bioerror(orig_bp, EIO);
13270 12972 orig_bp->b_resid = orig_bp->b_bcount;
13271 12973
13272 12974 } else {
13273 12975 /*
13274 12976 * This is a regular READ request (ie, not a RMW). Copy the
13275 12977 * data from the shadow buf into the original buf. The
13276 12978 * copy_offset compensates for any "misalignment" between the
13277 12979 * shadow buf (with its un->un_tgt_blocksize blocks) and the
13278 12980 * original buf (with its un->un_sys_blocksize blocks).
13279 12981 */
13280 12982 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
13281 12983 copy_length);
13282 12984 }
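/*
 * In the running example, copy_offset == 1536 and copy_length ==
 * 1024, so bytes 1536..2559 of the shadow data are copied back --
 * exactly the 1024 bytes the caller requested starting at system
 * block 3.
 */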
13283 12985
13284 12986 freebuf_done:
13285 12987
13286 12988 /*
13287 12989 * At this point we still have both the shadow buf AND the original
13288 12990 * buf to deal with, as well as the layer-private data area in each.
13289 12991 * Local variables are as follows:
13290 12992 *
13291 12993 * bp -- points to shadow buf
13292 12994 * xp -- points to xbuf of shadow buf
13293 12995 * bsp -- points to layer-private data area of shadow buf
13294 12996 * orig_bp -- points to original buf
13295 12997 *
13296 12998 * First free the shadow buf and its associated xbuf, then free the
13297 12999 * layer-private data area from the shadow buf. There is no need to
13298 13000 * restore xb_private in the shadow xbuf.
13299 13001 */
13300 13002 sd_shadow_buf_free(bp);
13301 13003 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13302 13004
13303 13005 /*
13304 13006 * Now update the local variables to point to the original buf, xbuf,
13305 13007 * and layer-private area.
13306 13008 */
13307 13009 bp = orig_bp;
13308 13010 xp = SD_GET_XBUF(bp);
13309 13011 ASSERT(xp != NULL);
13310 13012 ASSERT(xp == orig_xp);
13311 13013 bsp = xp->xb_private;
13312 13014 ASSERT(bsp != NULL);
13313 13015
13314 13016 done:
13315 13017 /*
13316 13018 * Restore xb_private to whatever it was set to by the next higher
13317 13019 * layer in the chain, then free the layer-private data area.
13318 13020 */
13319 13021 xp->xb_private = bsp->mbs_oprivate;
13320 13022 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13321 13023
13322 13024 exit:
13323 13025 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
13324 13026 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);
13325 13027
13326 13028 SD_NEXT_IODONE(index, un, bp);
13327 13029 }
13328 13030
13329 13031
13330 13032 /*
13331 13033 * Function: sd_checksum_iostart
13332 13034 *
13333 13035 * Description: A stub function for a layer that's currently not used.
13334 13036 * For now just a placeholder.
13335 13037 *
13336 13038 * Context: Kernel thread context
13337 13039 */
13338 13040
13339 13041 static void
13340 13042 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13341 13043 {
13342 13044 ASSERT(un != NULL);
13343 13045 ASSERT(bp != NULL);
13344 13046 ASSERT(!mutex_owned(SD_MUTEX(un)));
13345 13047 SD_NEXT_IOSTART(index, un, bp);
13346 13048 }
13347 13049
13348 13050
13349 13051 /*
13350 13052 * Function: sd_checksum_iodone
13351 13053 *
13352 13054 * Description: A stub function for a layer that's currently not used.
13353 13055 * For now just a placeholder.
13354 13056 *
13355 13057 * Context: May be called under interrupt context
13356 13058 */
13357 13059
13358 13060 static void
13359 13061 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13360 13062 {
13361 13063 ASSERT(un != NULL);
13362 13064 ASSERT(bp != NULL);
13363 13065 ASSERT(!mutex_owned(SD_MUTEX(un)));
13364 13066 SD_NEXT_IODONE(index, un, bp);
13365 13067 }
13366 13068
13367 13069
13368 13070 /*
13369 13071 * Function: sd_checksum_uscsi_iostart
13370 13072 *
13371 13073 * Description: A stub function for a layer that's currently not used.
13372 13074 * For now just a placeholder.
13373 13075 *
13374 13076 * Context: Kernel thread context
13375 13077 */
13376 13078
13377 13079 static void
13378 13080 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13379 13081 {
13380 13082 ASSERT(un != NULL);
13381 13083 ASSERT(bp != NULL);
13382 13084 ASSERT(!mutex_owned(SD_MUTEX(un)));
13383 13085 SD_NEXT_IOSTART(index, un, bp);
13384 13086 }
13385 13087
13386 13088
13387 13089 /*
13388 13090 * Function: sd_checksum_uscsi_iodone
13389 13091 *
13390 13092 * Description: A stub function for a layer that's currently not used.
13391 13093 * For now just a placeholder.
13392 13094 *
13393 13095 * Context: May be called under interrupt context
13394 13096 */
13395 13097
13396 13098 static void
13397 13099 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13398 13100 {
13399 13101 ASSERT(un != NULL);
13400 13102 ASSERT(bp != NULL);
13401 13103 ASSERT(!mutex_owned(SD_MUTEX(un)));
13402 13104 SD_NEXT_IODONE(index, un, bp);
13403 13105 }
13404 13106
13405 13107
13406 13108 /*
13407 13109 * Function: sd_pm_iostart
13408 13110 *
13409 13111 * Description: iostart-side routine for power management.
13410 13112 *
13411 13113 * Context: Kernel thread context
13412 13114 */
13413 13115
13414 13116 static void
13415 13117 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13416 13118 {
13417 13119 ASSERT(un != NULL);
13418 13120 ASSERT(bp != NULL);
13419 13121 ASSERT(!mutex_owned(SD_MUTEX(un)));
13420 13122 ASSERT(!mutex_owned(&un->un_pm_mutex));
13421 13123
13422 13124 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13423 13125
13424 13126 if (sd_pm_entry(un) != DDI_SUCCESS) {
13425 13127 /*
13426 13128 * Set up to return the failed buf back up the 'iodone'
13427 13129 * side of the calling chain.
13428 13130 */
13429 13131 bioerror(bp, EIO);
13430 13132 bp->b_resid = bp->b_bcount;
13431 13133
13432 13134 SD_BEGIN_IODONE(index, un, bp);
13433 13135
13434 13136 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13435 13137 return;
13436 13138 }
13437 13139
13438 13140 SD_NEXT_IOSTART(index, un, bp);
13439 13141
13440 13142 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13441 13143 }
13442 13144
13443 13145
13444 13146 /*
13445 13147 * Function: sd_pm_iodone
13446 13148 *
13447 13149 * Description: iodone-side routine for power management.
13448 13150 *
13449 13151 * Context: may be called from interrupt context
13450 13152 */
13451 13153
13452 13154 static void
13453 13155 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13454 13156 {
13455 13157 ASSERT(un != NULL);
13456 13158 ASSERT(bp != NULL);
13457 13159 ASSERT(!mutex_owned(&un->un_pm_mutex));
13458 13160
13459 13161 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13460 13162
13461 13163 /*
13462 13164 * After attach the following flag is only read, so don't
13463 13165 * take the penalty of acquiring a mutex for it.
13464 13166 */
13465 13167 if (un->un_f_pm_is_enabled == TRUE) {
13466 13168 sd_pm_exit(un);
13467 13169 }
13468 13170
13469 13171 SD_NEXT_IODONE(index, un, bp);
13470 13172
13471 13173 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13472 13174 }
13473 13175
13474 13176
13475 13177 /*
13476 13178 * Function: sd_core_iostart
13477 13179 *
13478 13180 * Description: Primary driver function for enqueuing buf(9S) structs from
13479 13181 * the system and initiating IO to the target device
13480 13182 *
13481 13183 * Context: Kernel thread context. Can sleep.
13482 13184 *
13483 13185 * Assumptions: - The given xp->xb_blkno is absolute
13484 13186 * (ie, relative to the start of the device).
13485 13187 * - The IO is to be done using the native blocksize of
13486 13188 * the device, as specified in un->un_tgt_blocksize.
13487 13189 */
13488 13190 /* ARGSUSED */
13489 13191 static void
13490 13192 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13491 13193 {
13492 13194 struct sd_xbuf *xp;
13493 13195
13494 13196 ASSERT(un != NULL);
13495 13197 ASSERT(bp != NULL);
13496 13198 ASSERT(!mutex_owned(SD_MUTEX(un)));
13497 13199 ASSERT(bp->b_resid == 0);
13498 13200
13499 13201 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13500 13202
13501 13203 xp = SD_GET_XBUF(bp);
13502 13204 ASSERT(xp != NULL);
13503 13205
13504 13206 mutex_enter(SD_MUTEX(un));
13505 13207
13506 13208 /*
13507 13209 * If we are currently in the failfast state, fail any new IO
13508 13210 * that has B_FAILFAST set, then return.
13509 13211 */
13510 13212 if ((bp->b_flags & B_FAILFAST) &&
13511 13213 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13512 13214 mutex_exit(SD_MUTEX(un));
13513 13215 bioerror(bp, EIO);
13514 13216 bp->b_resid = bp->b_bcount;
13515 13217 SD_BEGIN_IODONE(index, un, bp);
13516 13218 return;
13517 13219 }
13518 13220
13519 13221 if (SD_IS_DIRECT_PRIORITY(xp)) {
13520 13222 /*
13521 13223 * Priority command -- transport it immediately.
13522 13224 *
13523 13225 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13524 13226 * because all direct priority commands should be associated
13525 13227 * with error recovery actions which we don't want to retry.
13526 13228 */
13527 13229 sd_start_cmds(un, bp);
13528 13230 } else {
13529 13231 /*
13530 13232 * Normal command -- add it to the wait queue, then start
13531 13233 * transporting commands from the wait queue.
13532 13234 */
13533 13235 sd_add_buf_to_waitq(un, bp);
13534 13236 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13535 13237 sd_start_cmds(un, NULL);
13536 13238 }
13537 13239
13538 13240 mutex_exit(SD_MUTEX(un));
13539 13241
13540 13242 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13541 13243 }
13542 13244
13543 13245
13544 13246 /*
13545 13247 * Function: sd_init_cdb_limits
13546 13248 *
13547 13249 * Description: This is to handle scsi_pkt initialization differences
13548 13250 * between the driver platforms.
13549 13251 *
13550 13252 * Legacy behaviors:
13551 13253 *
13552 13254 * If the block number or the sector count exceeds the
13553 13255 * capabilities of a Group 0 command, shift over to a
13554 13256 * Group 1 command. We don't blindly use Group 1
13555 13257 * commands because a) some drives (CDC Wren IVs) get a
13556 13258 * bit confused, and b) there is probably a fair amount
13557 13259 * of speed difference for a target to receive and decode
13558 13260 * a 10 byte command instead of a 6 byte command.
13559 13261 *
13560 13262 * The xfer time difference of 6 vs 10 byte CDBs is
13561 13263 * still significant so this code is still worthwhile.
13562 13264 * 10 byte CDBs are very inefficient with the fas HBA driver
13563 13265 * and older disks. Each CDB byte took 1 usec with some
13564 13266 * popular disks.
13565 13267 *
13566 13268 * Context: Must be called at attach time
13567 13269 */
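/*
 * For reference (standard SCSI CDB formats): a Group 0 READ/WRITE
 * carries a 21-bit LBA and an 8-bit block count (0 meaning 256),
 * Group 1 a 32-bit LBA and 16-bit count, Group 5 a 32-bit LBA and
 * 32-bit count, and Group 4 a 64-bit LBA and 32-bit count. This is
 * why large transfers and large LBAs force the bigger CDB groups.
 */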
13568 13270
13569 13271 static void
13570 13272 sd_init_cdb_limits(struct sd_lun *un)
13571 13273 {
13572 13274 int hba_cdb_limit;
13573 13275
13574 13276 /*
13575 13277 * Use CDB_GROUP1 commands for most devices except for
13576 13278 * parallel SCSI fixed drives in which case we get better
13577 13279 * performance using CDB_GROUP0 commands (where applicable).
13578 13280 */
13579 13281 un->un_mincdb = SD_CDB_GROUP1;
13580 -#if !defined(__fibre)
13581 13282 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13582 13283 !un->un_f_has_removable_media) {
13583 13284 un->un_mincdb = SD_CDB_GROUP0;
13584 13285 }
13585 -#endif
13586 13286
13587 13287 /*
13588 13288 * Try to read the max-cdb-length supported by HBA.
13589 13289 */
13590 13290 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
13591 13291 if (0 >= un->un_max_hba_cdb) {
13592 13292 un->un_max_hba_cdb = CDB_GROUP4;
13593 13293 hba_cdb_limit = SD_CDB_GROUP4;
13594 13294 } else if (0 < un->un_max_hba_cdb &&
13595 13295 un->un_max_hba_cdb < CDB_GROUP1) {
13596 13296 hba_cdb_limit = SD_CDB_GROUP0;
13597 13297 } else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
13598 13298 un->un_max_hba_cdb < CDB_GROUP5) {
13599 13299 hba_cdb_limit = SD_CDB_GROUP1;
13600 13300 } else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
13601 13301 un->un_max_hba_cdb < CDB_GROUP4) {
13602 13302 hba_cdb_limit = SD_CDB_GROUP5;
13603 13303 } else {
13604 13304 hba_cdb_limit = SD_CDB_GROUP4;
13605 13305 }
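/*
 * Example mappings: an HBA reporting max-cdb-length 16 yields
 * hba_cdb_limit == SD_CDB_GROUP4, 12 yields SD_CDB_GROUP5, 10
 * yields SD_CDB_GROUP1, and anything below 10 falls back to
 * SD_CDB_GROUP0. A failed query defaults to 16-byte CDBs.
 */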
13606 13306
13607 13307 /*
13608 13308 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13609 13309 * commands for fixed disks unless we are building for a 32 bit
13610 13310 * kernel.
13611 13311 */
13612 13312 #ifdef _LP64
13613 13313 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13614 13314 min(hba_cdb_limit, SD_CDB_GROUP4);
13615 13315 #else
13616 13316 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13617 13317 min(hba_cdb_limit, SD_CDB_GROUP1);
13618 13318 #endif
13619 13319
13620 13320 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13621 13321 ? sizeof (struct scsi_arq_status) : 1);
13622 13322 if (!ISCD(un))
13623 - un->un_cmd_timeout = (ushort_t)sd_io_time;
13323 + un->un_cmd_timeout = (ushort_t)un->un_io_time;
13624 13324 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
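/*
 * For example (assuming the default un_io_time of 60 seconds), a
 * fixed disk ends up with un_cmd_timeout == 60 and un_uscsi_timeout
 * == 60, while a CD keeps its existing un_cmd_timeout and gets
 * twice that value for USCSI commands.
 */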
13625 13325 }
13626 13326
13627 13327
13628 13328 /*
13629 13329 * Function: sd_initpkt_for_buf
13630 13330 *
13631 13331 * Description: Allocate and initialize for transport a scsi_pkt struct,
13632 13332 * based upon the info specified in the given buf struct.
13633 13333 *
13634 13334 * Assumes the xb_blkno in the request is absolute (ie,
13635 13335 * relative to the start of the device (NOT partition!).
13636 13336 * Also assumes that the request is using the native block
13637 13337 * size of the device (as returned by the READ CAPACITY
13638 13338 * command).
13639 13339 *
13640 13340 * Return Code: SD_PKT_ALLOC_SUCCESS
13641 13341 * SD_PKT_ALLOC_FAILURE
13642 13342 * SD_PKT_ALLOC_FAILURE_NO_DMA
13643 13343 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13644 13344 *
13645 13345 * Context: Kernel thread and may be called from software interrupt context
13646 13346 * as part of a sdrunout callback. This function may not block or
13647 13347 * call routines that block
13648 13348 */
13649 13349
13650 13350 static int
13651 13351 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13652 13352 {
13653 13353 struct sd_xbuf *xp;
13654 13354 struct scsi_pkt *pktp = NULL;
13655 13355 struct sd_lun *un;
13656 13356 size_t blockcount;
13657 13357 daddr_t startblock;
13658 13358 int rval;
13659 13359 int cmd_flags;
13660 13360
13661 13361 ASSERT(bp != NULL);
13662 13362 ASSERT(pktpp != NULL);
13663 13363 xp = SD_GET_XBUF(bp);
13664 13364 ASSERT(xp != NULL);
13665 13365 un = SD_GET_UN(bp);
13666 13366 ASSERT(un != NULL);
13667 13367 ASSERT(mutex_owned(SD_MUTEX(un)));
13668 13368 ASSERT(bp->b_resid == 0);
13669 13369
13670 13370 SD_TRACE(SD_LOG_IO_CORE, un,
13671 13371 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13672 13372
13673 13373 mutex_exit(SD_MUTEX(un));
13674 13374
13675 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13676 13375 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13677 13376 /*
13678 13377 * Already have a scsi_pkt -- just need DMA resources.
13679 13378 * We must recompute the CDB in case the mapping returns
13680 13379 * a nonzero pkt_resid.
13681 13380 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13682 13381 * that is being retried, the unmap/remap of the DMA resources
13683 13382 * will result in the entire transfer starting over again
13684 13383 * from the very first block.
13685 13384 */
13686 13385 ASSERT(xp->xb_pktp != NULL);
13687 13386 pktp = xp->xb_pktp;
13688 13387 } else {
13689 13388 pktp = NULL;
13690 13389 }
13691 -#endif /* __i386 || __amd64 */
13692 13390
13693 13391 startblock = xp->xb_blkno; /* Absolute block num. */
13694 13392 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13695 13393
13696 13394 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13697 13395
13698 13396 /*
13699 13397 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13700 13398 * call scsi_init_pkt, and build the CDB.
13701 13399 */
13702 13400 rval = sd_setup_rw_pkt(un, &pktp, bp,
13703 13401 cmd_flags, sdrunout, (caddr_t)un,
13704 13402 startblock, blockcount);
13705 13403
13706 13404 if (rval == 0) {
13707 13405 /*
13708 13406 * Success.
13709 13407 *
13710 13408 * If partial DMA is being used and required for this transfer,
13711 13409 * set it up here.
13712 13410 */
13713 13411 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13714 13412 (pktp->pkt_resid != 0)) {
13715 13413
13716 13414 /*
13717 13415 * Save the residual (pkt_resid) for the
13718 13416 * next xfer.
13719 13417 */
13720 13418 xp->xb_dma_resid = pktp->pkt_resid;
13721 13419
13722 13420 /* rezero resid */
13723 13421 pktp->pkt_resid = 0;
13724 13422
13725 13423 } else {
13726 13424 xp->xb_dma_resid = 0;
13727 13425 }
13728 13426
13729 13427 pktp->pkt_flags = un->un_tagflags;
13730 13428 pktp->pkt_time = un->un_cmd_timeout;
13731 13429 pktp->pkt_comp = sdintr;
13732 13430
13733 13431 pktp->pkt_private = bp;
13734 13432 *pktpp = pktp;
13735 13433
13736 13434 SD_TRACE(SD_LOG_IO_CORE, un,
13737 13435 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13738 13436
13739 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13740 13437 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13741 -#endif
13742 13438
13743 13439 mutex_enter(SD_MUTEX(un));
13744 13440 return (SD_PKT_ALLOC_SUCCESS);
13745 13441
13746 13442 }
13747 13443
13748 13444 /*
13749 13445 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13750 13446 * from sd_setup_rw_pkt.
13751 13447 */
13752 13448 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13753 13449
13754 13450 if (rval == SD_PKT_ALLOC_FAILURE) {
13755 13451 *pktpp = NULL;
13756 13452 /*
13757 13453 * Set the driver state to RWAIT to indicate the driver
13758 13454 * is waiting on resource allocations. The driver will not
13759 13455 * suspend, pm_suspend, or detach while the state is RWAIT.
13760 13456 */
13761 13457 mutex_enter(SD_MUTEX(un));
13762 13458 New_state(un, SD_STATE_RWAIT);
13763 13459
13764 13460 SD_ERROR(SD_LOG_IO_CORE, un,
13765 13461 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13766 13462
13767 13463 if ((bp->b_flags & B_ERROR) != 0) {
13768 13464 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13769 13465 }
13770 13466 return (SD_PKT_ALLOC_FAILURE);
13771 13467 } else {
13772 13468 /*
13773 13469 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13774 13470 *
13775 13471 * This should never happen. Maybe someone messed with the
13776 13472 * kernel's minphys?
13777 13473 */
13778 13474 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13779 13475 "Request rejected: too large for CDB: "
13780 13476 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13781 13477 SD_ERROR(SD_LOG_IO_CORE, un,
13782 13478 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13783 13479 mutex_enter(SD_MUTEX(un));
13784 13480 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13785 13481
13786 13482 }
13787 13483 }
13788 13484
13789 13485
13790 13486 /*
13791 13487 * Function: sd_destroypkt_for_buf
13792 13488 *
13793 13489 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13794 13490 *
13795 13491 * Context: Kernel thread or interrupt context
13796 13492 */
13797 13493
13798 13494 static void
13799 13495 sd_destroypkt_for_buf(struct buf *bp)
13800 13496 {
13801 13497 ASSERT(bp != NULL);
13802 13498 ASSERT(SD_GET_UN(bp) != NULL);
13803 13499
13804 13500 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13805 13501 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13806 13502
13807 13503 ASSERT(SD_GET_PKTP(bp) != NULL);
13808 13504 scsi_destroy_pkt(SD_GET_PKTP(bp));
13809 13505
13810 13506 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13811 13507 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13812 13508 }
13813 13509
13814 13510 /*
13815 13511 * Function: sd_setup_rw_pkt
13816 13512 *
13817 13513 * Description: Determines appropriate CDB group for the requested LBA
13818 13514 * and transfer length, calls scsi_init_pkt, and builds
13819 13515 * the CDB. Do not use for partial DMA transfers except
13820 13516 * for the initial transfer since the CDB size must
13821 13517 * remain constant.
13822 13518 *
13823 13519 * Context: Kernel thread and may be called from software interrupt
13824 13520 * context as part of a sdrunout callback. This function may not
13825 13521 * block or call routines that block
13826 13522 */
13827 13523
13828 13524
13829 13525 int
13830 13526 sd_setup_rw_pkt(struct sd_lun *un,
13831 13527 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13832 13528 int (*callback)(caddr_t), caddr_t callback_arg,
13833 13529 diskaddr_t lba, uint32_t blockcount)
13834 13530 {
13835 13531 struct scsi_pkt *return_pktp;
13836 13532 union scsi_cdb *cdbp;
13837 13533 struct sd_cdbinfo *cp = NULL;
13838 13534 int i;
13839 13535
13840 13536 /*
13841 13537 * See which size CDB to use, based upon the request.
13842 13538 */
13843 13539 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13844 13540
13845 13541 /*
13846 13542 * Check lba and block count against sd_cdbtab limits.
13847 13543 * In the partial DMA case, we have to use the same size
13848 13544 * CDB for all the transfers. Check lba + blockcount
13849 13545 * against the max LBA so we know that segment of the
13850 13546 * transfer can use the CDB we select.
13851 13547 */
13852 13548 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13853 13549 (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13854 13550
13855 13551 /*
13856 13552 * The command will fit into the CDB type
13857 13553 * specified by sd_cdbtab[i].
13858 13554 */
13859 13555 cp = sd_cdbtab + i;
13860 13556
13861 13557 /*
13862 13558 * Call scsi_init_pkt so we can fill in the
13863 13559 * CDB.
13864 13560 */
13865 13561 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13866 13562 bp, cp->sc_grpcode, un->un_status_len, 0,
13867 13563 flags, callback, callback_arg);
13868 13564
13869 13565 if (return_pktp != NULL) {
13870 13566
13871 13567 /*
13872 13568 * Return new value of pkt
13873 13569 */
13874 13570 *pktpp = return_pktp;
13875 13571
13876 13572 /*
13877 13573 * To be safe, zero the CDB insuring there is
13878 13574 * To be safe, zero the CDB, ensuring there is
13879 13575 */
13880 13576 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13881 13577
13882 13578 /*
13883 13579 * Handle partial DMA mapping
13884 13580 */
13885 13581 if (return_pktp->pkt_resid != 0) {
13886 13582
13887 13583 /*
13888 13584 * Not going to xfer as many blocks as
13889 13585 * originally expected
13890 13586 */
13891 13587 blockcount -=
13892 13588 SD_BYTES2TGTBLOCKS(un,
13893 13589 return_pktp->pkt_resid);
13894 13590 }
13895 13591
13896 13592 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13897 13593
13898 13594 /*
13899 13595 * Set command byte based on the CDB
13900 13596 * type we matched.
13901 13597 */
13902 13598 cdbp->scc_cmd = cp->sc_grpmask |
13903 13599 ((bp->b_flags & B_READ) ?
13904 13600 SCMD_READ : SCMD_WRITE);
13905 13601
13906 13602 SD_FILL_SCSI1_LUN(un, return_pktp);
13907 13603
13908 13604 /*
13909 13605 * Fill in LBA and length
13910 13606 */
13911 13607 ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13912 13608 (cp->sc_grpcode == CDB_GROUP4) ||
13913 13609 (cp->sc_grpcode == CDB_GROUP0) ||
13914 13610 (cp->sc_grpcode == CDB_GROUP5));
13915 13611
13916 13612 if (cp->sc_grpcode == CDB_GROUP1) {
13917 13613 FORMG1ADDR(cdbp, lba);
13918 13614 FORMG1COUNT(cdbp, blockcount);
13919 13615 return (0);
13920 13616 } else if (cp->sc_grpcode == CDB_GROUP4) {
13921 13617 FORMG4LONGADDR(cdbp, lba);
13922 13618 FORMG4COUNT(cdbp, blockcount);
13923 13619 return (0);
13924 13620 } else if (cp->sc_grpcode == CDB_GROUP0) {
13925 13621 FORMG0ADDR(cdbp, lba);
13926 13622 FORMG0COUNT(cdbp, blockcount);
13927 13623 return (0);
13928 13624 } else if (cp->sc_grpcode == CDB_GROUP5) {
13929 13625 FORMG5ADDR(cdbp, lba);
13930 13626 FORMG5COUNT(cdbp, blockcount);
13931 13627 return (0);
13932 13628 }
13933 13629
13934 13630 /*
13935 13631 * It should be impossible to not match one
13936 13632 * of the CDB types above, so we should never
13937 13633 * reach this point. Set the CDB command byte
13938 13634 * to test-unit-ready to avoid writing
13939 13635 * to somewhere we don't intend.
13940 13636 */
13941 13637 cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13942 13638 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13943 13639 } else {
13944 13640 /*
13945 13641 * Couldn't get scsi_pkt
13946 13642 */
13947 13643 return (SD_PKT_ALLOC_FAILURE);
13948 13644 }
13949 13645 }
13950 13646 }
13951 13647
13952 13648 /*
13953 13649 * should never happen: on a 64-bit system we support
13954 13650 * READ16/WRITE16, which hold an entire 64-bit disk address,
13955 13651 * and on a 32-bit system we refuse to bind to a device
13956 13652 * larger than 2TB, so addresses will never be larger than 32 bits.
13957 13653 * larger than 2TB so addresses will never be larger than 32 bits.
13958 13654 */
13959 13655 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13960 13656 }
13961 13657
13962 13658 /*
13963 13659 * Function: sd_setup_next_rw_pkt
13964 13660 *
13965 13661 * Description: Setup packet for partial DMA transfers, except for the
13966 13662 * initial transfer. sd_setup_rw_pkt should be used for
13967 13663 * the initial transfer.
13968 13664 *
13969 13665 * Context: Kernel thread and may be called from interrupt context.
13970 13666 */
13971 13667
13972 13668 int
13973 13669 sd_setup_next_rw_pkt(struct sd_lun *un,
13974 13670 struct scsi_pkt *pktp, struct buf *bp,
13975 13671 diskaddr_t lba, uint32_t blockcount)
13976 13672 {
13977 13673 uchar_t com;
13978 13674 union scsi_cdb *cdbp;
13979 13675 uchar_t cdb_group_id;
13980 13676
13981 13677 ASSERT(pktp != NULL);
13982 13678 ASSERT(pktp->pkt_cdbp != NULL);
13983 13679
13984 13680 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
13985 13681 com = cdbp->scc_cmd;
13986 13682 cdb_group_id = CDB_GROUPID(com);
13987 13683
13988 13684 ASSERT((cdb_group_id == CDB_GROUPID_0) ||
13989 13685 (cdb_group_id == CDB_GROUPID_1) ||
13990 13686 (cdb_group_id == CDB_GROUPID_4) ||
13991 13687 (cdb_group_id == CDB_GROUPID_5));
13992 13688
13993 13689 /*
13994 13690 * Move pkt to the next portion of the xfer.
13995 13691 * func is NULL_FUNC so we do not have to release
13996 13692 * the disk mutex here.
13997 13693 */
13998 13694 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
13999 13695 NULL_FUNC, NULL) == pktp) {
14000 13696 /* Success. Handle partial DMA */
14001 13697 if (pktp->pkt_resid != 0) {
14002 13698 blockcount -=
14003 13699 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14004 13700 }
14005 13701
14006 13702 cdbp->scc_cmd = com;
14007 13703 SD_FILL_SCSI1_LUN(un, pktp);
14008 13704 if (cdb_group_id == CDB_GROUPID_1) {
14009 13705 FORMG1ADDR(cdbp, lba);
14010 13706 FORMG1COUNT(cdbp, blockcount);
14011 13707 return (0);
14012 13708 } else if (cdb_group_id == CDB_GROUPID_4) {
14013 13709 FORMG4LONGADDR(cdbp, lba);
14014 13710 FORMG4COUNT(cdbp, blockcount);
14015 13711 return (0);
14016 13712 } else if (cdb_group_id == CDB_GROUPID_0) {
14017 13713 FORMG0ADDR(cdbp, lba);
14018 13714 FORMG0COUNT(cdbp, blockcount);
14019 13715 return (0);
14020 13716 } else if (cdb_group_id == CDB_GROUPID_5) {
14021 13717 FORMG5ADDR(cdbp, lba);
14022 13718 FORMG5COUNT(cdbp, blockcount);
14023 13719 return (0);
14024 13720 }
14025 13721
14026 13722 /* Unreachable */
14027 13723 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14028 13724 }
14029 13725
14030 13726 /*
14031 13727 * Error setting up next portion of cmd transfer.
14032 13728 * Something is definitely very wrong and this
14033 13729 * should not happen.
14034 13730 */
14035 13731 return (SD_PKT_ALLOC_FAILURE);
14036 13732 }
14037 13733
14038 13734 /*
14039 13735 * Function: sd_initpkt_for_uscsi
14040 13736 *
14041 13737 * Description: Allocate and initialize for transport a scsi_pkt struct,
14042 13738 * based upon the info specified in the given uscsi_cmd struct.
14043 13739 *
14044 13740 * Return Code: SD_PKT_ALLOC_SUCCESS
14045 13741 * SD_PKT_ALLOC_FAILURE
14046 13742 * SD_PKT_ALLOC_FAILURE_NO_DMA
14047 13743 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
14048 13744 *
14049 13745 * Context: Kernel thread and may be called from software interrupt context
14050 13746 * as part of a sdrunout callback. This function may not block or
14051 13747 * call routines that block
14052 13748 */
14053 13749
14054 13750 static int
14055 13751 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14056 13752 {
14057 13753 struct uscsi_cmd *uscmd;
14058 13754 struct sd_xbuf *xp;
14059 13755 struct scsi_pkt *pktp;
14060 13756 struct sd_lun *un;
14061 13757 uint32_t flags = 0;
14062 13758
14063 13759 ASSERT(bp != NULL);
14064 13760 ASSERT(pktpp != NULL);
14065 13761 xp = SD_GET_XBUF(bp);
14066 13762 ASSERT(xp != NULL);
14067 13763 un = SD_GET_UN(bp);
14068 13764 ASSERT(un != NULL);
14069 13765 ASSERT(mutex_owned(SD_MUTEX(un)));
14070 13766
14071 13767 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14072 13768 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14073 13769 ASSERT(uscmd != NULL);
14074 13770
14075 13771 SD_TRACE(SD_LOG_IO_CORE, un,
14076 13772 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14077 13773
14078 13774 /*
14079 13775 * Allocate the scsi_pkt for the command.
14080 13776 * Note: If the PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14081 13777 * at scsi_init_pkt time and will continue to use the
14082 13778 * same path as long as the same scsi_pkt is used without an
14083 13779 * intervening scsi_dmafree(). Since a uscsi command does
14084 13780 * not call scsi_dmafree() before retrying a failed command, it
14085 13781 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT
14086 13782 * set, so that scsi_vhci can use another available path for the
14087 13783 * retry. Besides, uscsi commands do not allow DMA breakup,
14088 13784 * so there is no need to set PKT_DMA_PARTIAL.
14089 13785 */
14090 13786 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14091 13787 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14092 13788 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14093 13789 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14094 13790 - sizeof (struct scsi_extended_sense)), 0,
14095 13791 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14096 13792 sdrunout, (caddr_t)un);
14097 13793 } else {
14098 13794 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14099 13795 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14100 13796 sizeof (struct scsi_arq_status), 0,
14101 13797 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14102 13798 sdrunout, (caddr_t)un);
14103 13799 }
14104 13800
14105 13801 if (pktp == NULL) {
14106 13802 *pktpp = NULL;
14107 13803 /*
14108 13804 * Set the driver state to RWAIT to indicate the driver
14109 13805 * is waiting on resource allocations. The driver will not
14110 13806 		 * suspend, pm_suspend, or detach while the state is RWAIT.
14111 13807 */
14112 13808 New_state(un, SD_STATE_RWAIT);
14113 13809
14114 13810 SD_ERROR(SD_LOG_IO_CORE, un,
14115 13811 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14116 13812
14117 13813 if ((bp->b_flags & B_ERROR) != 0) {
14118 13814 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14119 13815 }
14120 13816 return (SD_PKT_ALLOC_FAILURE);
14121 13817 }
14122 13818
14123 13819 /*
14124 13820 * We do not do DMA breakup for USCSI commands, so return failure
14125 13821 * here if all the needed DMA resources were not allocated.
14126 13822 */
14127 13823 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14128 13824 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14129 13825 scsi_destroy_pkt(pktp);
14130 13826 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14131 13827 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14132 13828 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14133 13829 }
14134 13830
14135 13831 /* Init the cdb from the given uscsi struct */
14136 13832 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14137 13833 uscmd->uscsi_cdb[0], 0, 0, 0);
14138 13834
14139 13835 SD_FILL_SCSI1_LUN(un, pktp);
14140 13836
14141 13837 /*
14142 13838 * Set up the optional USCSI flags. See the uscsi (7I) man page
14143 13839 * for listing of the supported flags.
14144 13840 */
14145 13841
14146 13842 if (uscmd->uscsi_flags & USCSI_SILENT) {
14147 13843 flags |= FLAG_SILENT;
14148 13844 }
14149 13845
14150 13846 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14151 13847 flags |= FLAG_DIAGNOSE;
14152 13848 }
14153 13849
14154 13850 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14155 13851 flags |= FLAG_ISOLATE;
14156 13852 }
14157 13853
14158 13854 if (un->un_f_is_fibre == FALSE) {
14159 13855 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14160 13856 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14161 13857 }
14162 13858 }
14163 13859
14164 13860 /*
14165 13861 * Set the pkt flags here so we save time later.
14166 13862 * Note: These flags are NOT in the uscsi man page!!!
14167 13863 */
14168 13864 if (uscmd->uscsi_flags & USCSI_HEAD) {
14169 13865 flags |= FLAG_HEAD;
14170 13866 }
14171 13867
14172 13868 if (uscmd->uscsi_flags & USCSI_NOINTR) {
14173 13869 flags |= FLAG_NOINTR;
14174 13870 }
14175 13871
14176 13872 /*
14177 13873 * For tagged queueing, things get a bit complicated.
14178 13874 * Check first for head of queue and last for ordered queue.
14179 13875 	 * If neither head nor ordered, use the default driver tag flags.
14180 13876 */
14181 13877 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14182 13878 if (uscmd->uscsi_flags & USCSI_HTAG) {
14183 13879 flags |= FLAG_HTAG;
14184 13880 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
14185 13881 flags |= FLAG_OTAG;
14186 13882 } else {
14187 13883 flags |= un->un_tagflags & FLAG_TAGMASK;
14188 13884 }
14189 13885 }
14190 13886
14191 13887 if (uscmd->uscsi_flags & USCSI_NODISCON) {
14192 13888 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14193 13889 }
14194 13890
14195 13891 pktp->pkt_flags = flags;
14196 13892
14197 13893 /* Transfer uscsi information to scsi_pkt */
14198 13894 (void) scsi_uscsi_pktinit(uscmd, pktp);
14199 13895
14200 13896 /* Copy the caller's CDB into the pkt... */
14201 13897 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14202 13898
14203 13899 if (uscmd->uscsi_timeout == 0) {
14204 13900 pktp->pkt_time = un->un_uscsi_timeout;
14205 13901 } else {
14206 13902 pktp->pkt_time = uscmd->uscsi_timeout;
14207 13903 }
14208 13904
14209 13905 /* need it later to identify USCSI request in sdintr */
14210 13906 xp->xb_pkt_flags |= SD_XB_USCSICMD;
14211 13907
14212 13908 xp->xb_sense_resid = uscmd->uscsi_rqresid;
14213 13909
14214 13910 pktp->pkt_private = bp;
14215 13911 pktp->pkt_comp = sdintr;
14216 13912 *pktpp = pktp;
14217 13913
14218 13914 SD_TRACE(SD_LOG_IO_CORE, un,
14219 13915 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14220 13916
14221 13917 return (SD_PKT_ALLOC_SUCCESS);
14222 13918 }
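
/*
 * A minimal user-level sketch (not part of the diff): the request-sense
 * sizing rule used by sd_initpkt_for_uscsi above, isolated as a pure
 * function. The three constants are illustrative stand-ins for the
 * kernel's SENSE_LENGTH and structure sizes, not the real definitions.
 */
#include <stdio.h>

#define	SENSE_LENGTH	20	/* stand-in: default sense size */
#define	ARQ_STATUS	36	/* stand-in: sizeof (struct scsi_arq_status) */
#define	EXT_SENSE	20	/* stand-in: sizeof (struct scsi_extended_sense) */

/*
 * An oversized request-sense buffer grows the status area by the extra
 * sense bytes (the PKT_XARQ case); otherwise the default size is used.
 */
static int
uscsi_statuslen(int rqlen)
{
	if (rqlen > SENSE_LENGTH)
		return (rqlen + ARQ_STATUS - EXT_SENSE);
	return (ARQ_STATUS);
}

int
main(void)
{
	/* An 18-byte buffer fits the default; 252 bytes takes the XARQ path. */
	printf("%d %d\n", uscsi_statuslen(18), uscsi_statuslen(252));
	return (0);
}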
14223 13919
14224 13920
14225 13921 /*
14226 13922 * Function: sd_destroypkt_for_uscsi
14227 13923 *
14228 13924 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14229 13925  *		IOs. Also saves relevant info into the associated uscsi_cmd
14230 13926 * struct.
14231 13927 *
14232 13928 * Context: May be called under interrupt context
14233 13929 */
14234 13930
14235 13931 static void
14236 13932 sd_destroypkt_for_uscsi(struct buf *bp)
14237 13933 {
14238 13934 struct uscsi_cmd *uscmd;
14239 13935 struct sd_xbuf *xp;
14240 13936 struct scsi_pkt *pktp;
14241 13937 struct sd_lun *un;
14242 13938 struct sd_uscsi_info *suip;
14243 13939
14244 13940 ASSERT(bp != NULL);
14245 13941 xp = SD_GET_XBUF(bp);
14246 13942 ASSERT(xp != NULL);
14247 13943 un = SD_GET_UN(bp);
14248 13944 ASSERT(un != NULL);
14249 13945 ASSERT(!mutex_owned(SD_MUTEX(un)));
14250 13946 pktp = SD_GET_PKTP(bp);
14251 13947 ASSERT(pktp != NULL);
14252 13948
14253 13949 SD_TRACE(SD_LOG_IO_CORE, un,
14254 13950 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14255 13951
14256 13952 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14257 13953 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14258 13954 ASSERT(uscmd != NULL);
14259 13955
14260 13956 /* Save the status and the residual into the uscsi_cmd struct */
14261 13957 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14262 13958 uscmd->uscsi_resid = bp->b_resid;
14263 13959
14264 13960 /* Transfer scsi_pkt information to uscsi */
14265 13961 (void) scsi_uscsi_pktfini(pktp, uscmd);
14266 13962
14267 13963 /*
14268 13964 * If enabled, copy any saved sense data into the area specified
14269 13965 * by the uscsi command.
14270 13966 */
14271 13967 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14272 13968 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14273 13969 /*
14274 13970 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14275 13971 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14276 13972 */
14277 13973 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14278 13974 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14279 13975 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14280 13976 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14281 13977 MAX_SENSE_LENGTH);
14282 13978 } else {
14283 13979 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14284 13980 SENSE_LENGTH);
14285 13981 }
14286 13982 }
14287 13983 /*
14288 13984 * The following assignments are for SCSI FMA.
14289 13985 */
14290 13986 ASSERT(xp->xb_private != NULL);
14291 13987 suip = (struct sd_uscsi_info *)xp->xb_private;
14292 13988 suip->ui_pkt_reason = pktp->pkt_reason;
14293 13989 suip->ui_pkt_state = pktp->pkt_state;
14294 13990 suip->ui_pkt_statistics = pktp->pkt_statistics;
14295 13991 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14296 13992
14297 13993 /* We are done with the scsi_pkt; free it now */
14298 13994 ASSERT(SD_GET_PKTP(bp) != NULL);
14299 13995 scsi_destroy_pkt(SD_GET_PKTP(bp));
14300 13996
14301 13997 SD_TRACE(SD_LOG_IO_CORE, un,
14302 13998 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14303 13999 }
14304 14000
14305 14001
14306 14002 /*
14307 14003 * Function: sd_bioclone_alloc
14308 14004 *
14309 14005 * Description: Allocate a buf(9S) and init it as per the given buf
14310 14006 * and the various arguments. The associated sd_xbuf
14311 14007 * struct is (nearly) duplicated. The struct buf *bp
14312 14008 * argument is saved in new_xp->xb_private.
14313 14009 *
14314 14010  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14315 14011 * datalen - size of data area for the shadow bp
14316 14012 * blkno - starting LBA
14317 14013 * func - function pointer for b_iodone in the shadow buf. (May
14318 14014 * be NULL if none.)
14319 14015 *
14320 14016  * Return Code: Pointer to the allocated buf(9S) struct
14321 14017 *
14322 14018 * Context: Can sleep.
14323 14019 */
14324 14020
14325 14021 static struct buf *
14326 14022 sd_bioclone_alloc(struct buf *bp, size_t datalen, daddr_t blkno,
14327 14023 int (*func)(struct buf *))
14328 14024 {
14329 14025 struct sd_lun *un;
14330 14026 struct sd_xbuf *xp;
14331 14027 struct sd_xbuf *new_xp;
14332 14028 struct buf *new_bp;
14333 14029
14334 14030 ASSERT(bp != NULL);
14335 14031 xp = SD_GET_XBUF(bp);
14336 14032 ASSERT(xp != NULL);
14337 14033 un = SD_GET_UN(bp);
14338 14034 ASSERT(un != NULL);
14339 14035 ASSERT(!mutex_owned(SD_MUTEX(un)));
14340 14036
14341 14037 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14342 14038 NULL, KM_SLEEP);
14343 14039
14344 14040 new_bp->b_lblkno = blkno;
14345 14041
14346 14042 /*
14347 14043 * Allocate an xbuf for the shadow bp and copy the contents of the
14348 14044 * original xbuf into it.
14349 14045 */
14350 14046 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14351 14047 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14352 14048
14353 14049 /*
14354 14050 * The given bp is automatically saved in the xb_private member
14355 14051 * of the new xbuf. Callers are allowed to depend on this.
14356 14052 */
14357 14053 new_xp->xb_private = bp;
14358 14054
14359 14055 new_bp->b_private = new_xp;
14360 14056
14361 14057 return (new_bp);
14362 14058 }
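
/*
 * A minimal user-level sketch (not part of the diff): the back-pointer
 * invariant sd_bioclone_alloc establishes, which callers are allowed to
 * depend on. The two structs are hypothetical stand-ins for buf(9S) and
 * struct sd_xbuf, trimmed to the single member that matters here.
 */
#include <assert.h>
#include <stdlib.h>

struct xbuf_s { void *xb_private; };	/* stand-in for struct sd_xbuf */
struct buf_s { void *b_private; };	/* stand-in for buf(9S) */

int
main(void)
{
	struct buf_s orig, shadow;
	struct xbuf_s *new_xp = malloc(sizeof (*new_xp));

	assert(new_xp != NULL);
	new_xp->xb_private = &orig;	/* shadow xbuf remembers original bp */
	shadow.b_private = new_xp;	/* shadow bp carries its own xbuf */

	/* Completion code can always walk back to the original request. */
	assert(((struct xbuf_s *)shadow.b_private)->xb_private == &orig);

	free(new_xp);
	return (0);
}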
14363 14059
14364 14060 /*
14365 14061 * Function: sd_shadow_buf_alloc
14366 14062 *
14367 14063 * Description: Allocate a buf(9S) and init it as per the given buf
14368 14064 * and the various arguments. The associated sd_xbuf
14369 14065 * struct is (nearly) duplicated. The struct buf *bp
14370 14066 * argument is saved in new_xp->xb_private.
14371 14067 *
14372 14068  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14373 14069 * datalen - size of data area for the shadow bp
14374 14070 * bflags - B_READ or B_WRITE (pseudo flag)
14375 14071 * blkno - starting LBA
14376 14072 * func - function pointer for b_iodone in the shadow buf. (May
14377 14073 * be NULL if none.)
14378 14074 *
14379 14075  * Return Code: Pointer to the allocated buf(9S) struct
14380 14076 *
14381 14077 * Context: Can sleep.
14382 14078 */
14383 14079
14384 14080 static struct buf *
14385 14081 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14386 14082 daddr_t blkno, int (*func)(struct buf *))
14387 14083 {
14388 14084 struct sd_lun *un;
14389 14085 struct sd_xbuf *xp;
14390 14086 struct sd_xbuf *new_xp;
14391 14087 struct buf *new_bp;
14392 14088
14393 14089 ASSERT(bp != NULL);
14394 14090 xp = SD_GET_XBUF(bp);
14395 14091 ASSERT(xp != NULL);
14396 14092 un = SD_GET_UN(bp);
14397 14093 ASSERT(un != NULL);
14398 14094 ASSERT(!mutex_owned(SD_MUTEX(un)));
14399 14095
14400 14096 if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14401 14097 bp_mapin(bp);
14402 14098 }
14403 14099
14404 14100 bflags &= (B_READ | B_WRITE);
14405 -#if defined(__i386) || defined(__amd64)
14406 14101 new_bp = getrbuf(KM_SLEEP);
14407 14102 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14408 14103 new_bp->b_bcount = datalen;
14409 14104 new_bp->b_flags = bflags |
14410 14105 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14411 -#else
14412 - new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14413 - datalen, bflags, SLEEP_FUNC, NULL);
14414 -#endif
14415 14106 new_bp->av_forw = NULL;
14416 14107 new_bp->av_back = NULL;
14417 14108 new_bp->b_dev = bp->b_dev;
14418 14109 new_bp->b_blkno = blkno;
14419 14110 new_bp->b_iodone = func;
14420 14111 new_bp->b_edev = bp->b_edev;
14421 14112 new_bp->b_resid = 0;
14422 14113
14423 14114 /* We need to preserve the B_FAILFAST flag */
14424 14115 if (bp->b_flags & B_FAILFAST) {
14425 14116 new_bp->b_flags |= B_FAILFAST;
14426 14117 }
14427 14118
14428 14119 /*
14429 14120 * Allocate an xbuf for the shadow bp and copy the contents of the
14430 14121 * original xbuf into it.
14431 14122 */
14432 14123 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14433 14124 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14434 14125
14435 14126 /* Need later to copy data between the shadow buf & original buf! */
14436 14127 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14437 14128
14438 14129 /*
14439 14130 * The given bp is automatically saved in the xb_private member
14440 14131 * of the new xbuf. Callers are allowed to depend on this.
14441 14132 */
14442 14133 new_xp->xb_private = bp;
14443 14134
14444 14135 new_bp->b_private = new_xp;
14445 14136
14446 14137 return (new_bp);
14447 14138 }
14448 14139
14449 14140 /*
14450 14141 * Function: sd_bioclone_free
14451 14142 *
14452 14143 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14453 14144  *		in the larger-than-partition operation.
14454 14145 *
14455 14146 * Context: May be called under interrupt context
14456 14147 */
14457 14148
14458 14149 static void
14459 14150 sd_bioclone_free(struct buf *bp)
14460 14151 {
14461 14152 struct sd_xbuf *xp;
14462 14153
14463 14154 ASSERT(bp != NULL);
14464 14155 xp = SD_GET_XBUF(bp);
14465 14156 ASSERT(xp != NULL);
14466 14157
14467 14158 /*
14468 14159 * Call bp_mapout() before freeing the buf, in case a lower
14469 14160 	 * layer or HBA had done a bp_mapin(). We must do this here
14470 14161 * as we are the "originator" of the shadow buf.
14471 14162 */
14472 14163 bp_mapout(bp);
14473 14164
14474 14165 /*
14475 14166 * Null out b_iodone before freeing the bp, to ensure that the driver
14476 14167 * never gets confused by a stale value in this field. (Just a little
14477 14168 * extra defensiveness here.)
14478 14169 */
14479 14170 bp->b_iodone = NULL;
14480 14171
14481 14172 freerbuf(bp);
14482 14173
14483 14174 kmem_free(xp, sizeof (struct sd_xbuf));
14484 14175 }
14485 14176
14486 14177 /*
14487 14178 * Function: sd_shadow_buf_free
14488 14179 *
14489 14180 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14490 14181 *
14491 14182 * Context: May be called under interrupt context
14492 14183 */
14493 14184
14494 14185 static void
14495 14186 sd_shadow_buf_free(struct buf *bp)
14496 14187 {
14497 14188 struct sd_xbuf *xp;
14498 14189
14499 14190 ASSERT(bp != NULL);
14500 14191 xp = SD_GET_XBUF(bp);
14501 14192 ASSERT(xp != NULL);
14502 14193
14503 -#if defined(__sparc)
14504 14194 /*
14505 - * Call bp_mapout() before freeing the buf, in case a lower
14506 - * layer or HBA had done a bp_mapin(). we must do this here
14507 - * as we are the "originator" of the shadow buf.
14508 - */
14509 - bp_mapout(bp);
14510 -#endif
14511 -
14512 - /*
14513 14195 * Null out b_iodone before freeing the bp, to ensure that the driver
14514 14196 * never gets confused by a stale value in this field. (Just a little
14515 14197 * extra defensiveness here.)
14516 14198 */
14517 14199 bp->b_iodone = NULL;
14518 14200
14519 -#if defined(__i386) || defined(__amd64)
14520 14201 kmem_free(bp->b_un.b_addr, bp->b_bcount);
14521 14202 freerbuf(bp);
14522 -#else
14523 - scsi_free_consistent_buf(bp);
14524 -#endif
14525 14203
14526 14204 kmem_free(xp, sizeof (struct sd_xbuf));
14527 14205 }
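
/*
 * A minimal user-level sketch (not part of the diff): with the sparc-only
 * scsi_alloc_consistent_buf()/scsi_free_consistent_buf() branches removed,
 * the shadow-buf data area and header are always allocated and released
 * as a pair. The names below are hypothetical models of that pairing.
 */
#include <stdlib.h>

struct shadow_s {
	void	*data;	/* models b_un.b_addr from kmem_zalloc() */
	size_t	len;	/* models b_bcount */
};

static struct shadow_s *
shadow_alloc(size_t len)
{
	struct shadow_s *s = calloc(1, sizeof (*s));	/* like getrbuf() */

	if (s != NULL) {
		s->data = calloc(1, len);	/* like kmem_zalloc() */
		s->len = len;
	}
	return (s);
}

static void
shadow_free(struct shadow_s *s)
{
	if (s == NULL)
		return;
	free(s->data);	/* like kmem_free(b_un.b_addr, b_bcount) */
	free(s);	/* like freerbuf() */
}

int
main(void)
{
	/* Every shadow_alloc() is matched by exactly one shadow_free(). */
	shadow_free(shadow_alloc(512));
	return (0);
}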
14528 14206
14529 14207
14530 14208 /*
14531 14209 * Function: sd_print_transport_rejected_message
14532 14210 *
14533 14211 * Description: This implements the ludicrously complex rules for printing
14534 14212 * a "transport rejected" message. This is to address the
14535 14213 * specific problem of having a flood of this error message
14536 14214 * produced when a failover occurs.
14537 14215 *
14538 14216 * Context: Any.
14539 14217 */
14540 14218
14541 14219 static void
14542 14220 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14543 14221 int code)
14544 14222 {
14545 14223 ASSERT(un != NULL);
14546 14224 ASSERT(mutex_owned(SD_MUTEX(un)));
14547 14225 ASSERT(xp != NULL);
14548 14226
14549 14227 /*
14550 14228 * Print the "transport rejected" message under the following
14551 14229 * conditions:
14552 14230 *
14553 14231 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14554 14232 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14555 14233 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14556 14234 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14557 14235 * scsi_transport(9F) (which indicates that the target might have
14558 14236 * gone off-line). This uses the un->un_tran_fatal_count
14559 14237 * count, which is incremented whenever a TRAN_FATAL_ERROR is
14560 14238 	 * received, and reset to zero whenever a TRAN_ACCEPT is returned
14561 14239 * from scsi_transport().
14562 14240 *
14563 14241 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14564 14242 	 * the preceding cases in order for the message to be printed.
14565 14243 */
14566 14244 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14567 14245 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14568 14246 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14569 14247 (code != TRAN_FATAL_ERROR) ||
14570 14248 (un->un_tran_fatal_count == 1)) {
14571 14249 switch (code) {
14572 14250 case TRAN_BADPKT:
14573 14251 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14574 14252 "transport rejected bad packet\n");
14575 14253 break;
14576 14254 case TRAN_FATAL_ERROR:
14577 14255 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14578 14256 "transport rejected fatal error\n");
14579 14257 break;
14580 14258 default:
14581 14259 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14582 14260 "transport rejected (%d)\n", code);
14583 14261 break;
14584 14262 }
14585 14263 }
14586 14264 }
14587 14265 }
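
/*
 * A minimal user-level sketch (not part of the diff): the suppression
 * rules above folded into one predicate. The boolean parameters are
 * hypothetical stand-ins for the state the real routine reads
 * (FLAG_SILENT, SD_FM_LOG(un), sd_level_mask, code, un_tran_fatal_count).
 */
#include <assert.h>
#include <stdbool.h>

static bool
should_print_rejection(bool silent, bool fm_log_nsup, bool diag_logging,
    bool is_fatal_error, int tran_fatal_count)
{
	if (silent || !fm_log_nsup)
		return (false);		/* FLAG_SILENT set, or FM logs it */
	if (diag_logging)
		return (true);		/* SD_LOGMASK_DIAG always prints */
	if (!is_fatal_error)
		return (true);		/* non-fatal rejections always print */
	return (tran_fatal_count == 1);	/* only the first TRAN_FATAL_ERROR */
}

int
main(void)
{
	/* A repeated fatal rejection is suppressed... */
	assert(!should_print_rejection(false, true, false, true, 2));
	/* ...but the first one after a TRAN_ACCEPT is reported. */
	assert(should_print_rejection(false, true, false, true, 1));
	return (0);
}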
14588 14266
14589 14267
14590 14268 /*
14591 14269 * Function: sd_add_buf_to_waitq
14592 14270 *
14593 14271 * Description: Add the given buf(9S) struct to the wait queue for the
14594 14272 * instance. If sorting is enabled, then the buf is added
14595 14273 * to the queue via an elevator sort algorithm (a la
14596 14274 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
14597 14275 * If sorting is not enabled, then the buf is just added
14598 14276 * to the end of the wait queue.
14599 14277 *
14600 14278 * Return Code: void
14601 14279 *
14602 14280  * Context: Does not sleep/block, so it can technically be called
14603 14281  *		from any context. However, if sorting is enabled, the
14604 14282  *		execution time is indeterminate and may be long if
14605 14283 * the wait queue grows large.
14606 14284 */
14607 14285
14608 14286 static void
14609 14287 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
14610 14288 {
14611 14289 struct buf *ap;
14612 14290
14613 14291 ASSERT(bp != NULL);
14614 14292 ASSERT(un != NULL);
14615 14293 ASSERT(mutex_owned(SD_MUTEX(un)));
14616 14294
14617 14295 /* If the queue is empty, add the buf as the only entry & return. */
14618 14296 if (un->un_waitq_headp == NULL) {
14619 14297 ASSERT(un->un_waitq_tailp == NULL);
14620 14298 un->un_waitq_headp = un->un_waitq_tailp = bp;
14621 14299 bp->av_forw = NULL;
14622 14300 return;
14623 14301 }
14624 14302
14625 14303 ASSERT(un->un_waitq_tailp != NULL);
14626 14304
14627 14305 /*
14628 14306 * If sorting is disabled, just add the buf to the tail end of
14629 14307 * the wait queue and return.
14630 14308 */
14631 14309 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
14632 14310 un->un_waitq_tailp->av_forw = bp;
14633 14311 un->un_waitq_tailp = bp;
14634 14312 bp->av_forw = NULL;
14635 14313 return;
14636 14314 }
14637 14315
14638 14316 /*
14639 14317 	 * Sort through the list of requests currently on the wait queue
14640 14318 * and add the new buf request at the appropriate position.
14641 14319 *
14642 14320 * The un->un_waitq_headp is an activity chain pointer on which
14643 14321 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14644 14322 * first queue holds those requests which are positioned after
14645 14323 * the current SD_GET_BLKNO() (in the first request); the second holds
14646 14324 * requests which came in after their SD_GET_BLKNO() number was passed.
14647 14325 	 * Thus we implement a one-way scan, retracting after reaching
14648 14326 * the end of the drive to the first request on the second
14649 14327 * queue, at which time it becomes the first queue.
14650 14328 * A one-way scan is natural because of the way UNIX read-ahead
14651 14329 * blocks are allocated.
14652 14330 *
14653 14331 * If we lie after the first request, then we must locate the
14654 14332 * second request list and add ourselves to it.
14655 14333 */
14656 14334 ap = un->un_waitq_headp;
14657 14335 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14658 14336 while (ap->av_forw != NULL) {
14659 14337 /*
14660 14338 * Look for an "inversion" in the (normally
14661 14339 * ascending) block numbers. This indicates
14662 14340 * the start of the second request list.
14663 14341 */
14664 14342 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14665 14343 /*
14666 14344 * Search the second request list for the
14667 14345 * first request at a larger block number.
14668 14346 * We go before that; however if there is
14669 14347 * no such request, we go at the end.
14670 14348 */
14671 14349 do {
14672 14350 if (SD_GET_BLKNO(bp) <
14673 14351 SD_GET_BLKNO(ap->av_forw)) {
14674 14352 goto insert;
14675 14353 }
14676 14354 ap = ap->av_forw;
14677 14355 } while (ap->av_forw != NULL);
14678 14356 goto insert; /* after last */
14679 14357 }
14680 14358 ap = ap->av_forw;
14681 14359 }
14682 14360
14683 14361 /*
14684 14362 * No inversions... we will go after the last, and
14685 14363 * be the first request in the second request list.
14686 14364 */
14687 14365 goto insert;
14688 14366 }
14689 14367
14690 14368 /*
14691 14369 * Request is at/after the current request...
14692 14370 * sort in the first request list.
14693 14371 */
14694 14372 while (ap->av_forw != NULL) {
14695 14373 /*
14696 14374 * We want to go after the current request (1) if
14697 14375 * there is an inversion after it (i.e. it is the end
14698 14376 * of the first request list), or (2) if the next
14699 14377 * request is a larger block no. than our request.
14700 14378 */
14701 14379 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14702 14380 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14703 14381 goto insert;
14704 14382 }
14705 14383 ap = ap->av_forw;
14706 14384 }
14707 14385
14708 14386 /*
14709 14387 * Neither a second list nor a larger request, therefore
14710 14388 * we go at the end of the first list (which is the same
14711 14389 	 * as the end of the whole shebang).
14712 14390 */
14713 14391 insert:
14714 14392 bp->av_forw = ap->av_forw;
14715 14393 ap->av_forw = bp;
14716 14394
14717 14395 /*
14718 14396 * If we inserted onto the tail end of the waitq, make sure the
14719 14397 * tail pointer is updated.
14720 14398 */
14721 14399 if (ap == un->un_waitq_tailp) {
14722 14400 un->un_waitq_tailp = bp;
14723 14401 }
14724 14402 }
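
/*
 * A minimal user-level sketch (not part of the diff): the two-segment
 * elevator insert implemented above, modeled on a plain singly-linked
 * list keyed by block number. node_t and waitq_insert are hypothetical;
 * only the ordering decisions mirror sd_add_buf_to_waitq.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct node {
	long		blkno;
	struct node	*next;
} node_t;

static void
waitq_insert(node_t **headp, node_t *np)
{
	node_t *ap = *headp;

	if (ap == NULL) {		/* empty queue */
		np->next = NULL;
		*headp = np;
		return;
	}
	if (np->blkno < ap->blkno) {
		/* Belongs in the second (wrapped) segment. */
		while (ap->next != NULL) {
			if (ap->next->blkno < ap->blkno) {	/* inversion */
				while (ap->next != NULL &&
				    np->blkno >= ap->next->blkno)
					ap = ap->next;
				break;
			}
			ap = ap->next;
		}
	} else {
		/* Sort into the first segment, ahead of any inversion. */
		while (ap->next != NULL &&
		    ap->next->blkno >= ap->blkno &&
		    np->blkno >= ap->next->blkno)
			ap = ap->next;
	}
	np->next = ap->next;
	ap->next = np;
}

int
main(void)
{
	long blks[] = { 100, 300, 200, 50, 150 };
	node_t *head = NULL;

	for (int i = 0; i < 5; i++) {
		node_t *np = malloc(sizeof (*np));
		np->blkno = blks[i];
		waitq_insert(&head, np);
	}
	/* Prints "100 150 200 300 50": 50 starts the wrapped segment. */
	for (node_t *p = head; p != NULL; p = p->next)
		printf("%ld ", p->blkno);
	printf("\n");
	return (0);
}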
14725 14403
14726 14404
14727 14405 /*
14728 14406 * Function: sd_start_cmds
14729 14407 *
14730 14408 * Description: Remove and transport cmds from the driver queues.
14731 14409 *
14732 14410 * Arguments: un - pointer to the unit (soft state) struct for the target.
14733 14411 *
14734 14412 * immed_bp - ptr to a buf to be transported immediately. Only
14735 14413 * the immed_bp is transported; bufs on the waitq are not
14736 14414 * processed and the un_retry_bp is not checked. If immed_bp is
14737 14415 * NULL, then normal queue processing is performed.
14738 14416 *
14739 14417 * Context: May be called from kernel thread context, interrupt context,
14740 14418 * or runout callback context. This function may not block or
14741 14419 * call routines that block.
14742 14420 */
14743 14421
14744 14422 static void
14745 14423 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14746 14424 {
14747 14425 struct sd_xbuf *xp;
14748 14426 struct buf *bp;
14749 14427 void (*statp)(kstat_io_t *);
14750 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14751 14428 void (*saved_statp)(kstat_io_t *);
14752 -#endif
14753 14429 int rval;
14754 14430 struct sd_fm_internal *sfip = NULL;
14755 14431
14756 14432 ASSERT(un != NULL);
14757 14433 ASSERT(mutex_owned(SD_MUTEX(un)));
14758 14434 ASSERT(un->un_ncmds_in_transport >= 0);
14759 14435 ASSERT(un->un_throttle >= 0);
14760 14436
14761 14437 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14762 14438
14439 + /*
14440 +	 * If the device is currently retired, abort all pending I/O.
14441 + */
14442 + if (DEVI(un->un_sd->sd_dev)->devi_flags & DEVI_RETIRED) {
14443 + if (immed_bp) {
14444 + immed_bp->b_resid = immed_bp->b_bcount;
14445 + bioerror(immed_bp, ENXIO);
14446 + biodone(immed_bp);
14447 + }
14448 + /* abort in-flight IO */
14449 + (void) scsi_abort(SD_ADDRESS(un), NULL);
14450 + /* abort pending IO */
14451 + un->un_failfast_state = SD_FAILFAST_ACTIVE;
14452 + un->un_failfast_bp = NULL;
14453 + sd_failfast_flushq(un, B_TRUE);
14454 + return;
14455 + }
14456 +
14763 14457 do {
14764 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14765 14458 saved_statp = NULL;
14766 -#endif
14767 14459
14768 14460 /*
14769 14461 * If we are syncing or dumping, fail the command to
14770 14462 * avoid recursively calling back into scsi_transport().
14771 14463 * The dump I/O itself uses a separate code path so this
14772 14464 * only prevents non-dump I/O from being sent while dumping.
14773 14465 * File system sync takes place before dumping begins.
14774 14466 * During panic, filesystem I/O is allowed provided
14775 14467 * un_in_callback is <= 1. This is to prevent recursion
14776 14468 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14777 14469 * sd_start_cmds and so on. See panic.c for more information
14778 14470 * about the states the system can be in during panic.
14779 14471 */
14780 14472 if ((un->un_state == SD_STATE_DUMPING) ||
14781 14473 (ddi_in_panic() && (un->un_in_callback > 1))) {
14782 14474 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14783 14475 "sd_start_cmds: panicking\n");
14784 14476 goto exit;
14785 14477 }
14786 14478
14787 14479 if ((bp = immed_bp) != NULL) {
14788 14480 /*
14789 14481 * We have a bp that must be transported immediately.
14790 14482 * It's OK to transport the immed_bp here without doing
14791 14483 * the throttle limit check because the immed_bp is
14792 14484 * always used in a retry/recovery case. This means
14793 14485 * that we know we are not at the throttle limit by
14794 14486 * virtue of the fact that to get here we must have
14795 14487 * already gotten a command back via sdintr(). This also
14796 14488 * relies on (1) the command on un_retry_bp preventing
14797 14489 * further commands from the waitq from being issued;
14798 14490 * and (2) the code in sd_retry_command checking the
14799 14491 * throttle limit before issuing a delayed or immediate
14800 14492 * retry. This holds even if the throttle limit is
14801 14493 * currently ratcheted down from its maximum value.
14802 14494 */
14803 14495 statp = kstat_runq_enter;
14804 14496 if (bp == un->un_retry_bp) {
14805 14497 ASSERT((un->un_retry_statp == NULL) ||
14806 14498 (un->un_retry_statp == kstat_waitq_enter) ||
14807 14499 (un->un_retry_statp ==
14808 14500 kstat_runq_back_to_waitq));
14809 14501 /*
14810 14502 * If the waitq kstat was incremented when
14811 14503 * sd_set_retry_bp() queued this bp for a retry,
14812 14504 * then we must set up statp so that the waitq
14813 14505 * count will get decremented correctly below.
14814 14506 * Also we must clear un->un_retry_statp to
14815 14507 * ensure that we do not act on a stale value
14816 14508 * in this field.
14817 14509 */
14818 14510 if ((un->un_retry_statp == kstat_waitq_enter) ||
14819 14511 (un->un_retry_statp ==
14820 14512 kstat_runq_back_to_waitq)) {
14821 14513 statp = kstat_waitq_to_runq;
14822 14514 }
14823 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14824 14515 saved_statp = un->un_retry_statp;
14825 -#endif
14826 14516 un->un_retry_statp = NULL;
14827 14517
14828 14518 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14829 14519 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14830 14520 "un_throttle:%d un_ncmds_in_transport:%d\n",
14831 14521 un, un->un_retry_bp, un->un_throttle,
14832 14522 un->un_ncmds_in_transport);
14833 14523 } else {
14834 14524 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14835 14525 "processing priority bp:0x%p\n", bp);
14836 14526 }
14837 14527
14838 14528 } else if ((bp = un->un_waitq_headp) != NULL) {
14839 14529 /*
14840 14530 * A command on the waitq is ready to go, but do not
14841 14531 * send it if:
14842 14532 *
14843 14533 * (1) the throttle limit has been reached, or
14844 14534 * (2) a retry is pending, or
14845 14535 * (3) a START_STOP_UNIT callback pending, or
14846 14536 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14847 14537 * command is pending.
14848 14538 *
14849 14539 * For all of these conditions, IO processing will
14850 14540 * restart after the condition is cleared.
14851 14541 */
14852 14542 if (un->un_ncmds_in_transport >= un->un_throttle) {
14853 14543 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14854 14544 "sd_start_cmds: exiting, "
14855 14545 "throttle limit reached!\n");
14856 14546 goto exit;
14857 14547 }
14858 14548 if (un->un_retry_bp != NULL) {
14859 14549 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14860 14550 "sd_start_cmds: exiting, retry pending!\n");
14861 14551 goto exit;
14862 14552 }
14863 14553 if (un->un_startstop_timeid != NULL) {
14864 14554 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14865 14555 "sd_start_cmds: exiting, "
14866 14556 "START_STOP pending!\n");
14867 14557 goto exit;
14868 14558 }
14869 14559 if (un->un_direct_priority_timeid != NULL) {
14870 14560 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14871 14561 "sd_start_cmds: exiting, "
14872 14562 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14873 14563 goto exit;
14874 14564 }
14875 14565
14876 14566 /* Dequeue the command */
14877 14567 un->un_waitq_headp = bp->av_forw;
14878 14568 if (un->un_waitq_headp == NULL) {
14879 14569 un->un_waitq_tailp = NULL;
14880 14570 }
14881 14571 bp->av_forw = NULL;
14882 14572 statp = kstat_waitq_to_runq;
14883 14573 SD_TRACE(SD_LOG_IO_CORE, un,
14884 14574 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14885 14575
14886 14576 } else {
14887 14577 /* No work to do so bail out now */
14888 14578 SD_TRACE(SD_LOG_IO_CORE, un,
14889 14579 "sd_start_cmds: no more work, exiting!\n");
14890 14580 goto exit;
14891 14581 }
14892 14582
14893 14583 /*
14894 14584 * Reset the state to normal. This is the mechanism by which
14895 14585 * the state transitions from either SD_STATE_RWAIT or
14896 14586 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14897 14587 * If state is SD_STATE_PM_CHANGING then this command is
14898 14588 * part of the device power control and the state must
14899 14589 		 * not be put back to normal. Doing so would allow new
14900 14590 		 * commands to proceed when they shouldn't; the device
14901 14591 		 * may be going off.
14592 + *
14593 +	 * Similarly, if the state is SD_STATE_ATTACHING, we should
14594 + * not set it to SD_STATE_NORMAL to avoid corruption.
14902 14595 */
14903 14596 if ((un->un_state != SD_STATE_SUSPENDED) &&
14904 - (un->un_state != SD_STATE_PM_CHANGING)) {
14597 + (un->un_state != SD_STATE_PM_CHANGING) &&
14598 + (un->un_state != SD_STATE_ATTACHING)) {
14905 14599 New_state(un, SD_STATE_NORMAL);
14906 14600 }
14907 14601
14908 14602 xp = SD_GET_XBUF(bp);
14909 14603 ASSERT(xp != NULL);
14910 14604
14911 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14912 14605 /*
14913 14606 * Allocate the scsi_pkt if we need one, or attach DMA
14914 14607 * resources if we have a scsi_pkt that needs them. The
14915 14608 * latter should only occur for commands that are being
14916 14609 * retried.
14917 14610 */
14918 14611 if ((xp->xb_pktp == NULL) ||
14919 14612 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14920 -#else
14921 - if (xp->xb_pktp == NULL) {
14922 -#endif
14923 14613 /*
14924 14614 * There is no scsi_pkt allocated for this buf. Call
14925 14615 * the initpkt function to allocate & init one.
14926 14616 *
14927 14617 * The scsi_init_pkt runout callback functionality is
14928 14618 * implemented as follows:
14929 14619 *
14930 14620 * 1) The initpkt function always calls
14931 14621 * scsi_init_pkt(9F) with sdrunout specified as the
14932 14622 * callback routine.
14933 14623 * 2) A successful packet allocation is initialized and
14934 14624 * the I/O is transported.
14935 14625 * 3) The I/O associated with an allocation resource
14936 14626 * failure is left on its queue to be retried via
14937 14627 * runout or the next I/O.
14938 14628 * 4) The I/O associated with a DMA error is removed
14939 14629 * from the queue and failed with EIO. Processing of
14940 14630 * the transport queues is also halted to be
14941 14631 * restarted via runout or the next I/O.
14942 14632 * 5) The I/O associated with a CDB size or packet
14943 14633 * size error is removed from the queue and failed
14944 14634 * with EIO. Processing of the transport queues is
14945 14635 * continued.
14946 14636 *
14947 14637 * Note: there is no interface for canceling a runout
14948 14638 * callback. To prevent the driver from detaching or
14949 14639 * suspending while a runout is pending the driver
14950 14640 * state is set to SD_STATE_RWAIT
14951 14641 *
14952 14642 * Note: using the scsi_init_pkt callback facility can
14953 14643 * result in an I/O request persisting at the head of
14954 14644 * the list which cannot be satisfied even after
14955 14645 * multiple retries. In the future the driver may
14956 14646 * implement some kind of maximum runout count before
14957 14647 * failing an I/O.
14958 14648 *
14959 14649 * Note: the use of funcp below may seem superfluous,
14960 14650 * but it helps warlock figure out the correct
14961 14651 * initpkt function calls (see [s]sd.wlcmd).
14962 14652 */
14963 14653 struct scsi_pkt *pktp;
14964 14654 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
14965 14655
14966 14656 ASSERT(bp != un->un_rqs_bp);
14967 14657
14968 14658 funcp = sd_initpkt_map[xp->xb_chain_iostart];
14969 14659 switch ((*funcp)(bp, &pktp)) {
14970 14660 case SD_PKT_ALLOC_SUCCESS:
14971 14661 xp->xb_pktp = pktp;
14972 14662 SD_TRACE(SD_LOG_IO_CORE, un,
14973 14663 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
14974 14664 pktp);
14975 14665 goto got_pkt;
14976 14666
14977 14667 case SD_PKT_ALLOC_FAILURE:
14978 14668 /*
14979 14669 * Temporary (hopefully) resource depletion.
14980 14670 * Since retries and RQS commands always have a
14981 14671 * scsi_pkt allocated, these cases should never
14982 14672 * get here. So the only cases this needs to
14983 14673 * handle is a bp from the waitq (which we put
14984 14674 * back onto the waitq for sdrunout), or a bp
14985 14675 * sent as an immed_bp (which we just fail).
14986 14676 */
14987 14677 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14988 14678 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
14989 14679
14990 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14991 -
14992 14680 if (bp == immed_bp) {
14993 14681 /*
14994 14682 * If SD_XB_DMA_FREED is clear, then
14995 14683 * this is a failure to allocate a
14996 14684 * scsi_pkt, and we must fail the
14997 14685 * command.
14998 14686 */
14999 14687 if ((xp->xb_pkt_flags &
15000 14688 SD_XB_DMA_FREED) == 0) {
15001 14689 break;
15002 14690 }
15003 14691
15004 14692 /*
15005 14693 * If this immediate command is NOT our
15006 14694 * un_retry_bp, then we must fail it.
15007 14695 */
15008 14696 if (bp != un->un_retry_bp) {
15009 14697 break;
15010 14698 }
15011 14699
15012 14700 /*
15013 14701 * We get here if this cmd is our
15014 14702 * un_retry_bp that was DMAFREED, but
15015 14703 * scsi_init_pkt() failed to reallocate
15016 14704 * DMA resources when we attempted to
15017 14705 * retry it. This can happen when an
15018 14706 * mpxio failover is in progress, but
15019 14707 * we don't want to just fail the
15020 14708 * command in this case.
15021 14709 *
15022 14710 * Use timeout(9F) to restart it after
15023 14711 * a 100ms delay. We don't want to
15024 14712 * let sdrunout() restart it, because
15025 14713 * sdrunout() is just supposed to start
15026 14714 * commands that are sitting on the
15027 14715 * wait queue. The un_retry_bp stays
15028 14716 * set until the command completes, but
15029 14717 * sdrunout can be called many times
15030 14718 * before that happens. Since sdrunout
15031 14719 * cannot tell if the un_retry_bp is
15032 14720 * already in the transport, it could
15033 14721 * end up calling scsi_transport() for
15034 14722 * the un_retry_bp multiple times.
15035 14723 *
15036 14724 * Also: don't schedule the callback
15037 14725 * if some other callback is already
15038 14726 * pending.
15039 14727 */
15040 14728 if (un->un_retry_statp == NULL) {
15041 14729 /*
15042 14730 * restore the kstat pointer to
15043 14731 * keep kstat counts coherent
15044 14732 * when we do retry the command.
15045 14733 */
15046 14734 un->un_retry_statp =
15047 14735 saved_statp;
15048 14736 }
15049 14737
15050 14738 if ((un->un_startstop_timeid == NULL) &&
15051 14739 (un->un_retry_timeid == NULL) &&
15052 14740 (un->un_direct_priority_timeid ==
|
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
15053 14741 NULL)) {
15054 14742
15055 14743 un->un_retry_timeid =
15056 14744 timeout(
15057 14745 sd_start_retry_command,
15058 14746 un, SD_RESTART_TIMEOUT);
15059 14747 }
15060 14748 goto exit;
15061 14749 }
15062 14750
15063 -#else
15064 - if (bp == immed_bp) {
15065 - break; /* Just fail the command */
15066 - }
15067 -#endif
15068 -
15069 14751 /* Add the buf back to the head of the waitq */
15070 14752 bp->av_forw = un->un_waitq_headp;
15071 14753 un->un_waitq_headp = bp;
15072 14754 if (un->un_waitq_tailp == NULL) {
15073 14755 un->un_waitq_tailp = bp;
15074 14756 }
15075 14757 goto exit;
15076 14758
15077 14759 case SD_PKT_ALLOC_FAILURE_NO_DMA:
15078 14760 /*
15079 14761 * HBA DMA resource failure. Fail the command
15080 14762 * and continue processing of the queues.
15081 14763 */
15082 14764 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15083 14765 "sd_start_cmds: "
15084 14766 "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
15085 14767 break;
15086 14768
15087 14769 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
15088 14770 /*
15089 - * Note:x86: Partial DMA mapping not supported
15090 - * for USCSI commands, and all the needed DMA
15091 - * resources were not allocated.
14771 + * Partial DMA mapping not supported for USCSI
14772 + * commands, and all the needed DMA resources
14773 + * were not allocated.
15092 14774 */
15093 14775 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15094 14776 "sd_start_cmds: "
15095 14777 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
15096 14778 break;
15097 14779
15098 14780 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
15099 14781 /*
15100 - * Note:x86: Request cannot fit into CDB based
15101 - * on lba and len.
14782 + * Request cannot fit into CDB based on lba
14783 + * and len.
15102 14784 */
15103 14785 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15104 14786 "sd_start_cmds: "
15105 14787 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
15106 14788 break;
15107 14789
15108 14790 default:
15109 14791 /* Should NEVER get here! */
15110 14792 panic("scsi_initpkt error");
15111 14793 /*NOTREACHED*/
15112 14794 }
15113 14795
15114 14796 /*
15115 14797 * Fatal error in allocating a scsi_pkt for this buf.
15116 14798 * Update kstats & return the buf with an error code.
15117 14799 * We must use sd_return_failed_command_no_restart() to
15118 14800 * avoid a recursive call back into sd_start_cmds().
15119 14801 * However this also means that we must keep processing
15120 14802 * the waitq here in order to avoid stalling.
15121 14803 */
15122 14804 if (statp == kstat_waitq_to_runq) {
15123 14805 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
15124 14806 }
15125 14807 sd_return_failed_command_no_restart(un, bp, EIO);
15126 14808 if (bp == immed_bp) {
15127 14809 /* immed_bp is gone by now, so clear this */
15128 14810 immed_bp = NULL;
15129 14811 }
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
15130 14812 continue;
15131 14813 }
15132 14814 got_pkt:
15133 14815 if (bp == immed_bp) {
15134 14816 /* goto the head of the class.... */
15135 14817 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15136 14818 }
15137 14819
15138 14820 un->un_ncmds_in_transport++;
15139 14821 SD_UPDATE_KSTATS(un, statp, bp);
14822 +		/* The start time MAY be overridden by the HBA driver. */
14823 + xp->xb_pktp->pkt_start = gethrtime();
14824 + xp->xb_pktp->pkt_stop = 0;
15140 14825
15141 14826 /*
15142 14827 * Call scsi_transport() to send the command to the target.
15143 14828 * According to SCSA architecture, we must drop the mutex here
15144 14829 * before calling scsi_transport() in order to avoid deadlock.
15145 14830 * Note that the scsi_pkt's completion routine can be executed
15146 14831 * (from interrupt context) even before the call to
15147 14832 * scsi_transport() returns.
15148 14833 */
15149 14834 SD_TRACE(SD_LOG_IO_CORE, un,
15150 14835 "sd_start_cmds: calling scsi_transport()\n");
15151 14836 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
15152 14837
14838 +#ifdef SD_FAULT_INJECTION
14839 + /*
14840 + * Packet is ready for submission to the HBA. Perform HBA-based
14841 + * fault-injection.
14842 + */
14843 + sd_prefaultinjection(xp->xb_pktp);
14844 +#endif /* SD_FAULT_INJECTION */
14845 +
15153 14846 mutex_exit(SD_MUTEX(un));
15154 14847 rval = scsi_transport(xp->xb_pktp);
15155 14848 mutex_enter(SD_MUTEX(un));
15156 14849
15157 14850 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15158 14851 "sd_start_cmds: scsi_transport() returned %d\n", rval);
15159 14852
15160 14853 switch (rval) {
15161 14854 case TRAN_ACCEPT:
15162 14855 /* Clear this with every pkt accepted by the HBA */
15163 14856 un->un_tran_fatal_count = 0;
15164 14857 break; /* Success; try the next cmd (if any) */
15165 14858
15166 14859 case TRAN_BUSY:
15167 14860 un->un_ncmds_in_transport--;
15168 14861 ASSERT(un->un_ncmds_in_transport >= 0);
15169 14862
14863 +#ifdef SD_FAULT_INJECTION
15170 14864 /*
14865 +			 * If the packet was rejected during an active fault
14866 +			 * injection session, move to the next fault slot
14867 +			 * and clear the packet flag related to the rejection.
14868 + */
14869 + if (sd_fault_injection_on) {
14870 + uint_t i = un->sd_fi_fifo_start;
14871 +
14872 + if (un->sd_fi_fifo_tran[i] != NULL) {
14873 + kmem_free(un->sd_fi_fifo_tran[i],
14874 + sizeof (struct sd_fi_tran));
14875 + un->sd_fi_fifo_tran[i] = NULL;
14876 + }
14877 + un->sd_fi_fifo_start++;
14878 + }
14879 +
14880 + if (xp->xb_pktp->pkt_flags & FLAG_PKT_BUSY) {
14881 + xp->xb_pktp->pkt_flags &= ~FLAG_PKT_BUSY;
14882 + }
14883 +#endif /* SD_FAULT_INJECTION */
14884 +
14885 + /*
15171 14886 			 * Don't retry request sense; the sense data
15172 14887 * is lost when another request is sent.
15173 14888 * Free up the rqs buf and retry
15174 14889 * the original failed cmd. Update kstat.
15175 14890 */
15176 - if (bp == un->un_rqs_bp) {
14891 + if ((un->un_ncmds_in_transport > 0) &&
14892 + (bp == un->un_rqs_bp)) {
15177 14893 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15178 14894 bp = sd_mark_rqs_idle(un, xp);
15179 14895 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15180 14896 NULL, NULL, EIO, un->un_busy_timeout / 500,
15181 14897 kstat_waitq_enter);
15182 14898 goto exit;
15183 14899 }
15184 14900
15185 -#if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15186 14901 /*
15187 14902 * Free the DMA resources for the scsi_pkt. This will
15188 14903 * allow mpxio to select another path the next time
15189 14904 * we call scsi_transport() with this scsi_pkt.
15190 14905 * See sdintr() for the rationalization behind this.
15191 14906 */
15192 14907 if ((un->un_f_is_fibre == TRUE) &&
15193 14908 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
15194 14909 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
15195 14910 scsi_dmafree(xp->xb_pktp);
15196 14911 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
15197 14912 }
15198 -#endif
15199 14913
15200 14914 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
15201 14915 /*
15202 14916 * Commands that are SD_PATH_DIRECT_PRIORITY
15203 14917 * are for error recovery situations. These do
15204 14918 * not use the normal command waitq, so if they
15205 14919 * get a TRAN_BUSY we cannot put them back onto
15206 14920 * the waitq for later retry. One possible
15207 14921 * problem is that there could already be some
15208 14922 * other command on un_retry_bp that is waiting
15209 14923 * for this one to complete, so we would be
15210 14924 * deadlocked if we put this command back onto
15211 14925 * the waitq for later retry (since un_retry_bp
15212 14926 * must complete before the driver gets back to
15213 14927 * commands on the waitq).
15214 14928 *
15215 14929 * To avoid deadlock we must schedule a callback
15216 14930 * that will restart this command after a set
15217 14931 * interval. This should keep retrying for as
15218 14932 * long as the underlying transport keeps
15219 14933 * returning TRAN_BUSY (just like for other
15220 14934 * commands). Use the same timeout interval as
15221 14935 * for the ordinary TRAN_BUSY retry.
15222 14936 */
15223 14937 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15224 14938 "sd_start_cmds: scsi_transport() returned "
15225 14939 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");
15226 14940
15227 14941 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15228 14942 un->un_direct_priority_timeid =
15229 14943 timeout(sd_start_direct_priority_command,
15230 14944 bp, un->un_busy_timeout / 500);
15231 14945
15232 14946 goto exit;
15233 14947 }
15234 14948
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
15235 14949 /*
15236 14950 * For TRAN_BUSY, we want to reduce the throttle value,
15237 14951 * unless we are retrying a command.
15238 14952 */
15239 14953 if (bp != un->un_retry_bp) {
15240 14954 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
15241 14955 }
15242 14956
15243 14957 /*
15244 14958 * Set up the bp to be tried again 10 ms later.
15245 - * Note:x86: Is there a timeout value in the sd_lun
14959 + * XXX Is there a timeout value in the sd_lun
15246 14960 * for this condition?
15247 14961 */
15248 14962 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
15249 14963 kstat_runq_back_to_waitq);
15250 14964 goto exit;
15251 14965
15252 14966 case TRAN_FATAL_ERROR:
15253 14967 un->un_tran_fatal_count++;
15254 14968 /* FALLTHRU */
15255 14969
15256 14970 case TRAN_BADPKT:
15257 14971 default:
15258 14972 un->un_ncmds_in_transport--;
15259 14973 ASSERT(un->un_ncmds_in_transport >= 0);
15260 14974
15261 14975 /*
15262 14976 * If this is our REQUEST SENSE command with a
15263 14977 * transport error, we must get back the pointers
15264 14978 * to the original buf, and mark the REQUEST
15265 14979 * SENSE command as "available".
15266 14980 */
15267 14981 if (bp == un->un_rqs_bp) {
15268 14982 bp = sd_mark_rqs_idle(un, xp);
15269 14983 xp = SD_GET_XBUF(bp);
15270 14984 } else {
15271 14985 /*
15272 14986 * Legacy behavior: do not update transport
15273 14987 * error count for request sense commands.
15274 14988 */
15275 14989 SD_UPDATE_ERRSTATS(un, sd_transerrs);
15276 14990 }
15277 14991
15278 14992 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15279 14993 sd_print_transport_rejected_message(un, xp, rval);
15280 14994
15281 14995 /*
15282 14996 * This command will be terminated by SD driver due
15283 14997 * to a fatal transport error. We should post
15284 14998 * ereport.io.scsi.cmd.disk.tran with driver-assessment
15285 14999 * of "fail" for any command to indicate this
15286 15000 * situation.
15287 15001 */
15288 15002 if (xp->xb_ena > 0) {
15289 15003 ASSERT(un->un_fm_private != NULL);
15290 15004 sfip = un->un_fm_private;
15291 15005 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
15292 15006 sd_ssc_extract_info(&sfip->fm_ssc, un,
15293 15007 xp->xb_pktp, bp, xp);
15294 15008 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15295 15009 }
15296 15010
15297 15011 /*
15298 15012 * We must use sd_return_failed_command_no_restart() to
15299 15013 * avoid a recursive call back into sd_start_cmds().
15300 15014 * However this also means that we must keep processing
15301 15015 * the waitq here in order to avoid stalling.
15302 15016 */
15303 15017 sd_return_failed_command_no_restart(un, bp, EIO);
15304 15018
15305 15019 /*
15306 15020 * Notify any threads waiting in sd_ddi_suspend() that
15307 15021 * a command completion has occurred.
15308 15022 */
15309 15023 if (un->un_state == SD_STATE_SUSPENDED) {
15310 15024 cv_broadcast(&un->un_disk_busy_cv);
15311 15025 }
15312 15026
15313 15027 if (bp == immed_bp) {
15314 15028 /* immed_bp is gone by now, so clear this */
15315 15029 immed_bp = NULL;
15316 15030 }
15317 15031 break;
15318 15032 }
15319 15033
15320 15034 } while (immed_bp == NULL);
15321 15035
15322 15036 exit:
15323 15037 ASSERT(mutex_owned(SD_MUTEX(un)));
15324 15038 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
15325 15039 }
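
/*
 * A minimal user-level sketch (not part of the diff): the four waitq
 * gating conditions checked near the top of sd_start_cmds, folded into
 * one predicate. The struct is a hypothetical snapshot of the un_*
 * fields the real code tests while holding SD_MUTEX.
 */
#include <assert.h>
#include <stdbool.h>

struct waitq_gate {
	int	ncmds_in_transport;	/* un_ncmds_in_transport */
	int	throttle;		/* un_throttle */
	bool	retry_pending;		/* un_retry_bp != NULL */
	bool	startstop_pending;	/* un_startstop_timeid != NULL */
	bool	direct_prio_pending;	/* un_direct_priority_timeid != NULL */
};

static bool
can_issue_from_waitq(const struct waitq_gate *g)
{
	return (g->ncmds_in_transport < g->throttle &&
	    !g->retry_pending && !g->startstop_pending &&
	    !g->direct_prio_pending);
}

int
main(void)
{
	struct waitq_gate g = { 2, 8, false, false, false };

	assert(can_issue_from_waitq(&g));
	g.retry_pending = true;		/* a pending retry stalls the waitq */
	assert(!can_issue_from_waitq(&g));
	return (0);
}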
15326 15040
15327 15041
15328 15042 /*
15329 15043 * Function: sd_return_command
15330 15044 *
15331 15045 * Description: Returns a command to its originator (with or without an
15332 15046 * error). Also starts commands waiting to be transported
15333 15047 * to the target.
15334 15048 *
15335 15049 * Context: May be called from interrupt, kernel, or timeout context
15336 15050 */
15337 15051
15338 15052 static void
15339 15053 sd_return_command(struct sd_lun *un, struct buf *bp)
15340 15054 {
15341 15055 struct sd_xbuf *xp;
15342 15056 struct scsi_pkt *pktp;
15343 15057 struct sd_fm_internal *sfip;
15344 15058
15345 15059 ASSERT(bp != NULL);
15346 15060 ASSERT(un != NULL);
15347 15061 ASSERT(mutex_owned(SD_MUTEX(un)));
15348 15062 ASSERT(bp != un->un_rqs_bp);
15349 15063 xp = SD_GET_XBUF(bp);
15350 15064 ASSERT(xp != NULL);
15351 15065
15352 15066 pktp = SD_GET_PKTP(bp);
15353 15067 sfip = (struct sd_fm_internal *)un->un_fm_private;
15354 15068 ASSERT(sfip != NULL);
15355 15069
15356 15070 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");
15357 15071
15358 15072 /*
15359 15073 * Note: check for the "sdrestart failed" case.
15360 15074 */
15361 15075 if ((un->un_partial_dma_supported == 1) &&
15362 15076 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
15363 15077 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
|
↓ open down ↓ |
108 lines elided |
↑ open up ↑ |
15364 15078 (xp->xb_pktp->pkt_resid == 0)) {
15365 15079
15366 15080 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
15367 15081 /*
15368 15082 * Successfully set up next portion of cmd
15369 15083 * transfer, try sending it
15370 15084 */
15371 15085 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15372 15086 NULL, NULL, 0, (clock_t)0, NULL);
15373 15087 sd_start_cmds(un, NULL);
15374 - return; /* Note:x86: need a return here? */
15088 + return; /* XXX need a return here? */
15375 15089 }
15376 15090 }
15377 15091
15378 15092 /*
15379 15093 * If this is the failfast bp, clear it from un_failfast_bp. This
15380 15094 * can happen if upon being re-tried the failfast bp either
15381 15095 * succeeded or encountered another error (possibly even a different
15382 15096 * error than the one that precipitated the failfast state, but in
15383 15097 * that case it would have had to exhaust retries as well). Regardless,
15384 15098 * this should not occur whenever the instance is in the active
15385 15099 * failfast state.
15386 15100 */
15387 15101 if (bp == un->un_failfast_bp) {
15388 15102 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15389 15103 un->un_failfast_bp = NULL;
15390 15104 }
15391 15105
15392 15106 /*
15393 15107 * Clear the failfast state upon successful completion of ANY cmd.
15394 15108 */
15395 15109 if (bp->b_error == 0) {
15396 15110 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15397 15111 /*
15398 15112 * If this is a successful command, but used to be retried,
15399 15113 * we will take it as a recovered command and post an
15400 15114 * ereport with driver-assessment of "recovered".
15401 15115 */
15402 15116 if (xp->xb_ena > 0) {
15403 15117 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15404 15118 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
15405 15119 }
15406 15120 } else {
15407 15121 /*
15408 15122 * If this is a failed non-USCSI command we will post an
15409 15123 * ereport with driver-assessment set accordingly("fail" or
15410 15124 * "fatal").
15411 15125 */
15412 15126 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15413 15127 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15414 15128 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15415 15129 }
15416 15130 }
15417 15131
15418 15132 /*
15419 15133 * This is used if the command was retried one or more times. Show that
15420 15134 * we are done with it, and allow processing of the waitq to resume.
15421 15135 */
15422 15136 if (bp == un->un_retry_bp) {
15423 15137 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15424 15138 "sd_return_command: un:0x%p: "
15425 15139 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15426 15140 un->un_retry_bp = NULL;
15427 15141 un->un_retry_statp = NULL;
15428 15142 }
15429 15143
15430 15144 SD_UPDATE_RDWR_STATS(un, bp);
15431 15145 SD_UPDATE_PARTITION_STATS(un, bp);
15432 15146
15433 15147 switch (un->un_state) {
15434 15148 case SD_STATE_SUSPENDED:
15435 15149 /*
15436 15150 * Notify any threads waiting in sd_ddi_suspend() that
15437 15151 * a command completion has occurred.
15438 15152 */
15439 15153 cv_broadcast(&un->un_disk_busy_cv);
15440 15154 break;
15441 15155 default:
15442 15156 sd_start_cmds(un, NULL);
15443 15157 break;
15444 15158 }
15445 15159
15446 15160 /* Return this command up the iodone chain to its originator. */
15447 15161 mutex_exit(SD_MUTEX(un));
15448 15162
15449 15163 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15450 15164 xp->xb_pktp = NULL;
15451 15165
15452 15166 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15453 15167
15454 15168 ASSERT(!mutex_owned(SD_MUTEX(un)));
15455 15169 mutex_enter(SD_MUTEX(un));
15456 15170
15457 15171 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
15458 15172 }
15459 15173
15460 15174
15461 15175 /*
15462 15176 * Function: sd_return_failed_command
15463 15177 *
15464 15178 * Description: Command completion when an error occurred.
15465 15179 *
15466 15180 * Context: May be called from interrupt context
15467 15181 */
15468 15182
15469 15183 static void
15470 15184 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
15471 15185 {
15472 15186 ASSERT(bp != NULL);
15473 15187 ASSERT(un != NULL);
15474 15188 ASSERT(mutex_owned(SD_MUTEX(un)));
15475 15189
15476 15190 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15477 15191 "sd_return_failed_command: entry\n");
15478 15192
15479 15193 /*
15480 15194 * b_resid could already be nonzero due to a partial data
15481 15195 * transfer, so do not change it here.
15482 15196 */
15483 15197 SD_BIOERROR(bp, errcode);
15484 15198
15485 15199 sd_return_command(un, bp);
15486 15200 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15487 15201 "sd_return_failed_command: exit\n");
15488 15202 }
15489 15203
15490 15204
15491 15205 /*
15492 15206 * Function: sd_return_failed_command_no_restart
15493 15207 *
15494 15208 * Description: Same as sd_return_failed_command, but ensures that no
15495 15209 * call back into sd_start_cmds will be issued.
15496 15210 *
15497 15211 * Context: May be called from interrupt context
15498 15212 */
15499 15213
15500 15214 static void
15501 15215 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
15502 15216 int errcode)
15503 15217 {
15504 15218 struct sd_xbuf *xp;
15505 15219
15506 15220 ASSERT(bp != NULL);
15507 15221 ASSERT(un != NULL);
15508 15222 ASSERT(mutex_owned(SD_MUTEX(un)));
15509 15223 xp = SD_GET_XBUF(bp);
15510 15224 ASSERT(xp != NULL);
15511 15225 ASSERT(errcode != 0);
15512 15226
15513 15227 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15514 15228 "sd_return_failed_command_no_restart: entry\n");
15515 15229
15516 15230 /*
15517 15231 * b_resid could already be nonzero due to a partial data
15518 15232 * transfer, so do not change it here.
15519 15233 */
15520 15234 SD_BIOERROR(bp, errcode);
15521 15235
15522 15236 /*
15523 15237 * If this is the failfast bp, clear it. This can happen if the
15524 15238 	 * failfast bp encountered a fatal error when we attempted to
15525 15239 * re-try it (such as a scsi_transport(9F) failure). However
15526 15240 * we should NOT be in an active failfast state if the failfast
15527 15241 * bp is not NULL.
15528 15242 */
15529 15243 if (bp == un->un_failfast_bp) {
15530 15244 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15531 15245 un->un_failfast_bp = NULL;
15532 15246 }
15533 15247
15534 15248 if (bp == un->un_retry_bp) {
15535 15249 /*
15536 15250 * This command was retried one or more times. Show that we are
15537 15251 * done with it, and allow processing of the waitq to resume.
15538 15252 */
15539 15253 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15540 15254 "sd_return_failed_command_no_restart: "
15541 15255 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15542 15256 un->un_retry_bp = NULL;
15543 15257 un->un_retry_statp = NULL;
15544 15258 }
15545 15259
15546 15260 SD_UPDATE_RDWR_STATS(un, bp);
15547 15261 SD_UPDATE_PARTITION_STATS(un, bp);
15548 15262
15549 15263 mutex_exit(SD_MUTEX(un));
15550 15264
15551 15265 if (xp->xb_pktp != NULL) {
15552 15266 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15553 15267 xp->xb_pktp = NULL;
15554 15268 }
15555 15269
15556 15270 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15557 15271
15558 15272 mutex_enter(SD_MUTEX(un));
15559 15273
15560 15274 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15561 15275 "sd_return_failed_command_no_restart: exit\n");
15562 15276 }
15563 15277
15564 15278
15565 15279 /*
15566 15280 * Function: sd_retry_command
15567 15281 *
15568 15282 * Description: queue up a command for retry, or (optionally) fail it
15569 15283 * if retry counts are exhausted.
15570 15284 *
15571 15285 * Arguments: un - Pointer to the sd_lun struct for the target.
15572 15286 *
15573 15287 * bp - Pointer to the buf for the command to be retried.
15574 15288 *
15575 15289 * retry_check_flag - Flag to see which (if any) of the retry
15576 15290 * counts should be decremented/checked. If the indicated
15577 15291 * retry count is exhausted, then the command will not be
15578 15292 * retried; it will be failed instead. This should use a
15579 15293 * value equal to one of the following:
15580 15294 *
15581 15295 * SD_RETRIES_NOCHECK
15582 15296 *			SD_RETRIES_STANDARD
15583 15297 * SD_RETRIES_VICTIM
15584 15298 *
15585 15299 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15586 15300 *			if the check should be made to see if FLAG_ISOLATE is set
15587 15301 * in the pkt. If FLAG_ISOLATE is set, then the command is
15588 15302 * not retried, it is simply failed.
15589 15303 *
15304 + * Optionally may be bitwise-OR'ed with SD_RETRIES_FAILFAST
15305 + * to indicate a retry following a command timeout, and check
15306 + * if the target should transition to failfast pending or
15307 + * failfast active. If the buf has B_FAILFAST set, the
15308 + * command should be failed when failfast is active.
15309 + *
15590 15310 * user_funcp - Ptr to function to call before dispatching the
15591 15311 * command. May be NULL if no action needs to be performed.
15592 15312 * (Primarily intended for printing messages.)
15593 15313 *
15594 15314 * user_arg - Optional argument to be passed along to
15595 15315 * the user_funcp call.
15596 15316 *
15597 15317 * failure_code - errno return code to set in the bp if the
15598 15318 * command is going to be failed.
15599 15319 *
15600 15320 * retry_delay - Retry delay interval in (clock_t) units. May
15601 15321 *			be zero, which indicates that the command should be
15602 15322 *			retried immediately (i.e., without an intervening delay).
15603 15323 *
15604 15324 * statp - Ptr to kstat function to be updated if the command
15605 15325 * is queued for a delayed retry. May be NULL if no kstat
15606 15326 * update is desired.
15607 15327 *
15608 15328 * Context: May be called from interrupt context.
15609 15329 */
15610 15330
15611 15331 static void
15612 15332 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15613 15333 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int code),
15614 15334 void *user_arg, int failure_code, clock_t retry_delay,
15615 15335 void (*statp)(kstat_io_t *))
15616 15336 {
15617 15337 struct sd_xbuf *xp;
15618 15338 struct scsi_pkt *pktp;
15619 15339 struct sd_fm_internal *sfip;
15620 15340
15621 15341 ASSERT(un != NULL);
15622 15342 ASSERT(mutex_owned(SD_MUTEX(un)));
15623 15343 ASSERT(bp != NULL);
15624 15344 xp = SD_GET_XBUF(bp);
15625 15345 ASSERT(xp != NULL);
15626 15346 pktp = SD_GET_PKTP(bp);
15627 15347 ASSERT(pktp != NULL);
15628 15348
15629 15349 sfip = (struct sd_fm_internal *)un->un_fm_private;
15630 15350 ASSERT(sfip != NULL);
15631 15351
15632 15352 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15633 15353 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15634 15354
15635 15355 /*
15636 15356 * If we are syncing or dumping, fail the command to avoid
15637 15357 * recursively calling back into scsi_transport().
15638 15358 */
15639 15359 if (ddi_in_panic()) {
15640 15360 goto fail_command_no_log;
15641 15361 }
15642 15362
15643 15363 /*
15644 15364 	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15645 15365 * log an error and fail the command.
15646 15366 */
15647 15367 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15648 15368 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15649 15369 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15650 15370 sd_dump_memory(un, SD_LOG_IO, "CDB",
15651 15371 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15652 15372 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15653 15373 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15654 15374 goto fail_command;
15655 15375 }
15656 15376
15657 15377 /*
15658 15378 	 * If we are suspended, then put the command onto the head of
15659 15379 	 * the wait queue since we don't want to start any more commands,
15660 15380 	 * and clear un_retry_bp. The command will be handled from the
15661 15381 	 * wait queue the next time we are resumed.
15662 15382 */
15663 15383 switch (un->un_state) {
15664 15384 case SD_STATE_SUSPENDED:
15665 15385 case SD_STATE_DUMPING:
15666 15386 bp->av_forw = un->un_waitq_headp;
15667 15387 un->un_waitq_headp = bp;
15668 15388 if (un->un_waitq_tailp == NULL) {
15669 15389 un->un_waitq_tailp = bp;
15670 15390 }
15671 15391 if (bp == un->un_retry_bp) {
15672 15392 un->un_retry_bp = NULL;
15673 15393 un->un_retry_statp = NULL;
15674 15394 }
15675 15395 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15676 15396 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15677 15397 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15678 15398 return;
15679 15399 default:
15680 15400 break;
15681 15401 }
15682 15402
15683 15403 /*
15684 15404 * If the caller wants us to check FLAG_ISOLATE, then see if that
15685 15405 * is set; if it is then we do not want to retry the command.
15686 15406 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15687 15407 */
15688 15408 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15689 15409 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15690 15410 goto fail_command;
15691 15411 }
15692 15412 }
15693 15413
15414 + if (sd_failfast_enable & (SD_FAILFAST_ENABLE_FAIL_RETRIES |
15415 + SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES)) {
15416 + if (sd_failfast_enable & SD_FAILFAST_ENABLE_FAIL_ALL_RETRIES) {
15417 + /*
15418 + * Fail ALL retries when in active failfast state,
15419 + * regardless of reason.
15420 + */
15421 + if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15422 + goto fail_command;
15423 + }
15424 + }
15425 + /*
15426 + * Treat bufs being retried as if they have the
15427 + * B_FAILFAST flag set.
15428 + */
15429 + bp->b_flags |= B_FAILFAST;
15430 + }
15694 15431
15695 15432 /*
15696 15433 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15697 15434 * command timeout or a selection timeout has occurred. This means
15698 15435 	 * that we were unable to establish any kind of communication with
15699 15436 * the target, and subsequent retries and/or commands are likely
15700 15437 * to encounter similar results and take a long time to complete.
15701 15438 *
15702 15439 * If this is a failfast error condition, we need to update the
15703 15440 * failfast state, even if this bp does not have B_FAILFAST set.
15704 15441 */
15705 15442 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15706 15443 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15707 15444 ASSERT(un->un_failfast_bp == NULL);
15708 15445 /*
15709 15446 * If we are already in the active failfast state, and
15710 15447 * another failfast error condition has been detected,
15711 15448 * then fail this command if it has B_FAILFAST set.
15712 15449 * If B_FAILFAST is clear, then maintain the legacy
15713 15450 	 * behavior of retrying heroically, even though this will
15714 15451 * take a lot more time to fail the command.
15715 15452 */
15716 15453 if (bp->b_flags & B_FAILFAST) {
15717 15454 goto fail_command;
15718 15455 }
15719 15456 } else {
15720 15457 /*
15721 15458 * We're not in the active failfast state, but we
15722 15459 * have a failfast error condition, so we must begin
15723 15460 * transition to the next state. We do this regardless
15724 15461 * of whether or not this bp has B_FAILFAST set.
15725 15462 */
15726 15463 if (un->un_failfast_bp == NULL) {
15727 15464 /*
15728 15465 * This is the first bp to meet a failfast
15729 15466 * condition so save it on un_failfast_bp &
15730 15467 * do normal retry processing. Do not enter
15731 15468 * active failfast state yet. This marks
15732 15469 * entry into the "failfast pending" state.
15733 15470 */
15734 15471 un->un_failfast_bp = bp;
15735 15472
15736 15473 } else if (un->un_failfast_bp == bp) {
15737 15474 /*
15738 15475 * This is the second time *this* bp has
15739 15476 * encountered a failfast error condition,
15740 15477 * so enter active failfast state & flush
15741 15478 * queues as appropriate.
15742 15479 */
15743 15480 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15744 15481 un->un_failfast_bp = NULL;
15745 - sd_failfast_flushq(un);
15482 + sd_failfast_flushq(un, B_FALSE);
15746 15483
15747 15484 /*
15748 15485 * Fail this bp now if B_FAILFAST set;
15749 15486 * otherwise continue with retries. (It would
15750 15487 * be pretty ironic if this bp succeeded on a
15751 15488 * subsequent retry after we just flushed all
15752 15489 * the queues).
15753 15490 */
15754 15491 if (bp->b_flags & B_FAILFAST) {
15755 15492 goto fail_command;
15756 15493 }
15757 15494
15758 15495 #if !defined(lint) && !defined(__lint)
15759 15496 } else {
15760 15497 /*
15761 15498 			 * If neither of the preceding conditionals
15762 15499 * was true, it means that there is some
15763 15500 			 * *other* bp that has met an initial failfast
15764 15501 * condition and is currently either being
15765 15502 * retried or is waiting to be retried. In
15766 15503 * that case we should perform normal retry
15767 15504 * processing on *this* bp, since there is a
15768 15505 * chance that the current failfast condition
15769 15506 * is transient and recoverable. If that does
15770 15507 * not turn out to be the case, then retries
15771 15508 * will be cleared when the wait queue is
15772 15509 * flushed anyway.
15773 15510 */
15774 15511 #endif
15775 15512 }
15776 15513 }
15777 15514 } else {
15778 15515 /*
15779 15516 * SD_RETRIES_FAILFAST is clear, which indicates that we
15780 15517 * likely were able to at least establish some level of
15781 15518 * communication with the target and subsequent commands
15782 15519 		 * and/or retries are likely to get through to the target.
15783 15520 		 * In this case we want to be aggressive about clearing
15784 15521 * the failfast state. Note that this does not affect
15785 15522 * the "failfast pending" condition.
15523 + *
15524 + * We limit this to retries that are not a side effect of an
15525 + * unrelated event, as it would be unwise to clear failfast
15526 + * active state when we see retries due to a reset.
15786 15527 */
15787 - un->un_failfast_state = SD_FAILFAST_INACTIVE;
15528 + if ((sd_failfast_enable & SD_FAILFAST_ENABLE_FORCE_INACTIVE) &&
15529 + (retry_check_flag & SD_RETRIES_MASK) != SD_RETRIES_VICTIM)
15530 + un->un_failfast_state = SD_FAILFAST_INACTIVE;
15788 15531 }
15789 15532
15790 15533
15791 15534 /*
15792 15535 * Check the specified retry count to see if we can still do
15793 15536 * any retries with this pkt before we should fail it.
15794 15537 */
15795 15538 switch (retry_check_flag & SD_RETRIES_MASK) {
15796 15539 case SD_RETRIES_VICTIM:
15797 15540 /*
15798 15541 * Check the victim retry count. If exhausted, then fall
15799 15542 * thru & check against the standard retry count.
15800 15543 */
15801 15544 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15802 15545 /* Increment count & proceed with the retry */
15803 15546 xp->xb_victim_retry_count++;
15804 15547 break;
15805 15548 }
15806 15549 /* Victim retries exhausted, fall back to std. retries... */
15807 15550 /* FALLTHRU */
15808 15551
15809 15552 case SD_RETRIES_STANDARD:
15810 15553 if (xp->xb_retry_count >= un->un_retry_count) {
15811 15554 /* Retries exhausted, fail the command */
15812 15555 SD_TRACE(SD_LOG_IO_CORE, un,
15813 15556 "sd_retry_command: retries exhausted!\n");
15814 15557 /*
15815 15558 * update b_resid for failed SCMD_READ & SCMD_WRITE
15816 15559 * commands with nonzero pkt_resid.
15817 15560 */
15818 15561 if ((pktp->pkt_reason == CMD_CMPLT) &&
15819 15562 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15820 15563 (pktp->pkt_resid != 0)) {
15821 15564 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15822 15565 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15823 15566 SD_UPDATE_B_RESID(bp, pktp);
15824 15567 }
15825 15568 }
15826 15569 goto fail_command;
15827 15570 }
15828 15571 xp->xb_retry_count++;
15829 15572 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15830 15573 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15831 15574 break;
15832 15575
15833 15576 case SD_RETRIES_UA:
15834 15577 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15835 15578 /* Retries exhausted, fail the command */
15836 15579 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15837 15580 "Unit Attention retries exhausted. "
15838 15581 "Check the target.\n");
15839 15582 goto fail_command;
15840 15583 }
15841 15584 xp->xb_ua_retry_count++;
15842 15585 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15843 15586 "sd_retry_command: retry count:%d\n",
15844 15587 xp->xb_ua_retry_count);
15845 15588 break;
15846 15589
15847 15590 case SD_RETRIES_BUSY:
15848 15591 if (xp->xb_retry_count >= un->un_busy_retry_count) {
15849 15592 /* Retries exhausted, fail the command */
15850 15593 SD_TRACE(SD_LOG_IO_CORE, un,
15851 15594 "sd_retry_command: retries exhausted!\n");
15852 15595 goto fail_command;
15853 15596 }
15854 15597 xp->xb_retry_count++;
15855 15598 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15856 15599 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15857 15600 break;
15858 15601
15859 15602 case SD_RETRIES_NOCHECK:
15860 15603 default:
15861 15604 /* No retry count to check. Just proceed with the retry */
15862 15605 break;
15863 15606 }
15864 15607
15865 15608 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15866 15609
15867 15610 /*
15868 15611 	 * If this is a non-USCSI command that failed during its last
15869 15612 	 * execution and is now being retried, we should post an ereport
15870 15613 	 * with a driver-assessment of the value "retry".
15871 15614 	 * For partial DMA, request sense, and STATUS_QFULL there are no
15872 15615 	 * hardware errors, so we bypass ereport posting.
15873 15616 */
15874 15617 if (failure_code != 0) {
15875 15618 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15876 15619 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15877 15620 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
15878 15621 }
15879 15622 }
15880 15623
15881 15624 /*
15882 15625 * If we were given a zero timeout, we must attempt to retry the
15883 15626 * command immediately (ie, without a delay).
15884 15627 */
15885 15628 if (retry_delay == 0) {
15886 15629 /*
15887 15630 * Check some limiting conditions to see if we can actually
15888 15631 * do the immediate retry. If we cannot, then we must
15889 15632 * fall back to queueing up a delayed retry.
15890 15633 */
15891 15634 if (un->un_ncmds_in_transport >= un->un_throttle) {
15892 15635 /*
15893 15636 * We are at the throttle limit for the target,
15894 15637 * fall back to delayed retry.
15895 15638 */
15896 15639 retry_delay = un->un_busy_timeout;
15897 15640 statp = kstat_waitq_enter;
15898 15641 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15899 15642 "sd_retry_command: immed. retry hit "
15900 15643 "throttle!\n");
15901 15644 } else {
15902 15645 /*
15903 15646 * We're clear to proceed with the immediate retry.
15904 15647 * First call the user-provided function (if any)
15905 15648 */
15906 15649 if (user_funcp != NULL) {
15907 15650 (*user_funcp)(un, bp, user_arg,
15908 15651 SD_IMMEDIATE_RETRY_ISSUED);
15909 15652 #ifdef __lock_lint
15910 15653 sd_print_incomplete_msg(un, bp, user_arg,
15911 15654 SD_IMMEDIATE_RETRY_ISSUED);
15912 15655 sd_print_cmd_incomplete_msg(un, bp, user_arg,
15913 15656 SD_IMMEDIATE_RETRY_ISSUED);
15914 15657 sd_print_sense_failed_msg(un, bp, user_arg,
15915 15658 SD_IMMEDIATE_RETRY_ISSUED);
15916 15659 #endif
15917 15660 }
15918 15661
15919 15662 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15920 15663 "sd_retry_command: issuing immediate retry\n");
15921 15664
15922 15665 /*
15923 15666 * Call sd_start_cmds() to transport the command to
15924 15667 * the target.
15925 15668 */
15926 15669 sd_start_cmds(un, bp);
15927 15670
15928 15671 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15929 15672 "sd_retry_command exit\n");
15930 15673 return;
15931 15674 }
15932 15675 }
15933 15676
15934 15677 /*
15935 15678 * Set up to retry the command after a delay.
15936 15679 * First call the user-provided function (if any)
15937 15680 */
15938 15681 if (user_funcp != NULL) {
15939 15682 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
15940 15683 }
15941 15684
15942 15685 sd_set_retry_bp(un, bp, retry_delay, statp);
15943 15686
15944 15687 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15945 15688 return;
15946 15689
15947 15690 fail_command:
15948 15691
15949 15692 if (user_funcp != NULL) {
15950 15693 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
15951 15694 }
15952 15695
15953 15696 fail_command_no_log:
15954 15697
15955 15698 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15956 15699 "sd_retry_command: returning failed command\n");
15957 15700
15958 15701 sd_return_failed_command(un, bp, failure_code);
15959 15702
15960 15703 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15961 15704 }
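
The failfast handling above is a small three-state machine: inactive, "failfast pending" (un_failfast_bp records the first victim), and active. A minimal userland sketch of just those transitions, with hypothetical names (ff_state, ff_pending, ff_on_timeout) standing in for the un_* fields:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	enum { FF_INACTIVE, FF_ACTIVE } ff_state = FF_INACTIVE;
	static const void *ff_pending;		/* models un_failfast_bp */

	/* Returns true if the request should fail instead of being retried. */
	static bool
	ff_on_timeout(const void *bp, bool b_failfast)
	{
		if (ff_state == FF_ACTIVE)
			return (b_failfast);	/* fail only B_FAILFAST bufs */
		if (ff_pending == NULL) {
			ff_pending = bp;	/* enter "failfast pending" */
		} else if (ff_pending == bp) {
			ff_state = FF_ACTIVE;	/* second hit on the same bp */
			ff_pending = NULL;	/* queues would be flushed here */
			return (b_failfast);
		}
		return (false);			/* normal retry processing */
	}

	int
	main(void)
	{
		int a, b;
		printf("%d\n", ff_on_timeout(&a, true));	/* 0: pending */
		printf("%d\n", ff_on_timeout(&b, true));	/* 0: other bp */
		printf("%d\n", ff_on_timeout(&a, true));	/* 1: now active */
		return (0);
	}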
15962 15705
15963 15706
15964 15707 /*
15965 15708 * Function: sd_set_retry_bp
15966 15709 *
15967 15710 * Description: Set up the given bp for retry.
15968 15711 *
15969 15712 * Arguments: un - ptr to associated softstate
15970 15713 * bp - ptr to buf(9S) for the command
15971 15714 * retry_delay - time interval before issuing retry (may be 0)
15972 15715 * statp - optional pointer to kstat function
15973 15716 *
15974 15717 * Context: May be called under interrupt context
15975 15718 */
15976 15719
15977 15720 static void
15978 15721 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
15979 15722 void (*statp)(kstat_io_t *))
15980 15723 {
15981 15724 ASSERT(un != NULL);
15982 15725 ASSERT(mutex_owned(SD_MUTEX(un)));
15983 15726 ASSERT(bp != NULL);
15984 15727
15985 15728 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15986 15729 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);
15987 15730
15988 15731 /*
15989 15732 * Indicate that the command is being retried. This will not allow any
15990 15733 * other commands on the wait queue to be transported to the target
15991 15734 * until this command has been completed (success or failure). The
15992 15735 * "retry command" is not transported to the target until the given
15993 15736 * time delay expires, unless the user specified a 0 retry_delay.
15994 15737 *
15995 15738 * Note: the timeout(9F) callback routine is what actually calls
15996 15739 * sd_start_cmds() to transport the command, with the exception of a
15997 15740 	 * zero retry_delay. The only current use of a zero retry delay
15998 15741 	 * is when a START_STOP_UNIT is sent to spin up a device.
15999 15742 */
16000 15743 if (un->un_retry_bp == NULL) {
16001 15744 ASSERT(un->un_retry_statp == NULL);
16002 15745 un->un_retry_bp = bp;
16003 15746
16004 15747 /*
16005 15748 * If the user has not specified a delay the command should
16006 15749 * be queued and no timeout should be scheduled.
16007 15750 */
16008 15751 if (retry_delay == 0) {
16009 15752 /*
16010 15753 * Save the kstat pointer that will be used in the
16011 15754 * call to SD_UPDATE_KSTATS() below, so that
16012 15755 * sd_start_cmds() can correctly decrement the waitq
16013 15756 * count when it is time to transport this command.
16014 15757 */
16015 15758 un->un_retry_statp = statp;
16016 15759 goto done;
16017 15760 }
16018 15761 }
16019 15762
16020 15763 if (un->un_retry_bp == bp) {
16021 15764 /*
16022 15765 * Save the kstat pointer that will be used in the call to
16023 15766 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
16024 15767 * correctly decrement the waitq count when it is time to
16025 15768 * transport this command.
16026 15769 */
16027 15770 un->un_retry_statp = statp;
16028 15771
16029 15772 /*
16030 15773 * Schedule a timeout if:
16031 15774 * 1) The user has specified a delay.
16032 15775 * 2) There is not a START_STOP_UNIT callback pending.
16033 15776 *
16034 15777 * If no delay has been specified, then it is up to the caller
16035 15778 * to ensure that IO processing continues without stalling.
16036 15779 * Effectively, this means that the caller will issue the
16037 15780 * required call to sd_start_cmds(). The START_STOP_UNIT
16038 15781 * callback does this after the START STOP UNIT command has
16039 15782 * completed. In either of these cases we should not schedule
16040 15783 * a timeout callback here. Also don't schedule the timeout if
16041 15784 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
16042 15785 */
16043 15786 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
16044 15787 (un->un_direct_priority_timeid == NULL)) {
16045 15788 un->un_retry_timeid =
16046 15789 timeout(sd_start_retry_command, un, retry_delay);
16047 15790 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16048 15791 "sd_set_retry_bp: setting timeout: un: 0x%p"
16049 15792 " bp:0x%p un_retry_timeid:0x%p\n",
16050 15793 un, bp, un->un_retry_timeid);
16051 15794 }
16052 15795 } else {
16053 15796 /*
16054 15797 * We only get in here if there is already another command
16055 15798 * waiting to be retried. In this case, we just put the
16056 15799 * given command onto the wait queue, so it can be transported
16057 15800 * after the current retry command has completed.
16058 15801 *
16059 15802 * Also we have to make sure that if the command at the head
16060 15803 * of the wait queue is the un_failfast_bp, that we do not
16061 15804 * put ahead of it any other commands that are to be retried.
16062 15805 */
16063 15806 if ((un->un_failfast_bp != NULL) &&
16064 15807 (un->un_failfast_bp == un->un_waitq_headp)) {
16065 15808 /*
16066 15809 * Enqueue this command AFTER the first command on
16067 15810 * the wait queue (which is also un_failfast_bp).
16068 15811 */
16069 15812 bp->av_forw = un->un_waitq_headp->av_forw;
16070 15813 un->un_waitq_headp->av_forw = bp;
16071 15814 if (un->un_waitq_headp == un->un_waitq_tailp) {
16072 15815 un->un_waitq_tailp = bp;
16073 15816 }
16074 15817 } else {
16075 15818 /* Enqueue this command at the head of the waitq. */
16076 15819 bp->av_forw = un->un_waitq_headp;
16077 15820 un->un_waitq_headp = bp;
16078 15821 if (un->un_waitq_tailp == NULL) {
16079 15822 un->un_waitq_tailp = bp;
16080 15823 }
16081 15824 }
16082 15825
16083 15826 if (statp == NULL) {
16084 15827 statp = kstat_waitq_enter;
16085 15828 }
16086 15829 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16087 15830 "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
16088 15831 }
16089 15832
16090 15833 done:
16091 15834 if (statp != NULL) {
16092 15835 SD_UPDATE_KSTATS(un, statp, bp);
16093 15836 }
16094 15837
16095 15838 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16096 15839 "sd_set_retry_bp: exit un:0x%p\n", un);
16097 15840 }
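
The queueing rule in sd_set_retry_bp() is worth calling out: a retried buf normally jumps to the head of the singly linked av_forw wait queue, but never ahead of a pending failfast buf. A compilable sketch of only that insertion logic, using a hypothetical node type in place of struct buf:

	struct node { struct node *next; };
	static struct node *headp, *tailp;	/* model un_waitq_headp/tailp */
	static struct node *failfast;		/* models un_failfast_bp */

	static void
	requeue_retry(struct node *n)
	{
		if (failfast != NULL && failfast == headp) {
			/* Enqueue AFTER the failfast request at the head. */
			n->next = headp->next;
			headp->next = n;
			if (headp == tailp)
				tailp = n;
		} else {
			/* Enqueue at the head of the wait queue. */
			n->next = headp;
			headp = n;
			if (tailp == NULL)
				tailp = n;
		}
	}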
16098 15841
16099 15842
16100 15843 /*
16101 15844 * Function: sd_start_retry_command
16102 15845 *
16103 15846 * Description: Start the command that has been waiting on the target's
16104 15847 * retry queue. Called from timeout(9F) context after the
16105 15848 * retry delay interval has expired.
16106 15849 *
16107 15850 * Arguments: arg - pointer to associated softstate for the device.
16108 15851 *
16109 15852 * Context: timeout(9F) thread context. May not sleep.
16110 15853 */
16111 15854
16112 15855 static void
16113 15856 sd_start_retry_command(void *arg)
16114 15857 {
16115 15858 struct sd_lun *un = arg;
16116 15859
16117 15860 ASSERT(un != NULL);
16118 15861 ASSERT(!mutex_owned(SD_MUTEX(un)));
16119 15862
16120 15863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16121 15864 "sd_start_retry_command: entry\n");
16122 15865
16123 15866 mutex_enter(SD_MUTEX(un));
16124 15867
16125 15868 un->un_retry_timeid = NULL;
16126 15869
16127 15870 if (un->un_retry_bp != NULL) {
16128 15871 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16129 15872 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
16130 15873 un, un->un_retry_bp);
16131 15874 sd_start_cmds(un, un->un_retry_bp);
16132 15875 }
16133 15876
16134 15877 mutex_exit(SD_MUTEX(un));
16135 15878
16136 15879 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16137 15880 "sd_start_retry_command: exit\n");
16138 15881 }
16139 15882
16140 15883 /*
16141 15884 * Function: sd_rmw_msg_print_handler
16142 15885 *
16143 15886  * Description: If RMW mode is enabled and the warning message has been
16144 15887  *		triggered, print the misaligned I/O count for the interval.
16145 15888 *
16146 15889 * Arguments: arg - pointer to associated softstate for the device.
16147 15890 *
16148 15891 * Context: timeout(9F) thread context. May not sleep.
16149 15892 */
16150 15893 static void
16151 15894 sd_rmw_msg_print_handler(void *arg)
16152 15895 {
16153 15896 struct sd_lun *un = arg;
16154 15897
16155 15898 ASSERT(un != NULL);
16156 15899 ASSERT(!mutex_owned(SD_MUTEX(un)));
16157 15900
16158 15901 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16159 15902 "sd_rmw_msg_print_handler: entry\n");
16160 15903
16161 15904 mutex_enter(SD_MUTEX(un));
16162 15905
16163 15906 if (un->un_rmw_incre_count > 0) {
16164 15907 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16165 15908 "%"PRIu64" I/O requests are not aligned with %d disk "
16166 15909 "sector size in %ld seconds. They are handled through "
16167 15910 "Read Modify Write but the performance is very low!\n",
16168 15911 un->un_rmw_incre_count, un->un_tgt_blocksize,
16169 15912 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
16170 15913 un->un_rmw_incre_count = 0;
16171 15914 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
16172 15915 un, SD_RMW_MSG_PRINT_TIMEOUT);
16173 15916 } else {
16174 15917 un->un_rmw_msg_timeid = NULL;
16175 15918 }
16176 15919
16177 15920 mutex_exit(SD_MUTEX(un));
16178 15921
16179 15922 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16180 15923 "sd_rmw_msg_print_handler: exit\n");
16181 15924 }
16182 15925
16183 15926 /*
16184 15927 * Function: sd_start_direct_priority_command
16185 15928 *
16186 15929 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
16187 15930 * received TRAN_BUSY when we called scsi_transport() to send it
16188 15931 * to the underlying HBA. This function is called from timeout(9F)
16189 15932 * context after the delay interval has expired.
16190 15933 *
16191 15934 * Arguments: arg - pointer to associated buf(9S) to be restarted.
16192 15935 *
16193 15936 * Context: timeout(9F) thread context. May not sleep.
16194 15937 */
16195 15938
16196 15939 static void
16197 15940 sd_start_direct_priority_command(void *arg)
16198 15941 {
16199 15942 struct buf *priority_bp = arg;
16200 15943 struct sd_lun *un;
16201 15944
16202 15945 ASSERT(priority_bp != NULL);
16203 15946 un = SD_GET_UN(priority_bp);
16204 15947 ASSERT(un != NULL);
16205 15948 ASSERT(!mutex_owned(SD_MUTEX(un)));
16206 15949
16207 15950 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16208 15951 "sd_start_direct_priority_command: entry\n");
16209 15952
16210 15953 mutex_enter(SD_MUTEX(un));
16211 15954 un->un_direct_priority_timeid = NULL;
16212 15955 sd_start_cmds(un, priority_bp);
16213 15956 mutex_exit(SD_MUTEX(un));
16214 15957
16215 15958 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16216 15959 "sd_start_direct_priority_command: exit\n");
16217 15960 }
16218 15961
16219 15962
16220 15963 /*
16221 15964 * Function: sd_send_request_sense_command
16222 15965 *
16223 15966 * Description: Sends a REQUEST SENSE command to the target
16224 15967 *
16225 15968 * Context: May be called from interrupt context.
16226 15969 */
16227 15970
16228 -static void
16229 -sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
16230 - struct scsi_pkt *pktp)
15971 +static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
15972 + int retry_check_flag, struct scsi_pkt *pktp)
16231 15973 {
16232 15974 ASSERT(bp != NULL);
16233 15975 ASSERT(un != NULL);
16234 15976 ASSERT(mutex_owned(SD_MUTEX(un)));
16235 15977
16236 15978 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
16237 15979 "entry: buf:0x%p\n", bp);
16238 15980
16239 15981 /*
16240 15982 * If we are syncing or dumping, then fail the command to avoid a
16241 15983 * recursive callback into scsi_transport(). Also fail the command
16242 15984 * if we are suspended (legacy behavior).
16243 15985 */
16244 15986 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
16245 15987 (un->un_state == SD_STATE_DUMPING)) {
16246 15988 sd_return_failed_command(un, bp, EIO);
16247 15989 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16248 15990 "sd_send_request_sense_command: syncing/dumping, exit\n");
16249 15991 return;
16250 15992 }
16251 15993
16252 15994 /*
16253 15995 * Retry the failed command and don't issue the request sense if:
16254 15996 * 1) the sense buf is busy
16255 15997 * 2) we have 1 or more outstanding commands on the target
16256 15998 * (the sense data will be cleared or invalidated any way)
16257 - *
16258 - * Note: There could be an issue with not checking a retry limit here,
16259 - * the problem is determining which retry limit to check.
16260 15999 */
16261 16000 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
16262 16001 /* Don't retry if the command is flagged as non-retryable */
16263 16002 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
16264 - sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
16003 + sd_retry_command(un, bp, retry_check_flag,
16265 16004 NULL, NULL, 0, un->un_busy_timeout,
16266 16005 kstat_waitq_enter);
16267 16006 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16268 16007 "sd_send_request_sense_command: "
16269 16008 "at full throttle, retrying exit\n");
16270 16009 } else {
16271 16010 sd_return_failed_command(un, bp, EIO);
16272 16011 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16273 16012 "sd_send_request_sense_command: "
16274 16013 "at full throttle, non-retryable exit\n");
16275 16014 }
16276 16015 return;
16277 16016 }
16278 16017
16279 16018 sd_mark_rqs_busy(un, bp);
16280 16019 sd_start_cmds(un, un->un_rqs_bp);
16281 16020
16282 16021 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16283 16022 "sd_send_request_sense_command: exit\n");
16284 16023 }
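
The branches above reduce to a small decision table: fail outright while panicking, suspended, or dumping; retry the original command while the sense resources are busy or other commands are in flight (unless FLAG_DIAGNOSE forbids retries); otherwise claim the sense buf and transport the REQUEST SENSE. A hedged sketch with hypothetical names:

	enum rqs_action { RQS_FAIL, RQS_RETRY_ORIGINAL, RQS_ISSUE_SENSE };

	static enum rqs_action
	rqs_decide(int panicking, int suspended_or_dumping, int sense_busy,
	    int ncmds_in_transport, int flag_diagnose)
	{
		if (panicking || suspended_or_dumping)
			return (RQS_FAIL);	/* avoid recursive transport */
		if (sense_busy || ncmds_in_transport > 0) {
			/* in-flight I/O would invalidate the sense data */
			return (flag_diagnose ? RQS_FAIL : RQS_RETRY_ORIGINAL);
		}
		return (RQS_ISSUE_SENSE);	/* mark rqs busy, then send */
	}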
16285 16024
16286 16025
16287 16026 /*
16288 16027 * Function: sd_mark_rqs_busy
16289 16028 *
16290 16029 * Description: Indicate that the request sense bp for this instance is
16291 16030 * in use.
16292 16031 *
16293 16032 * Context: May be called under interrupt context
16294 16033 */
16295 16034
16296 16035 static void
16297 16036 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
16298 16037 {
16299 16038 struct sd_xbuf *sense_xp;
16300 16039
16301 16040 ASSERT(un != NULL);
16302 16041 ASSERT(bp != NULL);
16303 16042 ASSERT(mutex_owned(SD_MUTEX(un)));
16304 16043 ASSERT(un->un_sense_isbusy == 0);
16305 16044
16306 16045 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
16307 16046 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
16308 16047
16309 16048 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
16310 16049 ASSERT(sense_xp != NULL);
16311 16050
16312 16051 SD_INFO(SD_LOG_IO, un,
16313 16052 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
16314 16053
16315 16054 ASSERT(sense_xp->xb_pktp != NULL);
16316 16055 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
16317 16056 == (FLAG_SENSING | FLAG_HEAD));
16318 16057
16319 16058 un->un_sense_isbusy = 1;
16320 16059 un->un_rqs_bp->b_resid = 0;
16321 16060 sense_xp->xb_pktp->pkt_resid = 0;
16322 16061 sense_xp->xb_pktp->pkt_reason = 0;
16323 16062
16324 16063 /* So we can get back the bp at interrupt time! */
16325 16064 sense_xp->xb_sense_bp = bp;
16326 16065
16327 16066 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
16328 16067
16329 16068 /*
16330 16069 * Mark this buf as awaiting sense data. (This is already set in
16331 16070 * the pkt_flags for the RQS packet.)
16332 16071 */
16333 16072 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
16334 16073
16335 16074 /* Request sense down same path */
16336 16075 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
16337 16076 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
16338 16077 sense_xp->xb_pktp->pkt_path_instance =
16339 16078 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
16340 16079
16341 16080 sense_xp->xb_retry_count = 0;
16342 16081 sense_xp->xb_victim_retry_count = 0;
16343 16082 sense_xp->xb_ua_retry_count = 0;
16344 16083 sense_xp->xb_nr_retry_count = 0;
16345 16084 sense_xp->xb_dma_resid = 0;
16346 16085
16347 16086 /* Clean up the fields for auto-request sense */
16348 16087 sense_xp->xb_sense_status = 0;
16349 16088 sense_xp->xb_sense_state = 0;
16350 16089 sense_xp->xb_sense_resid = 0;
16351 16090 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
16352 16091
16353 16092 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
16354 16093 }
16355 16094
16356 16095
16357 16096 /*
16358 16097 * Function: sd_mark_rqs_idle
16359 16098 *
16360 16099 * Description: SD_MUTEX must be held continuously through this routine
16361 16100 * to prevent reuse of the rqs struct before the caller can
16362 16101  *		complete its processing.
16363 16102 *
16364 16103 * Return Code: Pointer to the RQS buf
16365 16104 *
16366 16105 * Context: May be called under interrupt context
16367 16106 */
16368 16107
16369 16108 static struct buf *
16370 16109 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
16371 16110 {
16372 16111 struct buf *bp;
16373 16112 ASSERT(un != NULL);
16374 16113 ASSERT(sense_xp != NULL);
16375 16114 ASSERT(mutex_owned(SD_MUTEX(un)));
16376 16115 ASSERT(un->un_sense_isbusy != 0);
16377 16116
16378 16117 un->un_sense_isbusy = 0;
16379 16118 bp = sense_xp->xb_sense_bp;
16380 16119 sense_xp->xb_sense_bp = NULL;
16381 16120
16382 16121 /* This pkt is no longer interested in getting sense data */
16383 16122 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
16384 16123
16385 16124 return (bp);
16386 16125 }
16387 16126
16388 16127
16389 16128
16390 16129 /*
16391 16130 * Function: sd_alloc_rqs
16392 16131 *
16393 16132 * Description: Set up the unit to receive auto request sense data
16394 16133 *
16395 16134 * Return Code: DDI_SUCCESS or DDI_FAILURE
16396 16135 *
16397 16136 * Context: Called under attach(9E) context
16398 16137 */
16399 16138
16400 16139 static int
16401 16140 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
16402 16141 {
16403 16142 struct sd_xbuf *xp;
16404 16143
16405 16144 ASSERT(un != NULL);
16406 16145 ASSERT(!mutex_owned(SD_MUTEX(un)));
16407 16146 ASSERT(un->un_rqs_bp == NULL);
16408 16147 ASSERT(un->un_rqs_pktp == NULL);
16409 16148
16410 16149 /*
16411 16150 * First allocate the required buf and scsi_pkt structs, then set up
16412 16151 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16413 16152 */
16414 16153 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
16415 16154 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
16416 16155 if (un->un_rqs_bp == NULL) {
16417 16156 return (DDI_FAILURE);
16418 16157 }
16419 16158
16420 16159 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
16421 16160 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
16422 16161
16423 16162 if (un->un_rqs_pktp == NULL) {
16424 16163 sd_free_rqs(un);
16425 16164 return (DDI_FAILURE);
16426 16165 }
16427 16166
16428 16167 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16429 16168 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16430 16169 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16431 16170
16432 16171 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16433 16172
16434 16173 /* Set up the other needed members in the ARQ scsi_pkt. */
16435 - un->un_rqs_pktp->pkt_comp = sdintr;
16436 - un->un_rqs_pktp->pkt_time = sd_io_time;
16437 - un->un_rqs_pktp->pkt_flags |=
16438 - (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
16174 + un->un_rqs_pktp->pkt_comp = sdintr;
16175 + un->un_rqs_pktp->pkt_time = ((ISCD(un)) ? 2 : 1) *
16176 + (ushort_t)un->un_io_time;
16177 + un->un_rqs_pktp->pkt_flags |= (FLAG_SENSING | FLAG_HEAD);
16439 16178
16440 16179 /*
16441 16180 * Allocate & init the sd_xbuf struct for the RQS command. Do not
16442 16181 	 * provide any initpkt, destroypkt routines as we take care of
16443 16182 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16444 16183 */
16445 16184 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16446 16185 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16447 16186 xp->xb_pktp = un->un_rqs_pktp;
16448 16187 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16449 16188 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16450 16189 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16451 16190
16452 16191 /*
16453 16192 * Save the pointer to the request sense private bp so it can
16454 16193 * be retrieved in sdintr.
16455 16194 */
16456 16195 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16457 16196 ASSERT(un->un_rqs_bp->b_private == xp);
16458 16197
16459 16198 /*
16460 16199 * See if the HBA supports auto-request sense for the specified
16461 16200 * target/lun. If it does, then try to enable it (if not already
16462 16201 * enabled).
16463 16202 *
16464 16203 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16465 16204 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16466 16205 * return success. However, in both of these cases ARQ is always
16467 16206 * enabled and scsi_ifgetcap will always return true. The best approach
16468 16207 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16469 16208 *
16470 16209 	 * The 3rd case is the HBA (adp) always returning enabled on
16471 16210 	 * scsi_ifgetcap even when it's not enabled; the best approach
16472 16211 	 * is to issue a scsi_ifsetcap then a scsi_ifgetcap.
16473 - * Note: this case is to circumvent the Adaptec bug. (x86 only)
16474 16212 */
16475 16213
16476 16214 if (un->un_f_is_fibre == TRUE) {
16477 16215 un->un_f_arq_enabled = TRUE;
16478 16216 } else {
16479 -#if defined(__i386) || defined(__amd64)
16480 16217 /*
16481 - * Circumvent the Adaptec bug, remove this code when
16482 - * the bug is fixed
16218 + * XXX Circumvent the Adaptec bug, remove this code when
16219 + * the bug is fixed.
16483 16220 */
16484 16221 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16485 -#endif
16486 16222 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16487 16223 case 0:
16488 16224 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16489 16225 "sd_alloc_rqs: HBA supports ARQ\n");
16490 16226 /*
16491 16227 * ARQ is supported by this HBA but currently is not
16492 16228 * enabled. Attempt to enable it and if successful then
16493 16229 * mark this instance as ARQ enabled.
16494 16230 */
16495 16231 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16496 16232 == 1) {
16497 16233 /* Successfully enabled ARQ in the HBA */
16498 16234 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16499 16235 "sd_alloc_rqs: ARQ enabled\n");
16500 16236 un->un_f_arq_enabled = TRUE;
16501 16237 } else {
16502 16238 /* Could not enable ARQ in the HBA */
16503 16239 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16504 16240 "sd_alloc_rqs: failed ARQ enable\n");
16505 16241 un->un_f_arq_enabled = FALSE;
16506 16242 }
16507 16243 break;
16508 16244 case 1:
16509 16245 /*
16510 16246 * ARQ is supported by this HBA and is already enabled.
16511 16247 * Just mark ARQ as enabled for this instance.
16512 16248 */
16513 16249 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16514 16250 "sd_alloc_rqs: ARQ already enabled\n");
16515 16251 un->un_f_arq_enabled = TRUE;
16516 16252 break;
16517 16253 default:
16518 16254 /*
16519 16255 * ARQ is not supported by this HBA; disable it for this
16520 16256 * instance.
16521 16257 */
16522 16258 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16523 16259 "sd_alloc_rqs: HBA does not support ARQ\n");
16524 16260 un->un_f_arq_enabled = FALSE;
16525 16261 break;
16526 16262 }
16527 16263 }
16528 16264
16529 16265 return (DDI_SUCCESS);
16530 16266 }
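
The getcap/setcap dance above can be condensed into one probe helper. This is only a sketch of the sequence sd_alloc_rqs() performs; arq_probe() is a hypothetical wrapper, while scsi_ifgetcap(9F) and scsi_ifsetcap(9F) are the real DDI calls used in the function:

	#include <sys/scsi/scsi.h>

	static boolean_t
	arq_probe(struct scsi_address *ap)
	{
		switch (scsi_ifgetcap(ap, "auto-rqsense", 1)) {
		case 0:		/* supported but disabled: try to enable */
			return (scsi_ifsetcap(ap, "auto-rqsense", 1, 1) == 1);
		case 1:		/* already enabled */
			return (B_TRUE);
		default:	/* not supported by this HBA */
			return (B_FALSE);
		}
	}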
16531 16267
16532 16268
16533 16269 /*
16534 16270 * Function: sd_free_rqs
16535 16271 *
16536 16272  * Description: Cleanup for the per-instance RQS command.
16537 16273 *
16538 16274 * Context: Kernel thread context
16539 16275 */
16540 16276
16541 16277 static void
16542 16278 sd_free_rqs(struct sd_lun *un)
16543 16279 {
16544 16280 ASSERT(un != NULL);
16545 16281
16546 16282 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16547 16283
16548 16284 /*
16549 16285 * If consistent memory is bound to a scsi_pkt, the pkt
16550 16286 * has to be destroyed *before* freeing the consistent memory.
16551 16287 	 * Don't change the sequence of these operations.
16552 16288 	 * scsi_destroy_pkt() might access the memory after it was freed
16553 16289 	 * in scsi_free_consistent_buf(), which isn't allowed.
16554 16290 */
16555 16291 if (un->un_rqs_pktp != NULL) {
16556 16292 scsi_destroy_pkt(un->un_rqs_pktp);
16557 16293 un->un_rqs_pktp = NULL;
16558 16294 }
16559 16295
16560 16296 if (un->un_rqs_bp != NULL) {
16561 16297 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16562 16298 if (xp != NULL) {
16563 16299 kmem_free(xp, sizeof (struct sd_xbuf));
16564 16300 }
16565 16301 scsi_free_consistent_buf(un->un_rqs_bp);
16566 16302 un->un_rqs_bp = NULL;
16567 16303 }
16568 16304 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16569 16305 }
16570 16306
16571 16307
16572 16308
16573 16309 /*
16574 16310 * Function: sd_reduce_throttle
16575 16311 *
16576 16312 * Description: Reduces the maximum # of outstanding commands on a
16577 16313 * target to the current number of outstanding commands.
16578 16314  *		Queues a timeout(9F) callback to restore the limit
16579 16315 * after a specified interval has elapsed.
16580 16316 * Typically used when we get a TRAN_BUSY return code
16581 16317 * back from scsi_transport().
16582 16318 *
16583 16319 * Arguments: un - ptr to the sd_lun softstate struct
16584 16320 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16585 16321 *
16586 16322 * Context: May be called from interrupt context
16587 16323 */
16588 16324
16589 16325 static void
16590 16326 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16591 16327 {
16592 16328 ASSERT(un != NULL);
16593 16329 ASSERT(mutex_owned(SD_MUTEX(un)));
16594 16330 ASSERT(un->un_ncmds_in_transport >= 0);
16595 16331
16596 16332 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16597 16333 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16598 16334 un, un->un_throttle, un->un_ncmds_in_transport);
16599 16335
16600 16336 if (un->un_throttle > 1) {
16601 16337 if (un->un_f_use_adaptive_throttle == TRUE) {
16602 16338 switch (throttle_type) {
16603 16339 case SD_THROTTLE_TRAN_BUSY:
16604 16340 if (un->un_busy_throttle == 0) {
16605 16341 un->un_busy_throttle = un->un_throttle;
16606 16342 }
16607 16343 break;
16608 16344 case SD_THROTTLE_QFULL:
16609 16345 un->un_busy_throttle = 0;
16610 16346 break;
16611 16347 default:
16612 16348 ASSERT(FALSE);
16613 16349 }
16614 16350
16615 16351 if (un->un_ncmds_in_transport > 0) {
16616 16352 un->un_throttle = un->un_ncmds_in_transport;
16617 16353 }
16618 16354
16619 16355 } else {
16620 16356 if (un->un_ncmds_in_transport == 0) {
16621 16357 un->un_throttle = 1;
16622 16358 } else {
16623 16359 un->un_throttle = un->un_ncmds_in_transport;
16624 16360 }
16625 16361 }
16626 16362 }
16627 16363
16628 16364 /* Reschedule the timeout if none is currently active */
16629 16365 if (un->un_reset_throttle_timeid == NULL) {
16630 16366 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16631 16367 un, SD_THROTTLE_RESET_INTERVAL);
16632 16368 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16633 16369 "sd_reduce_throttle: timeout scheduled!\n");
16634 16370 }
16635 16371
16636 16372 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16637 16373 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16638 16374 }
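
Under adaptive throttling the two cases differ only in what happens to the restore point: TRAN_BUSY remembers the current limit so it can be restored exactly, while QFULL clears it so the limit ramps back up gradually. A compact model of that logic with hypothetical names:

	static void
	reduce_throttle_model(int tran_busy, int ncmds, int *throttle,
	    int *busy_throttle)
	{
		if (tran_busy) {
			if (*busy_throttle == 0)
				*busy_throttle = *throttle;	/* restore point */
		} else {
			*busy_throttle = 0;	/* QFULL: ramp up gradually */
		}
		if (ncmds > 0)
			*throttle = ncmds;	/* clamp to outstanding I/O */
	}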
16639 16375
16640 16376
16641 16377
16642 16378 /*
16643 16379 * Function: sd_restore_throttle
16644 16380 *
16645 16381 * Description: Callback function for timeout(9F). Resets the current
16646 16382 * value of un->un_throttle to its default.
16647 16383 *
16648 16384 * Arguments: arg - pointer to associated softstate for the device.
16649 16385 *
16650 16386 * Context: May be called from interrupt context
16651 16387 */
16652 16388
16653 16389 static void
16654 16390 sd_restore_throttle(void *arg)
16655 16391 {
16656 16392 struct sd_lun *un = arg;
16657 16393
16658 16394 ASSERT(un != NULL);
16659 16395 ASSERT(!mutex_owned(SD_MUTEX(un)));
16660 16396
16661 16397 mutex_enter(SD_MUTEX(un));
16662 16398
16663 16399 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16664 16400 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16665 16401
16666 16402 un->un_reset_throttle_timeid = NULL;
16667 16403
16668 16404 if (un->un_f_use_adaptive_throttle == TRUE) {
16669 16405 /*
16670 16406 * If un_busy_throttle is nonzero, then it contains the
16671 16407 * value that un_throttle was when we got a TRAN_BUSY back
16672 16408 * from scsi_transport(). We want to revert back to this
16673 16409 * value.
16674 16410 *
16675 16411 * In the QFULL case, the throttle limit will incrementally
16676 16412 * increase until it reaches max throttle.
16677 16413 */
16678 16414 if (un->un_busy_throttle > 0) {
16679 16415 un->un_throttle = un->un_busy_throttle;
16680 16416 un->un_busy_throttle = 0;
16681 16417 } else {
16682 16418 /*
16683 16419 			 * Increase the throttle by 10% to open the gate
16684 16420 			 * slowly; schedule another restore if the saved
16685 16421 			 * throttle has not been reached.
16686 16422 */
16687 16423 short throttle;
16688 16424 if (sd_qfull_throttle_enable) {
16689 16425 throttle = un->un_throttle +
16690 16426 max((un->un_throttle / 10), 1);
16691 16427 un->un_throttle =
16692 16428 (throttle < un->un_saved_throttle) ?
16693 16429 throttle : un->un_saved_throttle;
16694 16430 if (un->un_throttle < un->un_saved_throttle) {
16695 16431 un->un_reset_throttle_timeid =
16696 16432 timeout(sd_restore_throttle,
16697 16433 un,
16698 16434 SD_QFULL_THROTTLE_RESET_INTERVAL);
16699 16435 }
16700 16436 }
16701 16437 }
16702 16438
16703 16439 /*
16704 16440 * If un_throttle has fallen below the low-water mark, we
16705 16441 * restore the maximum value here (and allow it to ratchet
16706 16442 * down again if necessary).
16707 16443 */
16708 16444 if (un->un_throttle < un->un_min_throttle) {
16709 16445 un->un_throttle = un->un_saved_throttle;
16710 16446 }
16711 16447 } else {
16712 16448 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16713 16449 "restoring limit from 0x%x to 0x%x\n",
16714 16450 un->un_throttle, un->un_saved_throttle);
16715 16451 un->un_throttle = un->un_saved_throttle;
16716 16452 }
16717 16453
16718 16454 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16719 16455 "sd_restore_throttle: calling sd_start_cmds!\n");
16720 16456
16721 16457 sd_start_cmds(un, NULL);
16722 16458
16723 16459 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16724 16460 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16725 16461 un, un->un_throttle);
16726 16462
16727 16463 mutex_exit(SD_MUTEX(un));
16728 16464
16729 16465 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16730 16466 }
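
The 10% ramp above converges on un_saved_throttle in a bounded number of intervals. A runnable model of the arithmetic (names are hypothetical; the driver clamps against un_saved_throttle exactly as the ternary above does):

	#include <stdio.h>

	int
	main(void)
	{
		int throttle = 3, saved = 32;

		while (throttle < saved) {
			int step = throttle / 10;
			throttle += (step > 0) ? step : 1;	/* max(10%, 1) */
			if (throttle > saved)
				throttle = saved;
			printf("%d ", throttle);	/* 4 5 6 ... 30 32 */
		}
		printf("\n");
		return (0);
	}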
16731 16467
16732 16468 /*
16733 16469 * Function: sdrunout
16734 16470 *
16735 16471 * Description: Callback routine for scsi_init_pkt when a resource allocation
16736 16472 * fails.
16737 16473 *
16738 16474 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16739 16475 * soft state instance.
16740 16476 *
16741 16477 * Return Code: The scsi_init_pkt routine allows for the callback function to
16742 16478 * return a 0 indicating the callback should be rescheduled or a 1
16743 16479 * indicating not to reschedule. This routine always returns 1
16744 16480 * because the driver always provides a callback function to
16745 16481 * scsi_init_pkt. This results in a callback always being scheduled
16746 16482 * (via the scsi_init_pkt callback implementation) if a resource
16747 16483 * failure occurs.
16748 16484 *
16749 16485 * Context: This callback function may not block or call routines that block
16750 16486 *
16751 16487 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16752 16488 * request persisting at the head of the list which cannot be
16753 16489 * satisfied even after multiple retries. In the future the driver
16754 16490  *	 may implement some type of maximum runout count before failing
16755 16491 * an I/O.
16756 16492 */
16757 16493
16758 16494 static int
16759 16495 sdrunout(caddr_t arg)
16760 16496 {
16761 16497 struct sd_lun *un = (struct sd_lun *)arg;
16762 16498
16763 16499 ASSERT(un != NULL);
16764 16500 ASSERT(!mutex_owned(SD_MUTEX(un)));
16765 16501
16766 16502 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16767 16503
16768 16504 mutex_enter(SD_MUTEX(un));
16769 16505 sd_start_cmds(un, NULL);
16770 16506 mutex_exit(SD_MUTEX(un));
16771 16507 /*
16772 16508 * This callback routine always returns 1 (i.e. do not reschedule)
16773 16509 * because we always specify sdrunout as the callback handler for
16774 16510 * scsi_init_pkt inside the call to sd_start_cmds.
16775 16511 */
16776 16512 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16777 16513 return (1);
16778 16514 }
16779 16515
16516 +static void
16517 +sd_slow_io_ereport(struct scsi_pkt *pktp)
16518 +{
16519 + struct buf *bp;
16520 + struct sd_lun *un;
16521 + char *devid;
16780 16522
16523 + ASSERT(pktp != NULL);
16524 + bp = (struct buf *)pktp->pkt_private;
16525 + ASSERT(bp != NULL);
16526 + un = SD_GET_UN(bp);
16527 + ASSERT(un != NULL);
16528 +
16529 + SD_ERROR(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16530 + "Slow IO detected SD: 0x%p delta in nsec: %llu",
16531 + (void *)un, pktp->pkt_stop - pktp->pkt_start);
16532 +
16533 + devid = DEVI(un->un_sd->sd_dev)->devi_devid_str;
16534 + scsi_fm_ereport_post(un->un_sd, 0, NULL, "cmd.disk.slow-io",
16535 + fm_ena_generate(0, FM_ENA_FMT1), devid, NULL, DDI_NOSLEEP, NULL,
16536 + FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
16537 + "start", DATA_TYPE_UINT64, pktp->pkt_start,
16538 + "stop", DATA_TYPE_UINT64, pktp->pkt_stop,
16539 + "delta", DATA_TYPE_UINT64, pktp->pkt_stop - pktp->pkt_start,
16540 + "threshold", DATA_TYPE_UINT64, un->un_slow_io_threshold,
16541 + "pkt-reason", DATA_TYPE_UINT32, pktp->pkt_reason,
16542 + NULL);
16543 +}
16544 +
16545 +/* Clamp bucket to [min, max], then rebase so the result starts at 0 */
16546 +static int
16547 +clamp_lat(int bucket, int min, int max)
16548 +{
16549 +
16550 + if (max < bucket)
16551 + bucket = max;
16552 + if (min > bucket)
16553 + bucket = min;
16554 +
16555 + return (bucket - min);
16556 +}
16557 +
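To make the quantization concrete: the delta is converted to microseconds, ddi_fls() (highest set bit, 1-based) picks a power-of-two bucket, and clamp_lat() rebases it into the histogram range. A runnable model where hibit() stands in for ddi_fls() and the SHIFT limits are hypothetical placeholders for SD_LAT_MIN_USEC_SHIFT/SD_LAT_MAX_USEC_SHIFT:

	#include <stdio.h>

	#define	MIN_SHIFT	5	/* first bucket: ~32 usec */
	#define	MAX_SHIFT	20	/* last bucket: ~1 sec */

	static int
	hibit(unsigned long long v)	/* 1-based index of highest set bit */
	{
		int b = 0;
		while (v != 0) {
			b++;
			v >>= 1;
		}
		return (b);
	}

	static int
	clamp(int bucket, int min, int max)
	{
		if (bucket > max)
			bucket = max;
		if (bucket < min)
			bucket = min;
		return (bucket - min);	/* zero-based histogram index */
	}

	int
	main(void)
	{
		unsigned long long nsec[] = { 50000ULL, 2000000ULL, 3000000000ULL };
		for (int i = 0; i < 3; i++)	/* prints 1, 6, 14 */
			printf("bucket %d\n",
			    clamp(hibit(nsec[i] / 1000), MIN_SHIFT, MAX_SHIFT - 1));
		return (0);
	}
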
16781 16558 /*
16782 16559 * Function: sdintr
16783 16560 *
16784 16561 * Description: Completion callback routine for scsi_pkt(9S) structs
16785 16562 * sent to the HBA driver via scsi_transport(9F).
16786 16563 *
16787 16564 * Context: Interrupt context
16788 16565 */
16789 16566
16790 16567 static void
16791 16568 sdintr(struct scsi_pkt *pktp)
16792 16569 {
16793 16570 struct buf *bp;
16794 16571 struct sd_xbuf *xp;
16795 16572 struct sd_lun *un;
16796 16573 size_t actual_len;
16797 16574 sd_ssc_t *sscp;
16575 + hrtime_t io_delta = 0LL;
16576 + int bucket;
16798 16577
16799 16578 ASSERT(pktp != NULL);
16800 16579 bp = (struct buf *)pktp->pkt_private;
16801 16580 ASSERT(bp != NULL);
16802 16581 xp = SD_GET_XBUF(bp);
16803 16582 ASSERT(xp != NULL);
16804 16583 ASSERT(xp->xb_pktp != NULL);
16805 16584 un = SD_GET_UN(bp);
16806 16585 ASSERT(un != NULL);
16807 16586 ASSERT(!mutex_owned(SD_MUTEX(un)));
16808 16587
16809 16588 #ifdef SD_FAULT_INJECTION
16810 16589
16811 16590 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
16812 16591 /* SD FaultInjection */
16813 16592 sd_faultinjection(pktp);
16814 16593
16815 16594 #endif /* SD_FAULT_INJECTION */
16816 16595
16817 16596 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
16818 16597 " xp:0x%p, un:0x%p\n", bp, xp, un);
16819 16598
16820 16599 mutex_enter(SD_MUTEX(un));
16821 16600
16822 16601 ASSERT(un->un_fm_private != NULL);
16823 16602 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
16824 16603 ASSERT(sscp != NULL);
16825 16604
16826 16605 /* Reduce the count of the #commands currently in transport */
16827 16606 un->un_ncmds_in_transport--;
16828 16607 ASSERT(un->un_ncmds_in_transport >= 0);
16829 16608
16830 16609 /* Increment counter to indicate that the callback routine is active */
16831 16610 un->un_in_callback++;
16832 16611
16833 16612 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
16613 + /* If the HBA driver did not set the stop time, set it now. */
16614 + if (pktp->pkt_stop == 0)
16615 + pktp->pkt_stop = gethrtime();
16616 + /*
16617 + * If there are HBA drivers or layered drivers which do not participate
16618 +	 * in slow-io diagnosis, the start time set above may be overwritten
16619 + * with zero. If pkt_start is zero, the delta should also be zero.
16620 + */
16621 + if (pktp->pkt_start != 0)
16622 + io_delta = pktp->pkt_stop - pktp->pkt_start;
16623 + if (un->un_slow_io_threshold > 0 && io_delta > un->un_slow_io_threshold)
16624 + sd_slow_io_ereport(pktp);
16625 + if (un->un_lat_stats) {
16626 + un->un_lat_stats->l_nrequest++;
16627 + un->un_lat_stats->l_sum += io_delta;
16834 16628
16629 + /* Track the latency in usec and quantize by power of 2 */
16630 + bucket = clamp_lat(ddi_fls(io_delta / 1000),
16631 + SD_LAT_MIN_USEC_SHIFT, SD_LAT_MAX_USEC_SHIFT - 1);
16632 + ASSERT3S(bucket, >=, 0);
16633 + ASSERT3S(bucket, <, ARRAY_SIZE(un->un_lat_stats->l_histogram));
16634 + un->un_lat_stats->l_histogram[bucket]++;
16635 + }
16636 +
16835 16637 #ifdef SDDEBUG
16836 16638 if (bp == un->un_retry_bp) {
16837 16639 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16838 16640 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16839 16641 un, un->un_retry_bp, un->un_ncmds_in_transport);
16840 16642 }
16841 16643 #endif
16842 16644
16843 16645 /*
16844 16646 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
16845 16647 * state if needed.
16846 16648 */
16847 16649 if (pktp->pkt_reason == CMD_DEV_GONE) {
16848 16650 /* Prevent multiple console messages for the same failure. */
16849 16651 if (un->un_last_pkt_reason != CMD_DEV_GONE) {
16850 16652 un->un_last_pkt_reason = CMD_DEV_GONE;
16851 16653 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16852 16654 "Command failed to complete...Device is gone\n");
16853 16655 }
16854 16656 if (un->un_mediastate != DKIO_DEV_GONE) {
16855 16657 un->un_mediastate = DKIO_DEV_GONE;
16856 16658 cv_broadcast(&un->un_state_cv);
16857 16659 }
16858 16660 /*
16859 16661 * If the command happens to be the REQUEST SENSE command,
16860 16662 * free up the rqs buf and fail the original command.
16861 16663 */
16862 16664 if (bp == un->un_rqs_bp) {
16863 16665 bp = sd_mark_rqs_idle(un, xp);
16864 16666 }
16865 16667 sd_return_failed_command(un, bp, EIO);
16866 16668 goto exit;
16867 16669 }
16868 16670
16869 16671 if (pktp->pkt_state & STATE_XARQ_DONE) {
16870 16672 SD_TRACE(SD_LOG_COMMON, un,
16871 16673 "sdintr: extra sense data received. pkt=%p\n", pktp);
16872 16674 }
16873 16675
16874 16676 /*
16875 16677 * First see if the pkt has auto-request sense data with it....
16876 16678 * Look at the packet state first so we don't take a performance
16877 16679 * hit looking at the arq enabled flag unless absolutely necessary.
16878 16680 */
16879 16681 if ((pktp->pkt_state & STATE_ARQ_DONE) &&
16880 16682 (un->un_f_arq_enabled == TRUE)) {
16881 16683 /*
16882 16684 * The HBA did an auto request sense for this command so check
16883 16685 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
16884 16686 * driver command that should not be retried.
16885 16687 */
16886 16688 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
16887 16689 /*
16888 16690 * Save the relevant sense info into the xp for the
16889 16691 * original cmd.
16890 16692 */
16891 16693 struct scsi_arq_status *asp;
16892 16694 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
16893 16695 xp->xb_sense_status =
16894 16696 *((uchar_t *)(&(asp->sts_rqpkt_status)));
16895 16697 xp->xb_sense_state = asp->sts_rqpkt_state;
16896 16698 xp->xb_sense_resid = asp->sts_rqpkt_resid;
16897 16699 if (pktp->pkt_state & STATE_XARQ_DONE) {
16898 16700 actual_len = MAX_SENSE_LENGTH -
16899 16701 xp->xb_sense_resid;
16900 16702 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16901 16703 MAX_SENSE_LENGTH);
16902 16704 } else {
16903 16705 if (xp->xb_sense_resid > SENSE_LENGTH) {
16904 16706 actual_len = MAX_SENSE_LENGTH -
16905 16707 xp->xb_sense_resid;
16906 16708 } else {
16907 16709 actual_len = SENSE_LENGTH -
16908 16710 xp->xb_sense_resid;
16909 16711 }
16910 16712 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
16911 16713 if ((((struct uscsi_cmd *)
16912 16714 (xp->xb_pktinfo))->uscsi_rqlen) >
16913 16715 actual_len) {
16914 16716 xp->xb_sense_resid =
16915 16717 (((struct uscsi_cmd *)
16916 16718 (xp->xb_pktinfo))->
16917 16719 uscsi_rqlen) - actual_len;
16918 16720 } else {
16919 16721 xp->xb_sense_resid = 0;
16920 16722 }
16921 16723 }
16922 16724 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16923 16725 SENSE_LENGTH);
16924 16726 }
16925 16727
16926 16728 /* fail the command */
16927 16729 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16928 16730 "sdintr: arq done and FLAG_DIAGNOSE set\n");
16929 16731 sd_return_failed_command(un, bp, EIO);
16930 16732 goto exit;
16931 16733 }
16932 16734
16933 -#if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
16934 16735 /*
16935 16736 * We want to either retry or fail this command, so free
16936 16737 * the DMA resources here. If we retry the command then
16937 16738 * the DMA resources will be reallocated in sd_start_cmds().
16938 16739 * Note that when PKT_DMA_PARTIAL is used, this reallocation
16939 16740 * causes the *entire* transfer to start over again from the
16940 16741 * beginning of the request, even for PARTIAL chunks that
16941 16742 * have already transferred successfully.
16942 16743 */
16943 16744 if ((un->un_f_is_fibre == TRUE) &&
16944 16745 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16945 16746 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
16946 16747 scsi_dmafree(pktp);
16947 16748 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16948 16749 }
16949 -#endif
16950 16750
16951 16751 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16952 16752 "sdintr: arq done, sd_handle_auto_request_sense\n");
16953 16753
16954 16754 sd_handle_auto_request_sense(un, bp, xp, pktp);
16955 16755 goto exit;
16956 16756 }
16957 16757
16958 16758 /* Next see if this is the REQUEST SENSE pkt for the instance */
16959 16759 if (pktp->pkt_flags & FLAG_SENSING) {
16960 16760 /* This pktp is from the unit's REQUEST_SENSE command */
16961 16761 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16962 16762 "sdintr: sd_handle_request_sense\n");
16963 16763 sd_handle_request_sense(un, bp, xp, pktp);
16964 16764 goto exit;
16965 16765 }
16966 16766
16967 16767 /*
16968 16768 * Check to see if the command successfully completed as requested;
16969 16769 * this is the most common case (and also the hot performance path).
16970 16770 *
16971 16771 * Requirements for successful completion are:
16972 16772 * pkt_reason is CMD_CMPLT and packet status is status good.
16973 16773 * In addition:
16974 16774 * - A residual of zero indicates successful completion no matter what
16975 16775 * the command is.
16976 16776 * - If the residual is not zero and the command is not a read or
16977 16777 * write, then it's still defined as successful completion. In other
16978 16778 * words, if the command is a read or write the residual must be
16979 16779 * zero for successful completion.
16980 16780 * - If the residual is not zero and the command is a read or
16981 16781 * write, and it's a USCSICMD, then it's still defined as
16982 16782 * successful completion.
16983 16783 */
16984 16784 if ((pktp->pkt_reason == CMD_CMPLT) &&
16985 16785 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
16986 16786
16987 16787 /*
16988 16788 * Since this command is returned with a good status, we
16989 16789 * can reset the count for Sonoma failover.
16990 16790 */
16991 16791 un->un_sonoma_failure_count = 0;
16992 16792
16993 16793 /*
16994 16794 * Return all USCSI commands on good status
16995 16795 */
16996 16796 if (pktp->pkt_resid == 0) {
16997 16797 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16998 16798 "sdintr: returning command for resid == 0\n");
16999 16799 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
17000 16800 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
17001 16801 SD_UPDATE_B_RESID(bp, pktp);
17002 16802 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17003 16803 "sdintr: returning command for resid != 0\n");
17004 16804 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17005 16805 SD_UPDATE_B_RESID(bp, pktp);
17006 16806 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17007 16807 "sdintr: returning uscsi command\n");
17008 16808 } else {
17009 16809 goto not_successful;
17010 16810 }
17011 16811 sd_return_command(un, bp);
17012 16812
17013 16813 /*
17014 16814 * Decrement counter to indicate that the callback routine
17015 16815 * is done.
17016 16816 */
17017 16817 un->un_in_callback--;
17018 16818 ASSERT(un->un_in_callback >= 0);
17019 16819 mutex_exit(SD_MUTEX(un));
17020 16820
17021 16821 return;
17022 16822 }
17023 16823
17024 16824 not_successful:
17025 -
17026 -#if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
17027 16825 /*
17028 16826 * The following is based upon knowledge of the underlying transport
17029 16827 * and its use of DMA resources. This code should be removed when
17030 16828 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17031 16829 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17032 16830 * and sd_start_cmds().
17033 16831 *
17034 16832 * Free any DMA resources associated with this command if there
17035 16833 * is a chance it could be retried or enqueued for later retry.
17036 16834 * If we keep the DMA binding then mpxio cannot reissue the
17037 16835 * command on another path whenever a path failure occurs.
17038 16836 *
17039 16837 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
17040 16838 * causes the *entire* transfer to start over again from the
17041 16839 * beginning of the request, even for PARTIAL chunks that
17042 16840 * have already transferred successfully.
17043 16841 *
17044 16842 * This is only done for non-uscsi commands (and also skipped for the
17045 16843 * driver's internal RQS command). Also just do this for Fibre Channel
17046 16844 * devices as these are the only ones that support mpxio.
17047 16845 */
17048 16846 if ((un->un_f_is_fibre == TRUE) &&
17049 16847 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
17050 16848 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
17051 16849 scsi_dmafree(pktp);
17052 16850 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
17053 16851 }
17054 -#endif
17055 16852
17056 16853 /*
17057 16854 * The command did not successfully complete as requested so check
17058 16855 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
17059 16856 * driver command that should not be retried so just return. If
17060 16857 * FLAG_DIAGNOSE is not set the error will be processed below.
17061 16858 */
17062 16859 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
17063 16860 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17064 16861 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17065 16862 /*
17066 16863 * Issue a request sense if a check condition caused the error
17067 16864 * (we handle the auto request sense case above), otherwise
17068 16865 * just fail the command.
17069 16866 */
17070 16867 if ((pktp->pkt_reason == CMD_CMPLT) &&
17071 16868 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
17072 - sd_send_request_sense_command(un, bp, pktp);
16869 + sd_send_request_sense_command(un, bp,
16870 + SD_RETRIES_STANDARD, pktp);
17073 16871 } else {
17074 16872 sd_return_failed_command(un, bp, EIO);
17075 16873 }
17076 16874 goto exit;
17077 16875 }
17078 16876
17079 16877 /*
17080 16878 * The command did not successfully complete as requested so process
17081 16879 * the error, retry, and/or attempt recovery.
17082 16880 */
17083 16881 switch (pktp->pkt_reason) {
17084 16882 case CMD_CMPLT:
17085 16883 switch (SD_GET_PKT_STATUS(pktp)) {
17086 16884 case STATUS_GOOD:
17087 16885 /*
17088 16886 * The command completed successfully with a non-zero
17089 16887 * residual
17090 16888 */
17091 16889 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17092 16890 "sdintr: STATUS_GOOD \n");
17093 16891 sd_pkt_status_good(un, bp, xp, pktp);
17094 16892 break;
17095 16893
17096 16894 case STATUS_CHECK:
17097 16895 case STATUS_TERMINATED:
17098 16896 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17099 16897 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
17100 16898 sd_pkt_status_check_condition(un, bp, xp, pktp);
17101 16899 break;
17102 16900
17103 16901 case STATUS_BUSY:
17104 16902 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17105 16903 "sdintr: STATUS_BUSY\n");
17106 16904 sd_pkt_status_busy(un, bp, xp, pktp);
17107 16905 break;
17108 16906
17109 16907 case STATUS_RESERVATION_CONFLICT:
17110 16908 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17111 16909 "sdintr: STATUS_RESERVATION_CONFLICT\n");
17112 16910 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17113 16911 break;
17114 16912
17115 16913 case STATUS_QFULL:
17116 16914 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17117 16915 "sdintr: STATUS_QFULL\n");
17118 16916 sd_pkt_status_qfull(un, bp, xp, pktp);
17119 16917 break;
17120 16918
17121 16919 case STATUS_MET:
17122 16920 case STATUS_INTERMEDIATE:
17123 16921 case STATUS_SCSI2:
17124 16922 case STATUS_INTERMEDIATE_MET:
17125 16923 case STATUS_ACA_ACTIVE:
17126 16924 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17127 16925 "Unexpected SCSI status received: 0x%x\n",
17128 16926 SD_GET_PKT_STATUS(pktp));
17129 16927 /*
17130 16928			 * Mark the ssc_flags when an invalid status
17131 16929			 * code is detected for a non-USCSI command.
17132 16930 */
17133 16931 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17134 16932 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17135 16933 0, "stat-code");
17136 16934 }
17137 16935 sd_return_failed_command(un, bp, EIO);
17138 16936 break;
17139 16937
17140 16938 default:
17141 16939 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17142 16940 "Invalid SCSI status received: 0x%x\n",
17143 16941 SD_GET_PKT_STATUS(pktp));
17144 16942 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17145 16943 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17146 16944 0, "stat-code");
17147 16945 }
17148 16946 sd_return_failed_command(un, bp, EIO);
17149 16947 break;
17150 16948
17151 16949 }
17152 16950 break;
17153 16951
17154 16952 case CMD_INCOMPLETE:
17155 16953 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17156 16954 "sdintr: CMD_INCOMPLETE\n");
17157 16955 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17158 16956 break;
17159 16957 case CMD_TRAN_ERR:
17160 16958 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17161 16959 "sdintr: CMD_TRAN_ERR\n");
17162 16960 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17163 16961 break;
17164 16962 case CMD_RESET:
17165 16963 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17166 16964 "sdintr: CMD_RESET \n");
17167 16965 sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17168 16966 break;
17169 16967 case CMD_ABORTED:
17170 16968 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17171 16969 "sdintr: CMD_ABORTED \n");
17172 16970 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17173 16971 break;
17174 16972 case CMD_TIMEOUT:
17175 16973 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17176 16974 "sdintr: CMD_TIMEOUT\n");
17177 16975 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17178 16976 break;
17179 16977 case CMD_UNX_BUS_FREE:
17180 16978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17181 16979 "sdintr: CMD_UNX_BUS_FREE \n");
17182 16980 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17183 16981 break;
17184 16982 case CMD_TAG_REJECT:
17185 16983 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17186 16984 "sdintr: CMD_TAG_REJECT\n");
17187 16985 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17188 16986 break;
17189 16987 default:
17190 16988 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17191 16989 "sdintr: default\n");
17192 16990 /*
17193 16991		 * Mark the ssc_flags for detecting an invalid pkt_reason.
17194 16992 */
17195 16993 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17196 16994 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17197 16995 0, "pkt-reason");
17198 16996 }
17199 16997 sd_pkt_reason_default(un, bp, xp, pktp);
17200 16998 break;
17201 16999 }
17202 17000
17203 17001 exit:
17204 17002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17205 17003
17206 17004 /* Decrement counter to indicate that the callback routine is done. */
17207 17005 un->un_in_callback--;
17208 17006 ASSERT(un->un_in_callback >= 0);
17209 17007
17210 17008 /*
17211 17009 * At this point, the pkt has been dispatched, ie, it is either
17212 17010 * being re-tried or has been returned to its caller and should
17213 17011 * not be referenced.
17214 17012 */
17215 17013
17216 17014 mutex_exit(SD_MUTEX(un));
17217 17015 }
17218 17016
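The completion test near the top of sdintr() packs several rules into one if/else chain; as a reading aid, here is the same policy as a standalone predicate. This is a sketch only: the boolean parameters stand in for the pkt_reason, SD_GET_PKT_STATUS(), SD_GET_PKT_OPCODE(), and SD_XB_USCSICMD tests performed in the driver.

#include <stdbool.h>
#include <stddef.h>

/* Sketch of the "successful completion" rules described in sdintr(). */
static bool
io_completed_ok(bool cmd_cmplt, bool status_good, size_t resid,
    bool is_read, bool is_write, bool is_uscsi)
{
	if (!cmd_cmplt || !status_good)
		return (false);
	if (resid == 0)
		return (true);		/* zero residual always succeeds */
	if (!is_read && !is_write)
		return (true);		/* non-R/W tolerates a residual */
	if (is_uscsi)
		return (true);		/* USCSI R/W tolerates a residual */
	return (false);			/* buf(9S) R/W must be complete */
}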
17219 17017
17220 17018 /*
17221 17019 * Function: sd_print_incomplete_msg
17222 17020 *
17223 17021 * Description: Prints the error message for a CMD_INCOMPLETE error.
17224 17022 *
17225 17023 * Arguments: un - ptr to associated softstate for the device.
17226 17024 * bp - ptr to the buf(9S) for the command.
17227 17025 * arg - message string ptr
17228 17026 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17229 17027 * or SD_NO_RETRY_ISSUED.
17230 17028 *
17231 17029 * Context: May be called under interrupt context
17232 17030 */
17233 17031
17234 17032 static void
17235 17033 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17236 17034 {
17237 17035 struct scsi_pkt *pktp;
17238 17036 char *msgp;
17239 17037 char *cmdp = arg;
17240 17038
17241 17039 ASSERT(un != NULL);
17242 17040 ASSERT(mutex_owned(SD_MUTEX(un)));
17243 17041 ASSERT(bp != NULL);
17244 17042 ASSERT(arg != NULL);
17245 17043 pktp = SD_GET_PKTP(bp);
17246 17044 ASSERT(pktp != NULL);
17247 17045
17248 17046 switch (code) {
17249 17047 case SD_DELAYED_RETRY_ISSUED:
17250 17048 case SD_IMMEDIATE_RETRY_ISSUED:
17251 17049 msgp = "retrying";
17252 17050 break;
17253 17051 case SD_NO_RETRY_ISSUED:
17254 17052 default:
17255 17053 msgp = "giving up";
17256 17054 break;
17257 17055 }
17258 17056
17259 17057 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17260 17058 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17261 17059 "incomplete %s- %s\n", cmdp, msgp);
17262 17060 }
17263 17061 }
17264 17062
17265 17063
17266 17064
17267 17065 /*
17268 17066 * Function: sd_pkt_status_good
17269 17067 *
17270 17068 * Description: Processing for a STATUS_GOOD code in pkt_status.
17271 17069 *
17272 17070 * Context: May be called under interrupt context
17273 17071 */
17274 17072
17275 17073 static void
17276 17074 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
17277 17075 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17278 17076 {
17279 17077 char *cmdp;
17280 17078
17281 17079 ASSERT(un != NULL);
17282 17080 ASSERT(mutex_owned(SD_MUTEX(un)));
17283 17081 ASSERT(bp != NULL);
17284 17082 ASSERT(xp != NULL);
17285 17083 ASSERT(pktp != NULL);
17286 17084 ASSERT(pktp->pkt_reason == CMD_CMPLT);
17287 17085 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
17288 17086 ASSERT(pktp->pkt_resid != 0);
17289 17087
17290 17088 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
17291 17089
17292 17090 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17293 17091 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
17294 17092 case SCMD_READ:
17295 17093 cmdp = "read";
17296 17094 break;
17297 17095 case SCMD_WRITE:
17298 17096 cmdp = "write";
17299 17097 break;
17300 17098 default:
17301 17099 SD_UPDATE_B_RESID(bp, pktp);
17302 17100 sd_return_command(un, bp);
17303 17101 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17304 17102 return;
17305 17103 }
17306 17104
17307 17105 /*
17308 17106 * See if we can retry the read/write, preferrably immediately.
17309 17107 * If retries are exhaused, then sd_retry_command() will update
17310 17108 * the b_resid count.
17311 17109 */
17312 17110 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
17313 17111 cmdp, EIO, (clock_t)0, NULL);
17314 17112
17315 17113 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17316 17114 }
17317 17115
17318 17116
17319 17117
17320 17118
17321 17119
17322 17120 /*
17323 17121 * Function: sd_handle_request_sense
17324 17122 *
17325 17123 * Description: Processing for non-auto Request Sense command.
17326 17124 *
17327 17125 * Arguments: un - ptr to associated softstate
17328 17126 * sense_bp - ptr to buf(9S) for the RQS command
17329 17127 * sense_xp - ptr to the sd_xbuf for the RQS command
17330 17128 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
17331 17129 *
17332 17130 * Context: May be called under interrupt context
17333 17131 */
17334 17132
17335 17133 static void
17336 17134 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
17337 17135 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
17338 17136 {
17339 17137 struct buf *cmd_bp; /* buf for the original command */
17340 17138 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */
17341 17139 struct scsi_pkt *cmd_pktp; /* pkt for the original command */
17342 17140 size_t actual_len; /* actual sense data length */
17343 17141
17344 17142 ASSERT(un != NULL);
17345 17143 ASSERT(mutex_owned(SD_MUTEX(un)));
17346 17144 ASSERT(sense_bp != NULL);
17347 17145 ASSERT(sense_xp != NULL);
17348 17146 ASSERT(sense_pktp != NULL);
17349 17147
17350 17148 /*
17351 17149 * Note the sense_bp, sense_xp, and sense_pktp here are for the
17352 17150 * RQS command and not the original command.
17353 17151 */
17354 17152 ASSERT(sense_pktp == un->un_rqs_pktp);
17355 17153 ASSERT(sense_bp == un->un_rqs_bp);
17356 17154 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
17357 17155 (FLAG_SENSING | FLAG_HEAD));
17358 17156 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
17359 17157 FLAG_SENSING) == FLAG_SENSING);
17360 17158
17361 17159 /* These are the bp, xp, and pktp for the original command */
17362 17160 cmd_bp = sense_xp->xb_sense_bp;
17363 17161 cmd_xp = SD_GET_XBUF(cmd_bp);
17364 17162 cmd_pktp = SD_GET_PKTP(cmd_bp);
17365 17163
17366 17164 if (sense_pktp->pkt_reason != CMD_CMPLT) {
17367 17165 /*
17368 17166 * The REQUEST SENSE command failed. Release the REQUEST
17369 17167 * SENSE command for re-use, get back the bp for the original
17370 17168 * command, and attempt to re-try the original command if
17371 17169 * FLAG_DIAGNOSE is not set in the original packet.
17372 17170 */
17373 17171 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17374 17172 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17375 17173 cmd_bp = sd_mark_rqs_idle(un, sense_xp);
17376 17174 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
17377 17175 NULL, NULL, EIO, (clock_t)0, NULL);
17378 17176 return;
17379 17177 }
17380 17178 }
17381 17179
17382 17180 /*
17383 17181 * Save the relevant sense info into the xp for the original cmd.
17384 17182 *
17385 17183 * Note: if the request sense failed the state info will be zero
17386 17184 * as set in sd_mark_rqs_busy()
17387 17185 */
17388 17186 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
17389 17187 cmd_xp->xb_sense_state = sense_pktp->pkt_state;
17390 17188 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
17391 17189 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
17392 17190 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
17393 17191 SENSE_LENGTH)) {
17394 17192 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17395 17193 MAX_SENSE_LENGTH);
17396 17194 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
17397 17195 } else {
17398 17196 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17399 17197 SENSE_LENGTH);
17400 17198 if (actual_len < SENSE_LENGTH) {
17401 17199 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
17402 17200 } else {
17403 17201 cmd_xp->xb_sense_resid = 0;
17404 17202 }
17405 17203 }
17406 17204
17407 17205 /*
17408 17206 * Free up the RQS command....
17409 17207 * NOTE:
17410 17208 * Must do this BEFORE calling sd_validate_sense_data!
17411 17209 * sd_validate_sense_data may return the original command in
17412 17210 * which case the pkt will be freed and the flags can no
17413 17211 * longer be touched.
17414 17212 * SD_MUTEX is held through this process until the command
17415 17213 * is dispatched based upon the sense data, so there are
17416 17214 * no race conditions.
17417 17215 */
17418 17216 (void) sd_mark_rqs_idle(un, sense_xp);
17419 17217
17420 17218 /*
17421 17219 * For a retryable command see if we have valid sense data, if so then
17422 17220 * turn it over to sd_decode_sense() to figure out the right course of
17423 17221 * action. Just fail a non-retryable command.
17424 17222 */
17425 17223 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17426 17224 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
17427 17225 SD_SENSE_DATA_IS_VALID) {
17428 17226 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
17429 17227 }
17430 17228 } else {
17431 17229 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
17432 17230 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
17433 17231 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
17434 17232 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
17435 17233 sd_return_failed_command(un, cmd_bp, EIO);
17436 17234 }
17437 17235 }
17438 17236
17439 17237
17440 17238
17441 17239
17442 17240 /*
17443 17241 * Function: sd_handle_auto_request_sense
17444 17242 *
17445 17243 * Description: Processing for auto-request sense information.
17446 17244 *
17447 17245 * Arguments: un - ptr to associated softstate
17448 17246 * bp - ptr to buf(9S) for the command
17449 17247 * xp - ptr to the sd_xbuf for the command
17450 17248 * pktp - ptr to the scsi_pkt(9S) for the command
17451 17249 *
17452 17250 * Context: May be called under interrupt context
17453 17251 */
17454 17252
17455 17253 static void
17456 17254 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
17457 17255 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17458 17256 {
17459 17257 struct scsi_arq_status *asp;
17460 17258 size_t actual_len;
17461 17259
17462 17260 ASSERT(un != NULL);
17463 17261 ASSERT(mutex_owned(SD_MUTEX(un)));
17464 17262 ASSERT(bp != NULL);
17465 17263 ASSERT(xp != NULL);
17466 17264 ASSERT(pktp != NULL);
17467 17265 ASSERT(pktp != un->un_rqs_pktp);
17468 17266 ASSERT(bp != un->un_rqs_bp);
17469 17267
17470 17268 /*
17471 17269 * For auto-request sense, we get a scsi_arq_status back from
17472 17270 * the HBA, with the sense data in the sts_sensedata member.
17473 17271 * The pkt_scbp of the packet points to this scsi_arq_status.
17474 17272 */
17475 17273 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
17476 17274
17477 17275 if (asp->sts_rqpkt_reason != CMD_CMPLT) {
17478 17276 /*
17479 17277 * The auto REQUEST SENSE failed; see if we can re-try
17480 17278 * the original command.
17481 17279 */
17482 17280 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17483 17281 "auto request sense failed (reason=%s)\n",
17484 17282 scsi_rname(asp->sts_rqpkt_reason));
17485 17283
17486 17284 sd_reset_target(un, pktp);
17487 17285
17488 17286 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17489 17287 NULL, NULL, EIO, (clock_t)0, NULL);
17490 17288 return;
17491 17289 }
17492 17290
17493 17291 /* Save the relevant sense info into the xp for the original cmd. */
17494 17292 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
17495 17293 xp->xb_sense_state = asp->sts_rqpkt_state;
17496 17294 xp->xb_sense_resid = asp->sts_rqpkt_resid;
17497 17295 if (xp->xb_sense_state & STATE_XARQ_DONE) {
17498 17296 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17499 17297 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
17500 17298 MAX_SENSE_LENGTH);
17501 17299 } else {
17502 17300 if (xp->xb_sense_resid > SENSE_LENGTH) {
17503 17301 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17504 17302 } else {
17505 17303 actual_len = SENSE_LENGTH - xp->xb_sense_resid;
17506 17304 }
17507 17305 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17508 17306 if ((((struct uscsi_cmd *)
17509 17307 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
17510 17308 xp->xb_sense_resid = (((struct uscsi_cmd *)
17511 17309 (xp->xb_pktinfo))->uscsi_rqlen) -
17512 17310 actual_len;
17513 17311 } else {
17514 17312 xp->xb_sense_resid = 0;
17515 17313 }
17516 17314 }
17517 17315 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
17518 17316 }
17519 17317
17520 17318 /*
17521 17319 * See if we have valid sense data, if so then turn it over to
17522 17320 * sd_decode_sense() to figure out the right course of action.
17523 17321 */
17524 17322 if (sd_validate_sense_data(un, bp, xp, actual_len) ==
17525 17323 SD_SENSE_DATA_IS_VALID) {
17526 17324 sd_decode_sense(un, bp, xp, pktp);
17527 17325 }
17528 17326 }
17529 17327
17530 17328
17531 17329 /*
17532 17330 * Function: sd_print_sense_failed_msg
17533 17331 *
17534 17332 * Description: Print log message when RQS has failed.
17535 17333 *
17536 17334 * Arguments: un - ptr to associated softstate
17537 17335 * bp - ptr to buf(9S) for the command
17538 17336 * arg - generic message string ptr
17539 17337 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17540 17338 * or SD_NO_RETRY_ISSUED
17541 17339 *
17542 17340 * Context: May be called from interrupt context
17543 17341 */
17544 17342
17545 17343 static void
17546 17344 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
17547 17345 int code)
17548 17346 {
17549 17347 char *msgp = arg;
17550 17348
17551 17349 ASSERT(un != NULL);
17552 17350 ASSERT(mutex_owned(SD_MUTEX(un)));
17553 17351 ASSERT(bp != NULL);
17554 17352
17555 17353 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
17556 17354 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
17557 17355 }
17558 17356 }
17559 17357
17560 17358
17561 17359 /*
17562 17360 * Function: sd_validate_sense_data
17563 17361 *
17564 17362 * Description: Check the given sense data for validity.
17565 17363 * If the sense data is not valid, the command will
17566 17364 * be either failed or retried!
17567 17365 *
17568 17366 * Return Code: SD_SENSE_DATA_IS_INVALID
17569 17367 * SD_SENSE_DATA_IS_VALID
17570 17368 *
17571 17369 * Context: May be called from interrupt context
17572 17370 */
17573 17371
17574 17372 static int
17575 17373 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17576 17374 size_t actual_len)
17577 17375 {
17578 17376 struct scsi_extended_sense *esp;
17579 17377 struct scsi_pkt *pktp;
17580 17378 char *msgp = NULL;
17581 17379 sd_ssc_t *sscp;
17582 17380
17583 17381 ASSERT(un != NULL);
17584 17382 ASSERT(mutex_owned(SD_MUTEX(un)));
17585 17383 ASSERT(bp != NULL);
17586 17384 ASSERT(bp != un->un_rqs_bp);
17587 17385 ASSERT(xp != NULL);
17588 17386 ASSERT(un->un_fm_private != NULL);
17589 17387
17590 17388 pktp = SD_GET_PKTP(bp);
17591 17389 ASSERT(pktp != NULL);
17592 17390
17593 17391 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
17594 17392 ASSERT(sscp != NULL);
17595 17393
17596 17394 /*
17597 17395 * Check the status of the RQS command (auto or manual).
17598 17396 */
17599 17397 switch (xp->xb_sense_status & STATUS_MASK) {
17600 17398 case STATUS_GOOD:
17601 17399 break;
17602 17400
17603 17401 case STATUS_RESERVATION_CONFLICT:
17604 17402 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17605 17403 return (SD_SENSE_DATA_IS_INVALID);
17606 17404
17607 17405 case STATUS_BUSY:
17608 17406 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17609 17407 "Busy Status on REQUEST SENSE\n");
17610 17408 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
17611 17409 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17612 17410 return (SD_SENSE_DATA_IS_INVALID);
17613 17411
17614 17412 case STATUS_QFULL:
17615 17413 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17616 17414 "QFULL Status on REQUEST SENSE\n");
17617 17415 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
17618 17416 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17619 17417 return (SD_SENSE_DATA_IS_INVALID);
17620 17418
17621 17419 case STATUS_CHECK:
17622 17420 case STATUS_TERMINATED:
17623 17421 msgp = "Check Condition on REQUEST SENSE\n";
17624 17422 goto sense_failed;
17625 17423
17626 17424 default:
17627 17425 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
17628 17426 goto sense_failed;
17629 17427 }
17630 17428
17631 17429 /*
17632 17430 * See if we got the minimum required amount of sense data.
17633 17431 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
17634 17432 * or less.
17635 17433 */
17636 17434 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
17637 17435 (actual_len == 0)) {
17638 17436 msgp = "Request Sense couldn't get sense data\n";
17639 17437 goto sense_failed;
17640 17438 }
17641 17439
17642 17440 if (actual_len < SUN_MIN_SENSE_LENGTH) {
17643 17441 msgp = "Not enough sense information\n";
17644 17442 /* Mark the ssc_flags for detecting invalid sense data */
17645 17443 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17646 17444 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17647 17445 "sense-data");
17648 17446 }
17649 17447 goto sense_failed;
17650 17448 }
17651 17449
17652 17450 /*
17653 17451 * We require the extended sense data
17654 17452 */
17655 17453 esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17656 17454 if (esp->es_class != CLASS_EXTENDED_SENSE) {
17657 17455 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17658 17456 static char tmp[8];
17659 17457 static char buf[148];
17660 17458 char *p = (char *)(xp->xb_sense_data);
17661 17459 int i;
17662 17460
17663 17461 mutex_enter(&sd_sense_mutex);
17664 17462 (void) strcpy(buf, "undecodable sense information:");
17665 17463 for (i = 0; i < actual_len; i++) {
17666 17464 (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
17667 17465 (void) strcpy(&buf[strlen(buf)], tmp);
17668 17466 }
17669 17467 i = strlen(buf);
17670 17468 (void) strcpy(&buf[i], "-(assumed fatal)\n");
17671 17469
17672 17470 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17673 17471 scsi_log(SD_DEVINFO(un), sd_label,
17674 17472 CE_WARN, buf);
17675 17473 }
17676 17474 mutex_exit(&sd_sense_mutex);
17677 17475 }
17678 17476
17679 17477 /* Mark the ssc_flags for detecting invalid sense data */
17680 17478 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17681 17479 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17682 17480 "sense-data");
17683 17481 }
17684 17482
17685 17483 /* Note: Legacy behavior, fail the command with no retry */
17686 17484 sd_return_failed_command(un, bp, EIO);
17687 17485 return (SD_SENSE_DATA_IS_INVALID);
17688 17486 }
17689 17487
17690 17488 /*
17691 17489	 * Check that es_code is valid (es_class concatenated with es_code
17692 17490	 * make up the "response code" field).  es_class will always be 7, so
17693 17491 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
17694 17492 * format.
17695 17493 */
17696 17494 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17697 17495 (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17698 17496 (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17699 17497 (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17700 17498 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17701 17499 /* Mark the ssc_flags for detecting invalid sense data */
17702 17500 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17703 17501 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17704 17502 "sense-data");
17705 17503 }
17706 17504 goto sense_failed;
17707 17505 }
17708 17506
17709 17507 return (SD_SENSE_DATA_IS_VALID);
17710 17508
17711 17509 sense_failed:
17712 17510 /*
17713 17511 * If the request sense failed (for whatever reason), attempt
17714 17512 * to retry the original command.
17715 17513 */
17716 -#if defined(__i386) || defined(__amd64)
17717 - /*
17718 - * SD_RETRY_DELAY is conditionally compile (#if fibre) in
17719 - * sddef.h for Sparc platform, and x86 uses 1 binary
17720 - * for both SCSI/FC.
17721 - * The SD_RETRY_DELAY value need to be adjusted here
17722 - * when SD_RETRY_DELAY change in sddef.h
17723 - */
17724 17514 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17725 17515 sd_print_sense_failed_msg, msgp, EIO,
17726 17516	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
17727 -#else
17728 - sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17729 - sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17730 -#endif
17731 17517
17732 17518 return (SD_SENSE_DATA_IS_INVALID);
17733 17519 }
17734 17520
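As a reference for the response-code check above: byte 0 of the sense data carries the class in bits 6-4 and the format code in bits 3-0. A sketch of the same validity test on raw sense bytes, where the hex values mirror CLASS_EXTENDED_SENSE and the CODE_FMT_* constants:

/* Sketch: accept only the response codes sd_validate_sense_data() allows. */
static int
sense_format_ok(const unsigned char *sense)
{
	unsigned char cls = (sense[0] >> 4) & 0x7;	/* es_class */
	unsigned char code = sense[0] & 0xf;		/* es_code */

	if (cls != 0x7)			/* CLASS_EXTENDED_SENSE */
		return (0);
	switch (code) {
	case 0x0:	/* fixed, current */
	case 0x1:	/* fixed, deferred */
	case 0x2:	/* descriptor, current */
	case 0x3:	/* descriptor, deferred */
	case 0xf:	/* vendor specific */
		return (1);
	default:
		return (0);
	}
}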
17735 17521 /*
17736 17522 * Function: sd_decode_sense
17737 17523 *
17738 17524 * Description: Take recovery action(s) when SCSI Sense Data is received.
17739 17525 *
17740 17526 * Context: Interrupt context.
17741 17527 */
17742 17528
17743 17529 static void
17744 17530 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17745 17531 struct scsi_pkt *pktp)
17746 17532 {
17747 17533 uint8_t sense_key;
17748 17534
17749 17535 ASSERT(un != NULL);
17750 17536 ASSERT(mutex_owned(SD_MUTEX(un)));
17751 17537 ASSERT(bp != NULL);
17752 17538 ASSERT(bp != un->un_rqs_bp);
17753 17539 ASSERT(xp != NULL);
17754 17540 ASSERT(pktp != NULL);
17755 17541
17756 17542 sense_key = scsi_sense_key(xp->xb_sense_data);
17757 17543
17758 17544 switch (sense_key) {
17759 17545 case KEY_NO_SENSE:
17760 17546 sd_sense_key_no_sense(un, bp, xp, pktp);
17761 17547 break;
17762 17548 case KEY_RECOVERABLE_ERROR:
17763 17549 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17764 17550 bp, xp, pktp);
17765 17551 break;
17766 17552 case KEY_NOT_READY:
17767 17553 sd_sense_key_not_ready(un, xp->xb_sense_data,
17768 17554 bp, xp, pktp);
17769 17555 break;
17770 17556 case KEY_MEDIUM_ERROR:
17771 17557 case KEY_HARDWARE_ERROR:
17772 17558 sd_sense_key_medium_or_hardware_error(un,
17773 17559 xp->xb_sense_data, bp, xp, pktp);
17774 17560 break;
17775 17561 case KEY_ILLEGAL_REQUEST:
17776 17562 sd_sense_key_illegal_request(un, bp, xp, pktp);
17777 17563 break;
17778 17564 case KEY_UNIT_ATTENTION:
17779 17565 sd_sense_key_unit_attention(un, xp->xb_sense_data,
17780 17566 bp, xp, pktp);
17781 17567 break;
17782 17568 case KEY_WRITE_PROTECT:
17783 17569 case KEY_VOLUME_OVERFLOW:
17784 17570 case KEY_MISCOMPARE:
17785 17571 sd_sense_key_fail_command(un, bp, xp, pktp);
17786 17572 break;
17787 17573 case KEY_BLANK_CHECK:
17788 17574 sd_sense_key_blank_check(un, bp, xp, pktp);
17789 17575 break;
17790 17576 case KEY_ABORTED_COMMAND:
17791 17577 sd_sense_key_aborted_command(un, bp, xp, pktp);
17792 17578 break;
17793 17579 case KEY_VENDOR_UNIQUE:
17794 17580 case KEY_COPY_ABORTED:
17795 17581 case KEY_EQUAL:
17796 17582 case KEY_RESERVED:
17797 17583 default:
17798 17584 sd_sense_key_default(un, xp->xb_sense_data,
17799 17585 bp, xp, pktp);
17800 17586 break;
17801 17587 }
17802 17588 }
17803 17589
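sd_decode_sense() dispatches purely on the key returned by scsi_sense_key(9F); for readers unfamiliar with the two sense formats, here is a sketch of roughly what that extraction does (fixed format keeps the key in byte 2, descriptor format in byte 1, low nibble in both cases).

/* Sketch of sense-key extraction for fixed and descriptor formats. */
static unsigned char
sense_key_of(const unsigned char *sense)
{
	unsigned char code = sense[0] & 0x7f;	/* response code */

	if (code == 0x72 || code == 0x73)	/* descriptor format */
		return (sense[1] & 0xf);
	return (sense[2] & 0xf);		/* fixed format */
}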
17804 17590
17805 17591 /*
17806 17592 * Function: sd_dump_memory
17807 17593 *
17808 17594 * Description: Debug logging routine to print the contents of a user provided
17809 17595	 * buffer. The output of the buffer is broken up into 256-byte
17810 17596	 * segments due to a size constraint of the scsi_log
17811 17597 * implementation.
17812 17598 *
17813 17599 * Arguments: un - ptr to softstate
17814 17600 * comp - component mask
17815 17601	 * title - "title" string to precede data when printed
17816 17602 * data - ptr to data block to be printed
17817 17603 * len - size of data block to be printed
17818 17604 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17819 17605 *
17820 17606 * Context: May be called from interrupt context
17821 17607 */
17822 17608
17823 17609 #define SD_DUMP_MEMORY_BUF_SIZE 256
17824 17610
17825 17611 static char *sd_dump_format_string[] = {
17826 17612 " 0x%02x",
17827 17613 " %c"
17828 17614 };
17829 17615
17830 17616 static void
17831 17617 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17832 17618 int len, int fmt)
17833 17619 {
17834 17620 int i, j;
17835 17621 int avail_count;
17836 17622 int start_offset;
17837 17623 int end_offset;
17838 17624 size_t entry_len;
17839 17625 char *bufp;
17840 17626 char *local_buf;
17841 17627 char *format_string;
17842 17628
17843 17629 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17844 17630
17845 17631 /*
17846 17632 * In the debug version of the driver, this function is called from a
17847 17633 * number of places which are NOPs in the release driver.
17848 17634 * The debug driver therefore has additional methods of filtering
17849 17635 * debug output.
17850 17636 */
17851 17637 #ifdef SDDEBUG
17852 17638 /*
17853 17639 * In the debug version of the driver we can reduce the amount of debug
17854 17640 * messages by setting sd_error_level to something other than
17855 17641 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17856 17642 * sd_component_mask.
17857 17643 */
17858 17644 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17859 17645 (sd_error_level != SCSI_ERR_ALL)) {
17860 17646 return;
17861 17647 }
17862 17648 if (((sd_component_mask & comp) == 0) ||
17863 17649 (sd_error_level != SCSI_ERR_ALL)) {
17864 17650 return;
17865 17651 }
17866 17652 #else
17867 17653 if (sd_error_level != SCSI_ERR_ALL) {
17868 17654 return;
17869 17655 }
17870 17656 #endif
17871 17657
17872 17658 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17873 17659 bufp = local_buf;
17874 17660 /*
17875 17661 * Available length is the length of local_buf[], minus the
17876 17662 * length of the title string, minus one for the ":", minus
17877 17663 * one for the newline, minus one for the NULL terminator.
17878 17664 * This gives the #bytes available for holding the printed
17879 17665 * values from the given data buffer.
17880 17666 */
17881 17667 if (fmt == SD_LOG_HEX) {
17882 17668 format_string = sd_dump_format_string[0];
17883 17669 } else /* SD_LOG_CHAR */ {
17884 17670 format_string = sd_dump_format_string[1];
17885 17671 }
17886 17672 /*
17887 17673 * Available count is the number of elements from the given
17888 17674 * data buffer that we can fit into the available length.
17889 17675 * This is based upon the size of the format string used.
17890 17676	 * Make one entry and find its size.
17891 17677 */
17892 17678 (void) sprintf(bufp, format_string, data[0]);
17893 17679 entry_len = strlen(bufp);
17894 17680 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
17895 17681
17896 17682 j = 0;
17897 17683 while (j < len) {
17898 17684 bufp = local_buf;
17899 17685 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17900 17686 start_offset = j;
17901 17687
17902 17688 end_offset = start_offset + avail_count;
17903 17689
17904 17690 (void) sprintf(bufp, "%s:", title);
17905 17691 bufp += strlen(bufp);
17906 17692 for (i = start_offset; ((i < end_offset) && (j < len));
17907 17693 i++, j++) {
17908 17694 (void) sprintf(bufp, format_string, data[i]);
17909 17695 bufp += entry_len;
17910 17696 }
17911 17697 (void) sprintf(bufp, "\n");
17912 17698
17913 17699 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17914 17700 }
17915 17701 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17916 17702 }
17917 17703
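A quick worked example of the avail_count arithmetic in sd_dump_memory() above, assuming the hex format string: each " 0x%02x" entry is 5 characters, so a title such as "Sense Data" leaves (256 - 10 - 3) / 5 = 48 printable bytes per scsi_log() line.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *title = "Sense Data";
	size_t entry_len = strlen(" 0x2a");	/* one " 0x%02x" entry */
	int avail_count = (256 - (int)strlen(title) - 3) / (int)entry_len;

	/* Prints 48: bytes of dump data carried by each log line. */
	printf("%d entries per line\n", avail_count);
	return (0);
}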
17918 17704 /*
17919 17705 * Function: sd_print_sense_msg
17920 17706 *
17921 17707 * Description: Log a message based upon the given sense data.
17922 17708 *
17923 17709 * Arguments: un - ptr to associated softstate
17924 17710 * bp - ptr to buf(9S) for the command
17925 17711	 * arg - ptr to associated sd_sense_info struct
17926 17712 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17927 17713 * or SD_NO_RETRY_ISSUED
17928 17714 *
17929 17715 * Context: May be called from interrupt context
17930 17716 */
17931 17717
17932 17718 static void
17933 17719 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17934 17720 {
17935 17721 struct sd_xbuf *xp;
17936 17722 struct scsi_pkt *pktp;
17937 17723 uint8_t *sensep;
17938 17724 daddr_t request_blkno;
17939 17725 diskaddr_t err_blkno;
17940 17726 int severity;
17941 17727 int pfa_flag;
17942 17728 extern struct scsi_key_strings scsi_cmds[];
17943 17729
17944 17730 ASSERT(un != NULL);
17945 17731 ASSERT(mutex_owned(SD_MUTEX(un)));
17946 17732 ASSERT(bp != NULL);
17947 17733 xp = SD_GET_XBUF(bp);
17948 17734 ASSERT(xp != NULL);
17949 17735 pktp = SD_GET_PKTP(bp);
17950 17736 ASSERT(pktp != NULL);
17951 17737 ASSERT(arg != NULL);
17952 17738
17953 17739 severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17954 17740 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17955 17741
17956 17742 if ((code == SD_DELAYED_RETRY_ISSUED) ||
17957 17743 (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17958 17744 severity = SCSI_ERR_RETRYABLE;
17959 17745 }
17960 17746
17961 17747 /* Use absolute block number for the request block number */
17962 17748 request_blkno = xp->xb_blkno;
17963 17749
17964 17750 /*
17965 17751 * Now try to get the error block number from the sense data
17966 17752 */
17967 17753 sensep = xp->xb_sense_data;
17968 17754
17969 17755 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
17970 17756 (uint64_t *)&err_blkno)) {
17971 17757 /*
17972 17758 * We retrieved the error block number from the information
17973 17759 * portion of the sense data.
17974 17760 *
17975 17761 * For USCSI commands we are better off using the error
17976 17762 * block no. as the requested block no. (This is the best
17977 17763 * we can estimate.)
17978 17764 */
17979 17765 if ((SD_IS_BUFIO(xp) == FALSE) &&
17980 17766 ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
17981 17767 request_blkno = err_blkno;
17982 17768 }
17983 17769 } else {
17984 17770 /*
17985 17771 * Without the es_valid bit set (for fixed format) or an
17986 17772 * information descriptor (for descriptor format) we cannot
17987 17773 * be certain of the error blkno, so just use the
17988 17774 * request_blkno.
17989 17775 */
17990 17776 err_blkno = (diskaddr_t)request_blkno;
17991 17777 }
17992 17778
17993 17779 /*
17994 17780 * The following will log the buffer contents for the release driver
17995 17781 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
17996 17782 * level is set to verbose.
17997 17783 */
17998 17784 sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
17999 17785 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
18000 17786 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
18001 17787 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
18002 17788
18003 17789 if (pfa_flag == FALSE) {
18004 17790 /* This is normally only set for USCSI */
18005 17791 if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
18006 17792 return;
18007 17793 }
18008 17794
18009 17795 if ((SD_IS_BUFIO(xp) == TRUE) &&
18010 17796 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
18011 17797 (severity < sd_error_level))) {
18012 17798 return;
18013 17799 }
18014 17800 }
18015 17801 /*
18016 17802 * Check for Sonoma Failover and keep a count of how many failed I/O's
18017 17803 */
18018 17804 if ((SD_IS_LSI(un)) &&
18019 17805 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
18020 17806 (scsi_sense_asc(sensep) == 0x94) &&
18021 17807 (scsi_sense_ascq(sensep) == 0x01)) {
18022 17808 un->un_sonoma_failure_count++;
18023 17809 if (un->un_sonoma_failure_count > 1) {
18024 17810 return;
18025 17811 }
18026 17812 }
18027 17813
18028 17814 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
18029 17815 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
18030 17816 (pktp->pkt_resid == 0))) {
18031 17817 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
18032 17818 request_blkno, err_blkno, scsi_cmds,
18033 17819 (struct scsi_extended_sense *)sensep,
18034 17820 un->un_additional_codes, NULL);
18035 17821 }
18036 17822 }
18037 17823
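The block-number selection above reduces to a small policy: trust the sense INFORMATION field when it decodes, and for USCSI (non-buf) I/O also report it as the requested block, since that is the best estimate available. A sketch, where have_sense_lba stands in for the scsi_sense_info_uint64(9F) result:

#include <stdint.h>

/* Sketch of the request/error block selection in sd_print_sense_msg(). */
static void
pick_blknos(int have_sense_lba, uint64_t sense_lba, uint64_t request_lba,
    int is_bufio, int silent, uint64_t *req_out, uint64_t *err_out)
{
	if (have_sense_lba) {
		*err_out = sense_lba;
		/* USCSI: the sense LBA is the best requested-block guess. */
		*req_out = (!is_bufio && !silent) ? sense_lba : request_lba;
	} else {
		/* No valid information field: fall back to the request. */
		*err_out = request_lba;
		*req_out = request_lba;
	}
}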
18038 17824 /*
18039 17825 * Function: sd_sense_key_no_sense
18040 17826 *
18041 17827 * Description: Recovery action when sense data was not received.
18042 17828 *
18043 17829 * Context: May be called from interrupt context
18044 17830 */
18045 17831
18046 17832 static void
18047 17833 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18048 17834 struct scsi_pkt *pktp)
18049 17835 {
18050 17836 struct sd_sense_info si;
18051 17837
18052 17838 ASSERT(un != NULL);
18053 17839 ASSERT(mutex_owned(SD_MUTEX(un)));
18054 17840 ASSERT(bp != NULL);
18055 17841 ASSERT(xp != NULL);
18056 17842 ASSERT(pktp != NULL);
18057 17843
18058 17844 si.ssi_severity = SCSI_ERR_FATAL;
18059 17845 si.ssi_pfa_flag = FALSE;
18060 17846
18061 17847 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18062 17848
18063 17849 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18064 17850 &si, EIO, (clock_t)0, NULL);
18065 17851 }
18066 17852
18067 17853
18068 17854 /*
18069 17855 * Function: sd_sense_key_recoverable_error
18070 17856 *
18071 17857 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
18072 17858 *
18073 17859 * Context: May be called from interrupt context
18074 17860 */
18075 17861
18076 17862 static void
18077 17863 sd_sense_key_recoverable_error(struct sd_lun *un, uint8_t *sense_datap,
18078 17864 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18079 17865 {
18080 17866 struct sd_sense_info si;
18081 17867 uint8_t asc = scsi_sense_asc(sense_datap);
18082 17868 uint8_t ascq = scsi_sense_ascq(sense_datap);
18083 17869
18084 17870 ASSERT(un != NULL);
18085 17871 ASSERT(mutex_owned(SD_MUTEX(un)));
18086 17872 ASSERT(bp != NULL);
18087 17873 ASSERT(xp != NULL);
18088 17874 ASSERT(pktp != NULL);
18089 17875
18090 17876 /*
18091 17877 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE
18092 17878 */
18093 17879 if (asc == 0x00 && ascq == 0x1D) {
18094 17880 sd_return_command(un, bp);
18095 17881 return;
18096 17882 }
18097 17883
18098 17884 /*
18099 17885 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
18100 17886 */
18101 17887 if ((asc == 0x5D) && (sd_report_pfa != 0)) {
18102 17888 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18103 17889 si.ssi_severity = SCSI_ERR_INFO;
18104 17890 si.ssi_pfa_flag = TRUE;
18105 17891 } else {
18106 17892 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18107 17893 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
18108 17894 si.ssi_severity = SCSI_ERR_RECOVERED;
18109 17895 si.ssi_pfa_flag = FALSE;
18110 17896 }
18111 17897
18112 17898 if (pktp->pkt_resid == 0) {
18113 17899 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18114 17900 sd_return_command(un, bp);
18115 17901 return;
18116 17902 }
18117 17903
18118 17904 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18119 17905 &si, EIO, (clock_t)0, NULL);
18120 17906 }
18121 17907
18122 17908
18123 17909
18124 17910
18125 17911 /*
18126 17912 * Function: sd_sense_key_not_ready
18127 17913 *
18128 17914 * Description: Recovery actions for a SCSI "Not Ready" sense key.
18129 17915 *
18130 17916 * Context: May be called from interrupt context
18131 17917 */
18132 17918
18133 17919 static void
18134 17920 sd_sense_key_not_ready(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
18135 17921 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18136 17922 {
18137 17923 struct sd_sense_info si;
18138 17924 uint8_t asc = scsi_sense_asc(sense_datap);
18139 17925 uint8_t ascq = scsi_sense_ascq(sense_datap);
18140 17926
18141 17927 ASSERT(un != NULL);
18142 17928 ASSERT(mutex_owned(SD_MUTEX(un)));
18143 17929 ASSERT(bp != NULL);
18144 17930 ASSERT(xp != NULL);
18145 17931 ASSERT(pktp != NULL);
18146 17932
18147 17933 si.ssi_severity = SCSI_ERR_FATAL;
18148 17934 si.ssi_pfa_flag = FALSE;
18149 17935
18150 17936 /*
18151 17937 * Update error stats after first NOT READY error. Disks may have
18152 17938 * been powered down and may need to be restarted. For CDROMs,
18153 17939 * report NOT READY errors only if media is present.
18154 17940 */
18155 17941 if ((ISCD(un) && (asc == 0x3A)) ||
18156 17942 (xp->xb_nr_retry_count > 0)) {
18157 17943 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18158 17944 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
18159 17945 }
18160 17946
18161 17947 /*
18162 17948 * Just fail if the "not ready" retry limit has been reached.
18163 17949 */
18164 17950 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
18165 17951 /* Special check for error message printing for removables. */
18166 17952 if (un->un_f_has_removable_media && (asc == 0x04) &&
18167 17953 (ascq >= 0x04)) {
18168 17954 si.ssi_severity = SCSI_ERR_ALL;
18169 17955 }
18170 17956 goto fail_command;
18171 17957 }
18172 17958
18173 17959 /*
18174 17960 * Check the ASC and ASCQ in the sense data as needed, to determine
18175 17961 * what to do.
18176 17962 */
18177 17963 switch (asc) {
18178 17964 case 0x04: /* LOGICAL UNIT NOT READY */
18179 17965 /*
18180 17966 * disk drives that don't spin up result in a very long delay
18181 17967 * in format without warning messages. We will log a message
18182 17968 * if the error level is set to verbose.
18183 17969 */
18184 17970 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18185 17971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18186 17972 "logical unit not ready, resetting disk\n");
18187 17973 }
18188 17974
18189 17975 /*
18190 17976 * There are different requirements for CDROMs and disks for
18191 17977 * the number of retries. If a CD-ROM is giving this, it is
18192 17978 * probably reading TOC and is in the process of getting
18193 17979 * ready, so we should keep on trying for a long time to make
18194 17980 * sure that all types of media are taken in account (for
18195 17981 * some media the drive takes a long time to read TOC). For
18196 17982 * disks we do not want to retry this too many times as this
18197 17983 * can cause a long hang in format when the drive refuses to
18198 17984 * spin up (a very common failure).
18199 17985 */
18200 17986 switch (ascq) {
18201 17987 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
18202 17988 /*
18203 17989 * Disk drives frequently refuse to spin up which
18204 17990 * results in a very long hang in format without
18205 17991 * warning messages.
18206 17992 *
18207 17993 * Note: This code preserves the legacy behavior of
18208 17994 * comparing xb_nr_retry_count against zero for fibre
18209 17995 * channel targets instead of comparing against the
18210 17996 * un_reset_retry_count value. The reason for this
18211 17997 * discrepancy has been so utterly lost beneath the
18212 17998 * Sands of Time that even Indiana Jones could not
18213 17999 * find it.
18214 18000 */
18215 18001 if (un->un_f_is_fibre == TRUE) {
18216 18002 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18217 18003 (xp->xb_nr_retry_count > 0)) &&
18218 18004 (un->un_startstop_timeid == NULL)) {
18219 18005 scsi_log(SD_DEVINFO(un), sd_label,
18220 18006 CE_WARN, "logical unit not ready, "
18221 18007 "resetting disk\n");
18222 18008 sd_reset_target(un, pktp);
18223 18009 }
18224 18010 } else {
18225 18011 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18226 18012 (xp->xb_nr_retry_count >
18227 18013 un->un_reset_retry_count)) &&
18228 18014 (un->un_startstop_timeid == NULL)) {
18229 18015 scsi_log(SD_DEVINFO(un), sd_label,
18230 18016 CE_WARN, "logical unit not ready, "
18231 18017 "resetting disk\n");
18232 18018 sd_reset_target(un, pktp);
18233 18019 }
18234 18020 }
18235 18021 break;
18236 18022
18237 18023 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */
18238 18024 /*
18239 18025 * If the target is in the process of becoming
18240 18026 * ready, just proceed with the retry. This can
18241 18027 * happen with CD-ROMs that take a long time to
18242 18028 * read TOC after a power cycle or reset.
18243 18029 */
18244 18030 goto do_retry;
18245 18031
18246 18032		case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */
18247 18033 break;
18248 18034
18249 18035 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
18250 18036 /*
18251 18037 * Retries cannot help here so just fail right away.
18252 18038 */
18253 18039 goto fail_command;
18254 18040
18255 18041 case 0x88:
18256 18042 /*
18257 18043 * Vendor-unique code for T3/T4: it indicates a
18258 18044		 * path problem in a multipathed config, but as far as
18259 18045 * the target driver is concerned it equates to a fatal
18260 18046 * error, so we should just fail the command right away
18261 18047 * (without printing anything to the console). If this
18262 18048 * is not a T3/T4, fall thru to the default recovery
18263 18049 * action.
18264 18050 * T3/T4 is FC only, don't need to check is_fibre
18265 18051 */
18266 18052 if (SD_IS_T3(un) || SD_IS_T4(un)) {
18267 18053 sd_return_failed_command(un, bp, EIO);
18268 18054 return;
18269 18055 }
18270 18056 /* FALLTHRU */
18271 18057
18272 18058 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */
18273 18059 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */
18274 18060 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */
18275 18061 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */
18276 18062 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */
18277 18063 default: /* Possible future codes in SCSI spec? */
18278 18064 /*
18279 18065 * For removable-media devices, do not retry if
18280 18066 * ASCQ > 2 as these result mostly from USCSI commands
18281 18067 * on MMC devices issued to check status of an
18282 18068 * operation initiated in immediate mode. Also for
18283 18069 * ASCQ >= 4 do not print console messages as these
18284 18070 * mainly represent a user-initiated operation
18285 18071 * instead of a system failure.
18286 18072 */
18287 18073 if (un->un_f_has_removable_media) {
18288 18074 si.ssi_severity = SCSI_ERR_ALL;
18289 18075 goto fail_command;
18290 18076 }
18291 18077 break;
18292 18078 }
18293 18079
18294 18080 /*
18295 18081 * As part of our recovery attempt for the NOT READY
18296 18082 * condition, we issue a START STOP UNIT command. However
18297 18083 * we want to wait for a short delay before attempting this
18298 18084 * as there may still be more commands coming back from the
18299 18085 * target with the check condition. To do this we use
18300 18086 * timeout(9F) to call sd_start_stop_unit_callback() after
18301 18087 * the delay interval expires. (sd_start_stop_unit_callback()
18302 18088 * dispatches sd_start_stop_unit_task(), which will issue
18303 18089 * the actual START STOP UNIT command. The delay interval
18304 18090 * is one-half of the delay that we will use to retry the
18305 18091 * command that generated the NOT READY condition.
18306 18092 *
18307 18093 * Note that we could just dispatch sd_start_stop_unit_task()
18308 18094 * from here and allow it to sleep for the delay interval,
18309 18095 * but then we would be tying up the taskq thread
18310 18096 	 * unnecessarily for the duration of the delay.
18311 18097 *
18312 18098 * Do not issue the START STOP UNIT if the current command
18313 18099 * is already a START STOP UNIT.
18314 18100 */
18315 18101 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
18316 18102 break;
18317 18103 }
18318 18104
18319 18105 /*
18320 18106 * Do not schedule the timeout if one is already pending.
18321 18107 */
18322 18108 if (un->un_startstop_timeid != NULL) {
18323 18109 SD_INFO(SD_LOG_ERROR, un,
18324 18110 "sd_sense_key_not_ready: restart already issued to"
18325 18111 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
18326 18112 ddi_get_instance(SD_DEVINFO(un)));
18327 18113 break;
18328 18114 }
18329 18115
18330 18116 /*
18331 18117 * Schedule the START STOP UNIT command, then queue the command
18332 18118 * for a retry.
18333 18119 *
18334 18120 * Note: A timeout is not scheduled for this retry because we
18335 18121 * want the retry to be serial with the START_STOP_UNIT. The
18336 18122 * retry will be started when the START_STOP_UNIT is completed
18337 18123 * in sd_start_stop_unit_task.
18338 18124 */
18339 18125 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
18340 18126 un, un->un_busy_timeout / 2);
18341 18127 xp->xb_nr_retry_count++;
18342 18128 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
18343 18129 return;
18344 18130
18345 18131 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
18346 18132 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18347 18133 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18348 18134 "unit does not respond to selection\n");
18349 18135 }
18350 18136 break;
18351 18137
18352 18138 case 0x3A: /* MEDIUM NOT PRESENT */
18353 18139 if (sd_error_level >= SCSI_ERR_FATAL) {
18354 18140 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18355 18141 "Caddy not inserted in drive\n");
18356 18142 }
18357 18143
18358 18144 sr_ejected(un);
18359 18145 un->un_mediastate = DKIO_EJECTED;
18360 18146 /* The state has changed, inform the media watch routines */
18361 18147 cv_broadcast(&un->un_state_cv);
18362 18148 /* Just fail if no media is present in the drive. */
18363 18149 goto fail_command;
18364 18150
18365 18151 default:
18366 18152 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18367 18153 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
18368 18154 "Unit not Ready. Additional sense code 0x%x\n",
18369 18155 asc);
18370 18156 }
18371 18157 break;
18372 18158 }
18373 18159
18374 18160 do_retry:
18375 18161
18376 18162 /*
18377 18163 * Retry the command, as some targets may report NOT READY for
18378 18164 * several seconds after being reset.
18379 18165 */
18380 18166 xp->xb_nr_retry_count++;
18381 18167 si.ssi_severity = SCSI_ERR_RETRYABLE;
18382 18168 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
18383 18169 &si, EIO, un->un_busy_timeout, NULL);
18384 18170
18385 18171 return;
18386 18172
18387 18173 fail_command:
18388 18174 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18389 18175 sd_return_failed_command(un, bp, EIO);
18390 18176 }
18391 18177
18392 18178
18393 18179
18394 18180 /*
18395 18181 * Function: sd_sense_key_medium_or_hardware_error
18396 18182 *
18397 18183 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
18398 18184 * sense key.
18399 18185 *
18400 18186 * Context: May be called from interrupt context
18401 18187 */
18402 18188
18403 18189 static void
18404 18190 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, uint8_t *sense_datap,
18405 18191 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18406 18192 {
18407 18193 struct sd_sense_info si;
18408 18194 uint8_t sense_key = scsi_sense_key(sense_datap);
18409 18195 uint8_t asc = scsi_sense_asc(sense_datap);
18410 18196
18411 18197 ASSERT(un != NULL);
18412 18198 ASSERT(mutex_owned(SD_MUTEX(un)));
18413 18199 ASSERT(bp != NULL);
18414 18200 ASSERT(xp != NULL);
18415 18201 ASSERT(pktp != NULL);
18416 18202
18417 18203 si.ssi_severity = SCSI_ERR_FATAL;
18418 18204 si.ssi_pfa_flag = FALSE;
18419 18205
18420 18206 if (sense_key == KEY_MEDIUM_ERROR) {
18421 18207 SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
18422 18208 }
18423 18209
18424 18210 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18425 18211
18426 18212 if ((un->un_reset_retry_count != 0) &&
18427 18213 (xp->xb_retry_count == un->un_reset_retry_count)) {
18428 18214 mutex_exit(SD_MUTEX(un));
18429 18215 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
18430 18216 if (un->un_f_allow_bus_device_reset == TRUE) {
18431 18217
18432 18218 boolean_t try_resetting_target = B_TRUE;
18433 18219
18434 18220 /*
18435 18221 * We need to be able to handle specific ASC when we are
18436 18222 * handling a KEY_HARDWARE_ERROR. In particular
18437 18223 * taking the default action of resetting the target may
18438 18224 * not be the appropriate way to attempt recovery.
18439 18225 * Resetting a target because of a single LUN failure
18440 18226 * victimizes all LUNs on that target.
18441 18227 *
18442 18228 			 * This is true for LSI arrays: if an LSI array
18443 18229 			 * controller returns an ASC of 0x84 (LUN Dead), we
18444 18230 			 * should trust it.
18445 18231 */
18446 18232
18447 18233 if (sense_key == KEY_HARDWARE_ERROR) {
18448 18234 switch (asc) {
18449 18235 case 0x84:
18450 18236 if (SD_IS_LSI(un)) {
18451 18237 try_resetting_target = B_FALSE;
18452 18238 }
18453 18239 break;
18454 18240 default:
18455 18241 break;
18456 18242 }
18457 18243 }
18458 18244
18459 18245 if (try_resetting_target == B_TRUE) {
18460 18246 int reset_retval = 0;
18461 18247 if (un->un_f_lun_reset_enabled == TRUE) {
18462 18248 SD_TRACE(SD_LOG_IO_CORE, un,
18463 18249 "sd_sense_key_medium_or_hardware_"
18464 18250 "error: issuing RESET_LUN\n");
18465 18251 reset_retval =
18466 18252 scsi_reset(SD_ADDRESS(un),
18467 18253 RESET_LUN);
18468 18254 }
18469 18255 if (reset_retval == 0) {
18470 18256 SD_TRACE(SD_LOG_IO_CORE, un,
18471 18257 "sd_sense_key_medium_or_hardware_"
18472 18258 "error: issuing RESET_TARGET\n");
18473 18259 (void) scsi_reset(SD_ADDRESS(un),
18474 18260 RESET_TARGET);
18475 18261 }
18476 18262 }
18477 18263 }
18478 18264 mutex_enter(SD_MUTEX(un));
18479 18265 }
18480 18266
18481 18267 /*
18482 18268 * This really ought to be a fatal error, but we will retry anyway
18483 18269 * as some drives report this as a spurious error.
18484 18270 */
18485 18271 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18486 18272 &si, EIO, (clock_t)0, NULL);
18487 18273 }
18488 18274
18489 18275
18490 18276
18491 18277 /*
18492 18278 * Function: sd_sense_key_illegal_request
18493 18279 *
18494 18280 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18495 18281 *
18496 18282 * Context: May be called from interrupt context
18497 18283 */
18498 18284
18499 18285 static void
18500 18286 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
18501 18287 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18502 18288 {
18503 18289 struct sd_sense_info si;
18504 18290
18505 18291 ASSERT(un != NULL);
18506 18292 ASSERT(mutex_owned(SD_MUTEX(un)));
18507 18293 ASSERT(bp != NULL);
18508 18294 ASSERT(xp != NULL);
18509 18295 ASSERT(pktp != NULL);
18510 18296
18511 18297 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
18512 18298
18513 18299 si.ssi_severity = SCSI_ERR_INFO;
18514 18300 si.ssi_pfa_flag = FALSE;
18515 18301
18516 18302 /* Pointless to retry if the target thinks it's an illegal request */
18517 18303 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18518 18304 sd_return_failed_command(un, bp, EIO);
18519 18305 }
18520 18306
18521 18307
18522 18308
18523 18309
18524 18310 /*
18525 18311 * Function: sd_sense_key_unit_attention
18526 18312 *
18527 18313 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
18528 18314 *
18529 18315 * Context: May be called from interrupt context
18530 18316 */
18531 18317
18532 18318 static void
18533 18319 sd_sense_key_unit_attention(struct sd_lun *un, uint8_t *sense_datap,
18534 18320 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18535 18321 {
18536 18322 /*
18537 18323 * For UNIT ATTENTION we allow retries for one minute. Devices
18538 18324 	 * like Sonoma can return UNIT ATTENTION for close to a minute
18539 18325 * under certain conditions.
18540 18326 */
18541 18327 int retry_check_flag = SD_RETRIES_UA;
18542 18328 boolean_t kstat_updated = B_FALSE;
18543 18329 struct sd_sense_info si;
18544 18330 uint8_t asc = scsi_sense_asc(sense_datap);
18545 18331 uint8_t ascq = scsi_sense_ascq(sense_datap);
18546 18332
18547 18333 ASSERT(un != NULL);
18548 18334 ASSERT(mutex_owned(SD_MUTEX(un)));
18549 18335 ASSERT(bp != NULL);
18550 18336 ASSERT(xp != NULL);
18551 18337 ASSERT(pktp != NULL);
18552 18338
18553 18339 si.ssi_severity = SCSI_ERR_INFO;
18554 18340 si.ssi_pfa_flag = FALSE;
18555 18341
18556 18342
18557 18343 switch (asc) {
18558 18344 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */
18559 18345 if (sd_report_pfa != 0) {
18560 18346 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18561 18347 si.ssi_pfa_flag = TRUE;
18562 18348 retry_check_flag = SD_RETRIES_STANDARD;
18563 18349 goto do_retry;
18564 18350 }
18565 18351
18566 18352 break;
18567 18353
18568 18354 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
18569 18355 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
18570 18356 un->un_resvd_status |=
18571 18357 (SD_LOST_RESERVE | SD_WANT_RESERVE);
18572 18358 }
18573 18359 #ifdef _LP64
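		/*
		 * A reset may have reverted the device from descriptor to
		 * fixed sense data. Capacities beyond Group 1 (32-bit LBA)
		 * addressing depend on descriptor sense, so re-enable it
		 * from taskq context.
		 */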
18574 18360 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
18575 18361 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
18576 18362 un, KM_NOSLEEP) == 0) {
18577 18363 /*
18578 18364 * If we can't dispatch the task we'll just
18579 18365 * live without descriptor sense. We can
18580 18366 * try again on the next "unit attention"
18581 18367 */
18582 18368 SD_ERROR(SD_LOG_ERROR, un,
18583 18369 "sd_sense_key_unit_attention: "
18584 18370 "Could not dispatch "
18585 18371 "sd_reenable_dsense_task\n");
18586 18372 }
18587 18373 }
18588 18374 #endif /* _LP64 */
18589 18375 /* FALLTHRU */
18590 18376
18591 18377 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
18592 18378 if (!un->un_f_has_removable_media) {
18593 18379 break;
18594 18380 }
18595 18381
18596 18382 /*
18597 18383 * When we get a unit attention from a removable-media device,
18598 18384 * it may be in a state that will take a long time to recover
18599 18385 * (e.g., from a reset). Since we are executing in interrupt
18600 18386 * context here, we cannot wait around for the device to come
18601 18387 * back. So hand this command off to sd_media_change_task()
18602 18388 * for deferred processing under taskq thread context. (Note
18603 18389 * that the command still may be failed if a problem is
18604 18390 * encountered at a later time.)
18605 18391 */
18606 18392 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
18607 18393 KM_NOSLEEP) == 0) {
18608 18394 /*
18609 18395 * Cannot dispatch the request so fail the command.
18610 18396 */
18611 18397 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18612 18398 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18613 18399 si.ssi_severity = SCSI_ERR_FATAL;
18614 18400 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18615 18401 sd_return_failed_command(un, bp, EIO);
18616 18402 }
18617 18403
18618 18404 		/*
18619 18405 		 * If the dispatch of sd_media_change_task() failed, the
18620 18406 		 * kstats were already updated above. If the dispatch
18621 18407 		 * succeeded, the kstats will be updated later if the task
18622 18408 		 * encounters an error. Either way, set kstat_updated here.
18623 18409 		 */
18624 18410 kstat_updated = B_TRUE;
18625 18411
18626 18412 /*
18627 18413 * Either the command has been successfully dispatched to a
18628 18414 * task Q for retrying, or the dispatch failed. In either case
18629 18415 * do NOT retry again by calling sd_retry_command. This sets up
18630 18416 * two retries of the same command and when one completes and
18631 18417 * frees the resources the other will access freed memory,
18632 18418 * a bad thing.
18633 18419 */
18634 18420 return;
18635 18421
18636 18422 default:
18637 18423 break;
18638 18424 }
18639 18425
18640 18426 /*
18641 18427 * ASC ASCQ
18642 18428 * 2A 09 Capacity data has changed
18643 18429 * 2A 01 Mode parameters changed
18644 18430 * 3F 0E Reported luns data has changed
18645 18431 	 * Arrays that support logical unit expansion should report
18646 18432 	 * capacity changes (2Ah/09); the other two codes are
18647 18433 	 * approximations of the same event.
18648 18434 */
18649 18435 if (((asc == 0x2a) && (ascq == 0x09)) ||
18650 18436 ((asc == 0x2a) && (ascq == 0x01)) ||
18651 18437 ((asc == 0x3f) && (ascq == 0x0e))) {
18652 18438 if (taskq_dispatch(sd_tq, sd_target_change_task, un,
18653 18439 KM_NOSLEEP) == 0) {
18654 18440 SD_ERROR(SD_LOG_ERROR, un,
18655 18441 "sd_sense_key_unit_attention: "
18656 18442 "Could not dispatch sd_target_change_task\n");
18657 18443 }
18658 18444 }
18659 18445
18660 18446 /*
18661 18447 * Update kstat if we haven't done that.
18662 18448 */
18663 18449 if (!kstat_updated) {
18664 18450 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18665 18451 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18666 18452 }
18667 18453
18668 18454 do_retry:
18669 18455 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
18670 18456 EIO, SD_UA_RETRY_DELAY, NULL);
18671 18457 }
18672 18458
18673 18459
18674 18460
18675 18461 /*
18676 18462 * Function: sd_sense_key_fail_command
18677 18463 *
18678 18464  * Description: Used to fail a command when we don't like the sense key that
18679 18465 * was returned.
18680 18466 *
18681 18467 * Context: May be called from interrupt context
18682 18468 */
18683 18469
18684 18470 static void
18685 18471 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18686 18472 struct scsi_pkt *pktp)
18687 18473 {
18688 18474 struct sd_sense_info si;
18689 18475
18690 18476 ASSERT(un != NULL);
18691 18477 ASSERT(mutex_owned(SD_MUTEX(un)));
18692 18478 ASSERT(bp != NULL);
18693 18479 ASSERT(xp != NULL);
18694 18480 ASSERT(pktp != NULL);
18695 18481
18696 18482 si.ssi_severity = SCSI_ERR_FATAL;
18697 18483 si.ssi_pfa_flag = FALSE;
18698 18484
18699 18485 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18700 18486 sd_return_failed_command(un, bp, EIO);
18701 18487 }
18702 18488
18703 18489
18704 18490
18705 18491 /*
18706 18492 * Function: sd_sense_key_blank_check
18707 18493 *
18708 18494 * Description: Recovery actions for a SCSI "Blank Check" sense key.
18709 18495 * Has no monetary connotation.
18710 18496 *
18711 18497 * Context: May be called from interrupt context
18712 18498 */
18713 18499
18714 18500 static void
18715 18501 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18716 18502 struct scsi_pkt *pktp)
18717 18503 {
18718 18504 struct sd_sense_info si;
18719 18505
18720 18506 ASSERT(un != NULL);
18721 18507 ASSERT(mutex_owned(SD_MUTEX(un)));
18722 18508 ASSERT(bp != NULL);
18723 18509 ASSERT(xp != NULL);
18724 18510 ASSERT(pktp != NULL);
18725 18511
18726 18512 /*
18727 18513 * Blank check is not fatal for removable devices, therefore
18728 18514 * it does not require a console message.
18729 18515 */
18730 18516 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
18731 18517 SCSI_ERR_FATAL;
18732 18518 si.ssi_pfa_flag = FALSE;
18733 18519
18734 18520 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18735 18521 sd_return_failed_command(un, bp, EIO);
18736 18522 }
18737 18523
18738 18524
18739 18525
18740 18526
18741 18527 /*
18742 18528 * Function: sd_sense_key_aborted_command
18743 18529 *
18744 18530 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
18745 18531 *
18746 18532 * Context: May be called from interrupt context
18747 18533 */
18748 18534
18749 18535 static void
18750 18536 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
18751 18537 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18752 18538 {
18753 18539 struct sd_sense_info si;
18754 18540
18755 18541 ASSERT(un != NULL);
18756 18542 ASSERT(mutex_owned(SD_MUTEX(un)));
18757 18543 ASSERT(bp != NULL);
18758 18544 ASSERT(xp != NULL);
18759 18545 ASSERT(pktp != NULL);
18760 18546
18761 18547 si.ssi_severity = SCSI_ERR_FATAL;
18762 18548 si.ssi_pfa_flag = FALSE;
18763 18549
18764 18550 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18765 18551
18766 18552 /*
18767 18553 * This really ought to be a fatal error, but we will retry anyway
18768 18554 * as some drives report this as a spurious error.
18769 18555 */
18770 18556 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18771 18557 &si, EIO, drv_usectohz(100000), NULL);
18772 18558 }
18773 18559
18774 18560
18775 18561
18776 18562 /*
18777 18563 * Function: sd_sense_key_default
18778 18564 *
18779 18565 * Description: Default recovery action for several SCSI sense keys (basically
18780 18566 * attempts a retry).
18781 18567 *
18782 18568 * Context: May be called from interrupt context
18783 18569 */
18784 18570
18785 18571 static void
18786 18572 sd_sense_key_default(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
18787 18573 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18788 18574 {
18789 18575 struct sd_sense_info si;
18790 18576 uint8_t sense_key = scsi_sense_key(sense_datap);
18791 18577
18792 18578 ASSERT(un != NULL);
18793 18579 ASSERT(mutex_owned(SD_MUTEX(un)));
18794 18580 ASSERT(bp != NULL);
18795 18581 ASSERT(xp != NULL);
18796 18582 ASSERT(pktp != NULL);
18797 18583
18798 18584 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18799 18585
18800 18586 /*
18801 18587 * Undecoded sense key. Attempt retries and hope that will fix
18802 18588 * the problem. Otherwise, we're dead.
18803 18589 */
18804 18590 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
18805 18591 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18806 18592 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
18807 18593 }
18808 18594
18809 18595 si.ssi_severity = SCSI_ERR_FATAL;
18810 18596 si.ssi_pfa_flag = FALSE;
18811 18597
18812 18598 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18813 18599 &si, EIO, (clock_t)0, NULL);
18814 18600 }
18815 18601
18816 18602
18817 18603
18818 18604 /*
18819 18605 * Function: sd_print_retry_msg
18820 18606 *
18821 18607 * Description: Print a message indicating the retry action being taken.
18822 18608 *
18823 18609 * Arguments: un - ptr to associated softstate
18824 18610 * bp - ptr to buf(9S) for the command
18825 18611 * arg - not used.
18826 18612 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18827 18613 * or SD_NO_RETRY_ISSUED
18828 18614 *
18829 18615 * Context: May be called from interrupt context
18830 18616 */
18831 18617 /* ARGSUSED */
18832 18618 static void
18833 18619 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
18834 18620 {
18835 18621 struct sd_xbuf *xp;
18836 18622 struct scsi_pkt *pktp;
18837 18623 char *reasonp;
18838 18624 char *msgp;
18839 18625
18840 18626 ASSERT(un != NULL);
18841 18627 ASSERT(mutex_owned(SD_MUTEX(un)));
18842 18628 ASSERT(bp != NULL);
18843 18629 pktp = SD_GET_PKTP(bp);
18844 18630 ASSERT(pktp != NULL);
18845 18631 xp = SD_GET_XBUF(bp);
18846 18632 ASSERT(xp != NULL);
18847 18633
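	/*
	 * Skip the console message entirely if the device is suspended or
	 * in low power, or if the command was marked FLAG_SILENT; only the
	 * pkt_reason bookkeeping below is performed in those cases.
	 */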
18848 18634 ASSERT(!mutex_owned(&un->un_pm_mutex));
18849 18635 mutex_enter(&un->un_pm_mutex);
18850 18636 if ((un->un_state == SD_STATE_SUSPENDED) ||
18851 18637 (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
18852 18638 (pktp->pkt_flags & FLAG_SILENT)) {
18853 18639 mutex_exit(&un->un_pm_mutex);
18854 18640 goto update_pkt_reason;
18855 18641 }
18856 18642 mutex_exit(&un->un_pm_mutex);
18857 18643
18858 18644 /*
18859 18645 * Suppress messages if they are all the same pkt_reason; with
18860 18646 * TQ, many (up to 256) are returned with the same pkt_reason.
18861 18647 * If we are in panic, then suppress the retry messages.
18862 18648 */
18863 18649 switch (flag) {
18864 18650 case SD_NO_RETRY_ISSUED:
18865 18651 msgp = "giving up";
18866 18652 break;
18867 18653 case SD_IMMEDIATE_RETRY_ISSUED:
18868 18654 case SD_DELAYED_RETRY_ISSUED:
18869 18655 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
18870 18656 ((pktp->pkt_reason == un->un_last_pkt_reason) &&
18871 18657 (sd_error_level != SCSI_ERR_ALL))) {
18872 18658 return;
18873 18659 }
18874 18660 msgp = "retrying command";
18875 18661 break;
18876 18662 default:
18877 18663 goto update_pkt_reason;
18878 18664 }
18879 18665
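	/* Report "parity error" in preference to the raw pkt_reason name. */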
18880 18666 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
18881 18667 scsi_rname(pktp->pkt_reason));
18882 18668
18883 18669 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
18884 18670 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18885 18671 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
18886 18672 }
18887 18673
18888 18674 update_pkt_reason:
18889 18675 /*
18890 18676 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
18891 18677 * This is to prevent multiple console messages for the same failure
18892 18678 * condition. Note that un->un_last_pkt_reason is NOT restored if &
18893 18679 * when the command is retried successfully because there still may be
18894 18680 * more commands coming back with the same value of pktp->pkt_reason.
18895 18681 */
18896 18682 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
18897 18683 un->un_last_pkt_reason = pktp->pkt_reason;
18898 18684 }
18899 18685 }
18900 18686
18901 18687
18902 18688 /*
18903 18689 * Function: sd_print_cmd_incomplete_msg
18904 18690 *
18905 18691 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
18906 18692 *
18907 18693 * Arguments: un - ptr to associated softstate
18908 18694 * bp - ptr to buf(9S) for the command
18909 18695 * arg - passed to sd_print_retry_msg()
18910 18696 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18911 18697 * or SD_NO_RETRY_ISSUED
18912 18698 *
18913 18699 * Context: May be called from interrupt context
18914 18700 */
18915 18701
18916 18702 static void
18917 18703 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
18918 18704 int code)
18919 18705 {
18920 18706 dev_info_t *dip;
18921 18707
18922 18708 ASSERT(un != NULL);
18923 18709 ASSERT(mutex_owned(SD_MUTEX(un)));
18924 18710 ASSERT(bp != NULL);
18925 18711
18926 18712 switch (code) {
18927 18713 case SD_NO_RETRY_ISSUED:
18928 18714 /* Command was failed. Someone turned off this target? */
18929 18715 if (un->un_state != SD_STATE_OFFLINE) {
18930 18716 /*
18931 18717 * Suppress message if we are detaching and
18932 18718 			 * Suppress the message if we are detaching and
18933 18719 			 * the device has been disconnected.
18934 18720 			 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation-
18935 18721 			 * private interface and not part of the DDI.
18936 18722 dip = un->un_sd->sd_dev;
18937 18723 if (!(DEVI_IS_DETACHING(dip) &&
18938 18724 DEVI_IS_DEVICE_REMOVED(dip))) {
18939 18725 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18940 18726 "disk not responding to selection\n");
18941 18727 }
18942 18728 New_state(un, SD_STATE_OFFLINE);
18943 18729 }
18944 18730 break;
18945 18731
18946 18732 case SD_DELAYED_RETRY_ISSUED:
18947 18733 case SD_IMMEDIATE_RETRY_ISSUED:
18948 18734 default:
18949 18735 /* Command was successfully queued for retry */
18950 18736 sd_print_retry_msg(un, bp, arg, code);
18951 18737 break;
18952 18738 }
18953 18739 }
18954 18740
18955 18741
18956 18742 /*
18957 18743 * Function: sd_pkt_reason_cmd_incomplete
18958 18744 *
18959 18745 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
18960 18746 *
18961 18747 * Context: May be called from interrupt context
18962 18748 */
18963 18749
18964 18750 static void
18965 18751 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
18966 18752 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18967 18753 {
18968 18754 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;
18969 18755
18970 18756 ASSERT(un != NULL);
18971 18757 ASSERT(mutex_owned(SD_MUTEX(un)));
18972 18758 ASSERT(bp != NULL);
18973 18759 ASSERT(xp != NULL);
18974 18760 ASSERT(pktp != NULL);
18975 18761
18976 18762 /* Do not do a reset if selection did not complete */
18977 18763 /* Note: Should this not just check the bit? */
18978 18764 if (pktp->pkt_state != STATE_GOT_BUS) {
18979 18765 SD_UPDATE_ERRSTATS(un, sd_transerrs);
18980 18766 sd_reset_target(un, pktp);
18981 18767 }
18982 18768
18983 18769 /*
18984 18770 * If the target was not successfully selected, then set
18985 18771 * SD_RETRIES_FAILFAST to indicate that we lost communication
18986 18772 * with the target, and further retries and/or commands are
18987 18773 * likely to take a long time.
18988 18774 */
18989 18775 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
18990 18776 flag |= SD_RETRIES_FAILFAST;
18991 18777 }
18992 18778
18993 18779 SD_UPDATE_RESERVATION_STATUS(un, pktp);
18994 18780
18995 18781 sd_retry_command(un, bp, flag,
18996 18782 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
18997 18783 }
18998 18784
18999 18785
19000 18786
19001 18787 /*
19002 18788 * Function: sd_pkt_reason_cmd_tran_err
19003 18789 *
19004 18790 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
19005 18791 *
19006 18792 * Context: May be called from interrupt context
19007 18793 */
19008 18794
19009 18795 static void
19010 18796 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
19011 18797 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19012 18798 {
19013 18799 ASSERT(un != NULL);
19014 18800 ASSERT(mutex_owned(SD_MUTEX(un)));
19015 18801 ASSERT(bp != NULL);
19016 18802 ASSERT(xp != NULL);
19017 18803 ASSERT(pktp != NULL);
19018 18804
19019 18805 /*
19020 18806 * Do not reset if we got a parity error, or if
19021 18807 * selection did not complete.
19022 18808 */
19023 18809 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19024 18810 /* Note: Should this not just check the bit for pkt_state? */
19025 18811 if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
19026 18812 (pktp->pkt_state != STATE_GOT_BUS)) {
19027 18813 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19028 18814 sd_reset_target(un, pktp);
19029 18815 }
19030 18816
19031 18817 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19032 18818
19033 18819 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19034 18820 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19035 18821 }
19036 18822
19037 18823
19038 18824
19039 18825 /*
19040 18826 * Function: sd_pkt_reason_cmd_reset
19041 18827 *
19042 18828 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
19043 18829 *
19044 18830 * Context: May be called from interrupt context
19045 18831 */
19046 18832
19047 18833 static void
19048 18834 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19049 18835 struct scsi_pkt *pktp)
19050 18836 {
19051 18837 ASSERT(un != NULL);
19052 18838 ASSERT(mutex_owned(SD_MUTEX(un)));
19053 18839 ASSERT(bp != NULL);
19054 18840 ASSERT(xp != NULL);
19055 18841 ASSERT(pktp != NULL);
19056 18842
19057 18843 /* The target may still be running the command, so try to reset. */
19058 18844 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19059 18845 sd_reset_target(un, pktp);
19060 18846
19061 18847 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19062 18848
19063 18849 /*
19064 18850 * If pkt_reason is CMD_RESET chances are that this pkt got
19065 18851 * reset because another target on this bus caused it. The target
19066 18852 * that caused it should get CMD_TIMEOUT with pkt_statistics
19067 18853 * of STAT_TIMEOUT/STAT_DEV_RESET.
19068 18854 */
19069 18855
19070 18856 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19071 18857 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19072 18858 }
19073 18859
19074 18860
19075 18861
19076 18862
19077 18863 /*
19078 18864 * Function: sd_pkt_reason_cmd_aborted
19079 18865 *
19080 18866 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
19081 18867 *
19082 18868 * Context: May be called from interrupt context
19083 18869 */
19084 18870
19085 18871 static void
19086 18872 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19087 18873 struct scsi_pkt *pktp)
19088 18874 {
19089 18875 ASSERT(un != NULL);
19090 18876 ASSERT(mutex_owned(SD_MUTEX(un)));
19091 18877 ASSERT(bp != NULL);
19092 18878 ASSERT(xp != NULL);
19093 18879 ASSERT(pktp != NULL);
19094 18880
19095 18881 /* The target may still be running the command, so try to reset. */
19096 18882 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19097 18883 sd_reset_target(un, pktp);
19098 18884
19099 18885 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19100 18886
19101 18887 /*
19102 18888 * If pkt_reason is CMD_ABORTED chances are that this pkt got
19103 18889 * aborted because another target on this bus caused it. The target
19104 18890 * that caused it should get CMD_TIMEOUT with pkt_statistics
19105 18891 * of STAT_TIMEOUT/STAT_DEV_RESET.
19106 18892 */
19107 18893
19108 18894 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19109 18895 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19110 18896 }
19111 18897
19112 18898
19113 18899
19114 18900 /*
19115 18901 * Function: sd_pkt_reason_cmd_timeout
19116 18902 *
19117 18903 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
19118 18904 *
19119 18905 * Context: May be called from interrupt context
19120 18906 */
19121 18907
19122 18908 static void
19123 18909 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19124 18910 struct scsi_pkt *pktp)
19125 18911 {
19126 18912 ASSERT(un != NULL);
19127 18913 ASSERT(mutex_owned(SD_MUTEX(un)));
19128 18914 ASSERT(bp != NULL);
19129 18915 ASSERT(xp != NULL);
19130 18916 ASSERT(pktp != NULL);
19131 18917
19132 18918
19133 18919 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19134 18920 sd_reset_target(un, pktp);
19135 18921
19136 18922 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19137 18923
19138 18924 /*
19139 18925 * A command timeout indicates that we could not establish
19140 18926 * communication with the target, so set SD_RETRIES_FAILFAST
19141 18927 * as further retries/commands are likely to take a long time.
19142 18928 */
19143 18929 sd_retry_command(un, bp,
19144 18930 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
19145 18931 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19146 18932 }
19147 18933
19148 18934
19149 18935
19150 18936 /*
19151 18937 * Function: sd_pkt_reason_cmd_unx_bus_free
19152 18938 *
19153 18939 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
19154 18940 *
19155 18941 * Context: May be called from interrupt context
19156 18942 */
19157 18943
19158 18944 static void
19159 18945 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
19160 18946 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19161 18947 {
19162 18948 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);
19163 18949
19164 18950 ASSERT(un != NULL);
19165 18951 ASSERT(mutex_owned(SD_MUTEX(un)));
1425 lines elided
19166 18952 ASSERT(bp != NULL);
19167 18953 ASSERT(xp != NULL);
19168 18954 ASSERT(pktp != NULL);
19169 18955
19170 18956 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19171 18957 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19172 18958
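	/*
	 * Suppress the retry message when the unexpected bus free was
	 * accompanied by a parity error.
	 */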
19173 18959 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
19174 18960 sd_print_retry_msg : NULL;
19175 18961
19176 - sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
18962 + sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19177 18963 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19178 18964 }
19179 18965
19180 18966
19181 18967 /*
19182 18968 * Function: sd_pkt_reason_cmd_tag_reject
19183 18969 *
19184 18970 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
19185 18971 *
19186 18972 * Context: May be called from interrupt context
19187 18973 */
19188 18974
19189 18975 static void
19190 18976 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
19191 18977 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19192 18978 {
19193 18979 ASSERT(un != NULL);
19194 18980 ASSERT(mutex_owned(SD_MUTEX(un)));
19195 18981 ASSERT(bp != NULL);
19196 18982 ASSERT(xp != NULL);
19197 18983 ASSERT(pktp != NULL);
19198 18984
19199 18985 SD_UPDATE_ERRSTATS(un, sd_harderrs);
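	/*
	 * The target rejected the tag, so disable tagged queueing for this
	 * device: clear the tag flags, clamp the throttle to the reduced
	 * queue depth, and inform the HBA via scsi_ifsetcap(9F).
	 */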
19200 18986 pktp->pkt_flags = 0;
19201 18987 un->un_tagflags = 0;
19202 18988 if (un->un_f_opt_queueing == TRUE) {
19203 18989 un->un_throttle = min(un->un_throttle, 3);
19204 18990 } else {
19205 18991 un->un_throttle = 1;
19206 18992 }
19207 18993 mutex_exit(SD_MUTEX(un));
19208 18994 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
19209 18995 mutex_enter(SD_MUTEX(un));
19210 18996
19211 18997 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19212 18998
19213 18999 /* Legacy behavior not to check retry counts here. */
19214 19000 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
19215 19001 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19216 19002 }
19217 19003
19218 19004
19219 19005 /*
19220 19006 * Function: sd_pkt_reason_default
19221 19007 *
19222 19008 * Description: Default recovery actions for SCSA pkt_reason values that
19223 19009 * do not have more explicit recovery actions.
19224 19010 *
19225 19011 * Context: May be called from interrupt context
19226 19012 */
19227 19013
19228 19014 static void
19229 19015 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19230 19016 struct scsi_pkt *pktp)
19231 19017 {
19232 19018 ASSERT(un != NULL);
19233 19019 ASSERT(mutex_owned(SD_MUTEX(un)));
19234 19020 ASSERT(bp != NULL);
19235 19021 ASSERT(xp != NULL);
19236 19022 ASSERT(pktp != NULL);
19237 19023
19238 19024 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19239 19025 sd_reset_target(un, pktp);
19240 19026
19241 19027 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19242 19028
19243 19029 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19244 19030 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19245 19031 }
19246 19032
19247 19033
19248 19034
19249 19035 /*
19250 19036 * Function: sd_pkt_status_check_condition
19251 19037 *
19252 19038 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
19253 19039 *
19254 19040 * Context: May be called from interrupt context
19255 19041 */
19256 19042
19257 19043 static void
19258 19044 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
19259 19045 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19260 19046 {
19261 19047 ASSERT(un != NULL);
19262 19048 ASSERT(mutex_owned(SD_MUTEX(un)));
19263 19049 ASSERT(bp != NULL);
19264 19050 ASSERT(xp != NULL);
19265 19051 ASSERT(pktp != NULL);
19266 19052
19267 19053 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
19268 19054 "entry: buf:0x%p xp:0x%p\n", bp, xp);
82 lines elided
19269 19055
19270 19056 /*
19271 19057 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
19272 19058 * command will be retried after the request sense). Otherwise, retry
19273 19059 * the command. Note: we are issuing the request sense even though the
19274 19060 * retry limit may have been reached for the failed command.
19275 19061 */
19276 19062 if (un->un_f_arq_enabled == FALSE) {
19277 19063 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19278 19064 "no ARQ, sending request sense command\n");
19279 - sd_send_request_sense_command(un, bp, pktp);
19065 + sd_send_request_sense_command(un, bp, SD_RETRIES_STANDARD,
19066 + pktp);
19280 19067 } else {
19281 19068 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19282 19069 "ARQ,retrying request sense command\n");
19283 -#if defined(__i386) || defined(__amd64)
19284 - /*
19285 - * The SD_RETRY_DELAY value need to be adjusted here
19286 - * when SD_RETRY_DELAY change in sddef.h
19287 - */
19288 19070 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19289 - un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
19071 + un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
19290 19072 NULL);
19291 -#else
19292 - sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
19293 - EIO, SD_RETRY_DELAY, NULL);
19294 -#endif
19295 19073 }
19296 19074
19297 19075 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
19298 19076 }
19299 19077
19300 19078
19301 19079 /*
19302 19080 * Function: sd_pkt_status_busy
19303 19081 *
19304 19082 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19305 19083 *
19306 19084 * Context: May be called from interrupt context
19307 19085 */
19308 19086
19309 19087 static void
19310 19088 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19311 19089 struct scsi_pkt *pktp)
19312 19090 {
19313 19091 ASSERT(un != NULL);
19314 19092 ASSERT(mutex_owned(SD_MUTEX(un)));
19315 19093 ASSERT(bp != NULL);
19316 19094 ASSERT(xp != NULL);
19317 19095 ASSERT(pktp != NULL);
19318 19096
19319 19097 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19320 19098 "sd_pkt_status_busy: entry\n");
19321 19099
19322 19100 /* If retries are exhausted, just fail the command. */
19323 19101 if (xp->xb_retry_count >= un->un_busy_retry_count) {
19324 19102 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19325 19103 "device busy too long\n");
19326 19104 sd_return_failed_command(un, bp, EIO);
19327 19105 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19328 19106 "sd_pkt_status_busy: exit\n");
19329 19107 return;
19330 19108 }
19331 19109 xp->xb_retry_count++;
19332 19110
19333 19111 /*
19334 19112 * Try to reset the target. However, we do not want to perform
19335 19113 * more than one reset if the device continues to fail. The reset
19336 19114 * will be performed when the retry count reaches the reset
19337 19115 * threshold. This threshold should be set such that at least
19338 19116 * one retry is issued before the reset is performed.
19339 19117 */
19340 19118 if (xp->xb_retry_count ==
19341 19119 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
19342 19120 int rval = 0;
19343 19121 mutex_exit(SD_MUTEX(un));
19344 19122 if (un->un_f_allow_bus_device_reset == TRUE) {
19345 19123 /*
19346 19124 * First try to reset the LUN; if we cannot then
19347 19125 * try to reset the target.
19348 19126 */
19349 19127 if (un->un_f_lun_reset_enabled == TRUE) {
19350 19128 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19351 19129 "sd_pkt_status_busy: RESET_LUN\n");
19352 19130 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19353 19131 }
19354 19132 if (rval == 0) {
19355 19133 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19356 19134 "sd_pkt_status_busy: RESET_TARGET\n");
19357 19135 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19358 19136 }
19359 19137 }
19360 19138 if (rval == 0) {
19361 19139 /*
19362 19140 * If the RESET_LUN and/or RESET_TARGET failed,
19363 19141 * try RESET_ALL
19364 19142 */
19365 19143 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19366 19144 "sd_pkt_status_busy: RESET_ALL\n");
19367 19145 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
19368 19146 }
19369 19147 mutex_enter(SD_MUTEX(un));
19370 19148 if (rval == 0) {
19371 19149 /*
19372 19150 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
19373 19151 * At this point we give up & fail the command.
19374 19152 */
19375 19153 sd_return_failed_command(un, bp, EIO);
19376 19154 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19377 19155 "sd_pkt_status_busy: exit (failed cmd)\n");
19378 19156 return;
19379 19157 }
19380 19158 }
19381 19159
19382 19160 /*
19383 19161 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
19384 19162 * we have already checked the retry counts above.
19385 19163 */
19386 19164 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
19387 19165 EIO, un->un_busy_timeout, NULL);
19388 19166
19389 19167 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19390 19168 "sd_pkt_status_busy: exit\n");
19391 19169 }
19392 19170
19393 19171
19394 19172 /*
19395 19173 * Function: sd_pkt_status_reservation_conflict
19396 19174 *
19397 19175 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
19398 19176 * command status.
19399 19177 *
19400 19178 * Context: May be called from interrupt context
19401 19179 */
19402 19180
19403 19181 static void
19404 19182 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
19405 19183 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19406 19184 {
19407 19185 ASSERT(un != NULL);
19408 19186 ASSERT(mutex_owned(SD_MUTEX(un)));
19409 19187 ASSERT(bp != NULL);
19410 19188 ASSERT(xp != NULL);
19411 19189 ASSERT(pktp != NULL);
19412 19190
19413 19191 /*
19414 19192 	 * If the command was PERSISTENT_RESERVATION_[IN|OUT], the conflict
19415 19193 	 * could be due to various reasons, such as incorrect keys or not
19416 19194 	 * being registered or reserved, so we return EACCES to the caller.
19417 19195 */
19418 19196 if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
19419 19197 int cmd = SD_GET_PKT_OPCODE(pktp);
19420 19198 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
19421 19199 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
19422 19200 sd_return_failed_command(un, bp, EACCES);
19423 19201 return;
19424 19202 }
19425 19203 }
19426 19204
19427 19205 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
19428 19206
19429 19207 if ((un->un_resvd_status & SD_FAILFAST) != 0) {
19430 19208 if (sd_failfast_enable != 0) {
19431 19209 /* By definition, we must panic here.... */
19432 19210 sd_panic_for_res_conflict(un);
19433 19211 /*NOTREACHED*/
19434 19212 }
19435 19213 SD_ERROR(SD_LOG_IO, un,
19436 19214 "sd_handle_resv_conflict: Disk Reserved\n");
19437 19215 sd_return_failed_command(un, bp, EACCES);
19438 19216 return;
19439 19217 }
19440 19218
19441 19219 /*
19442 19220 * 1147670: retry only if sd_retry_on_reservation_conflict
19443 19221 * property is set (default is 1). Retries will not succeed
19444 19222 * on a disk reserved by another initiator. HA systems
19445 19223 * may reset this via sd.conf to avoid these retries.
19446 19224 *
19447 19225 * Note: The legacy return code for this failure is EIO, however EACCES
19448 19226 * seems more appropriate for a reservation conflict.
19449 19227 */
19450 19228 if (sd_retry_on_reservation_conflict == 0) {
19451 19229 SD_ERROR(SD_LOG_IO, un,
19452 19230 "sd_handle_resv_conflict: Device Reserved\n");
19453 19231 sd_return_failed_command(un, bp, EIO);
19454 19232 return;
19455 19233 }
19456 19234
19457 19235 /*
19458 19236 * Retry the command if we can.
19459 19237 *
19460 19238 * Note: The legacy return code for this failure is EIO, however EACCES
19461 19239 * seems more appropriate for a reservation conflict.
19462 19240 */
19463 19241 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19464 19242 (clock_t)2, NULL);
19465 19243 }
19466 19244
19467 19245
19468 19246
19469 19247 /*
19470 19248 * Function: sd_pkt_status_qfull
19471 19249 *
19472 19250 * Description: Handle a QUEUE FULL condition from the target. This can
19473 19251 * occur if the HBA does not handle the queue full condition.
19474 19252 * (Basically this means third-party HBAs as Sun HBAs will
19475 19253 * handle the queue full condition.) Note that if there are
19476 19254 * some commands already in the transport, then the queue full
19477 19255 * has occurred because the queue for this nexus is actually
19478 19256 * full. If there are no commands in the transport, then the
19479 19257 * queue full is resulting from some other initiator or lun
19480 19258 * consuming all the resources at the target.
19481 19259 *
19482 19260 * Context: May be called from interrupt context
19483 19261 */
19484 19262
19485 19263 static void
19486 19264 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19487 19265 struct scsi_pkt *pktp)
19488 19266 {
19489 19267 ASSERT(un != NULL);
19490 19268 ASSERT(mutex_owned(SD_MUTEX(un)));
19491 19269 ASSERT(bp != NULL);
19492 19270 ASSERT(xp != NULL);
19493 19271 ASSERT(pktp != NULL);
19494 19272
19495 19273 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19496 19274 "sd_pkt_status_qfull: entry\n");
19497 19275
19498 19276 /*
19499 19277 * Just lower the QFULL throttle and retry the command. Note that
19500 19278 * we do not limit the number of retries here.
19501 19279 */
19502 19280 sd_reduce_throttle(un, SD_THROTTLE_QFULL);
19503 19281 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
19504 19282 SD_RESTART_TIMEOUT, NULL);
19505 19283
19506 19284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19507 19285 "sd_pkt_status_qfull: exit\n");
19508 19286 }
19509 19287
19510 19288
19511 19289 /*
19512 19290 * Function: sd_reset_target
19513 19291 *
19514 19292 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
19515 19293 * RESET_TARGET, or RESET_ALL.
19516 19294 *
19517 19295 * Context: May be called under interrupt context.
19518 19296 */
19519 19297
19520 19298 static void
19521 19299 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
19522 19300 {
19523 19301 int rval = 0;
19524 19302
19525 19303 ASSERT(un != NULL);
19526 19304 ASSERT(mutex_owned(SD_MUTEX(un)));
19527 19305 ASSERT(pktp != NULL);
19528 19306
19529 19307 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");
19530 19308
19531 19309 /*
19532 19310 * No need to reset if the transport layer has already done so.
19533 19311 */
19534 19312 if ((pktp->pkt_statistics &
19535 19313 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
19536 19314 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19537 19315 "sd_reset_target: no reset\n");
19538 19316 return;
19539 19317 }
19540 19318
19541 19319 mutex_exit(SD_MUTEX(un));
19542 19320
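	/*
	 * Escalate through the reset hierarchy: try the narrowest scope
	 * first (RESET_LUN), then RESET_TARGET, and fall back to RESET_ALL
	 * only if the finer-grained resets fail or are not allowed.
	 */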
19543 19321 if (un->un_f_allow_bus_device_reset == TRUE) {
19544 19322 if (un->un_f_lun_reset_enabled == TRUE) {
19545 19323 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19546 19324 "sd_reset_target: RESET_LUN\n");
19547 19325 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19548 19326 }
19549 19327 if (rval == 0) {
19550 19328 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19551 19329 "sd_reset_target: RESET_TARGET\n");
19552 19330 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19553 19331 }
19554 19332 }
19555 19333
19556 19334 if (rval == 0) {
19557 19335 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19558 19336 "sd_reset_target: RESET_ALL\n");
19559 19337 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
19560 19338 }
19561 19339
19562 19340 mutex_enter(SD_MUTEX(un));
19563 19341
19564 19342 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
19565 19343 }
19566 19344
19567 19345 /*
19568 19346 * Function: sd_target_change_task
19569 19347 *
19570 19348 * Description: Handle dynamic target change
19571 19349 *
19572 19350 * Context: Executes in a taskq() thread context
19573 19351 */
19574 19352 static void
19575 19353 sd_target_change_task(void *arg)
19576 19354 {
19577 19355 struct sd_lun *un = arg;
19578 19356 uint64_t capacity;
19579 19357 diskaddr_t label_cap;
19580 19358 uint_t lbasize;
19581 19359 sd_ssc_t *ssc;
19582 19360
19583 19361 ASSERT(un != NULL);
19584 19362 ASSERT(!mutex_owned(SD_MUTEX(un)));
19585 19363
19586 19364 if ((un->un_f_blockcount_is_valid == FALSE) ||
19587 19365 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
19588 19366 return;
19589 19367 }
19590 19368
19591 19369 ssc = sd_ssc_init(un);
19592 19370
19593 19371 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
19594 19372 &lbasize, SD_PATH_DIRECT) != 0) {
19595 19373 SD_ERROR(SD_LOG_ERROR, un,
19596 19374 "sd_target_change_task: fail to read capacity\n");
19597 19375 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19598 19376 goto task_exit;
19599 19377 }
19600 19378
19601 19379 mutex_enter(SD_MUTEX(un));
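	/* Only a capacity increase is treated as dynamic LUN expansion. */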
19602 19380 if (capacity <= un->un_blockcount) {
19603 19381 mutex_exit(SD_MUTEX(un));
19604 19382 goto task_exit;
19605 19383 }
19606 19384
19607 19385 sd_update_block_info(un, lbasize, capacity);
19608 19386 mutex_exit(SD_MUTEX(un));
19609 19387
19610 19388 /*
19611 19389 * If lun is EFI labeled and lun capacity is greater than the
19612 19390 * capacity contained in the label, log a sys event.
19613 19391 */
19614 19392 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
19615 19393 	    (void *)SD_PATH_DIRECT) == 0) {
19616 19394 mutex_enter(SD_MUTEX(un));
19617 19395 if (un->un_f_blockcount_is_valid &&
19618 19396 un->un_blockcount > label_cap) {
19619 19397 mutex_exit(SD_MUTEX(un));
19620 19398 sd_log_lun_expansion_event(un, KM_SLEEP);
19621 19399 } else {
19622 19400 mutex_exit(SD_MUTEX(un));
19623 19401 }
19624 19402 }
19625 19403
19626 19404 task_exit:
19627 19405 sd_ssc_fini(ssc);
19628 19406 }
19629 19407
19630 19408
19631 19409 /*
19632 19410 * Function: sd_log_dev_status_event
19633 19411 *
19634 19412 * Description: Log EC_dev_status sysevent
19635 19413 *
19636 19414 * Context: Never called from interrupt context
19637 19415 */
19638 19416 static void
19639 19417 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag)
19640 19418 {
19641 19419 int err;
19642 19420 char *path;
19643 19421 nvlist_t *attr_list;
19644 19422
19645 19423 /* Allocate and build sysevent attribute list */
19646 19424 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
19647 19425 if (err != 0) {
19648 19426 SD_ERROR(SD_LOG_ERROR, un,
19649 19427 "sd_log_dev_status_event: fail to allocate space\n");
19650 19428 return;
19651 19429 }
19652 19430
19653 19431 path = kmem_alloc(MAXPATHLEN, km_flag);
19654 19432 if (path == NULL) {
19655 19433 nvlist_free(attr_list);
19656 19434 SD_ERROR(SD_LOG_ERROR, un,
19657 19435 "sd_log_dev_status_event: fail to allocate space\n");
19658 19436 return;
19659 19437 }
19660 19438 /*
19661 19439 * Add path attribute to identify the lun.
19662 19440 * We are using minor node 'a' as the sysevent attribute.
19663 19441 */
19664 19442 (void) snprintf(path, MAXPATHLEN, "/devices");
19665 19443 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
19666 19444 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
19667 19445 ":a");
19668 19446
19669 19447 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path);
19670 19448 if (err != 0) {
19671 19449 nvlist_free(attr_list);
19672 19450 kmem_free(path, MAXPATHLEN);
19673 19451 SD_ERROR(SD_LOG_ERROR, un,
19674 19452 "sd_log_dev_status_event: fail to add attribute\n");
19675 19453 return;
19676 19454 }
19677 19455
19678 19456 /* Log dynamic lun expansion sysevent */
19679 19457 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
19680 19458 esc, attr_list, NULL, km_flag);
19681 19459 if (err != DDI_SUCCESS) {
19682 19460 SD_ERROR(SD_LOG_ERROR, un,
19683 19461 "sd_log_dev_status_event: fail to log sysevent\n");
19684 19462 }
19685 19463
19686 19464 nvlist_free(attr_list);
19687 19465 kmem_free(path, MAXPATHLEN);
19688 19466 }
19689 19467
19690 19468
19691 19469 /*
19692 19470 * Function: sd_log_lun_expansion_event
19693 19471 *
19694 19472 * Description: Log lun expansion sys event
19695 19473 *
19696 19474 * Context: Never called from interrupt context
19697 19475 */
19698 19476 static void
19699 19477 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
19700 19478 {
19701 19479 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag);
19702 19480 }
19703 19481
19704 19482
19705 19483 /*
19706 19484 * Function: sd_log_eject_request_event
19707 19485 *
19708 19486 * Description: Log eject request sysevent
19709 19487 *
19710 19488 * Context: Never called from interrupt context
19711 19489 */
19712 19490 static void
19713 19491 sd_log_eject_request_event(struct sd_lun *un, int km_flag)
19714 19492 {
19715 19493 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag);
19716 19494 }
19717 19495
19718 19496
19719 19497 /*
19720 19498 * Function: sd_media_change_task
19721 19499 *
19722 19500  * Description: Recovery action to wait for a CDROM to become available.
19723 19501 *
19724 19502 * Context: Executes in a taskq() thread context
19725 19503 */
19726 19504
19727 19505 static void
19728 19506 sd_media_change_task(void *arg)
19729 19507 {
19730 19508 struct scsi_pkt *pktp = arg;
19731 19509 struct sd_lun *un;
19732 19510 struct buf *bp;
19733 19511 struct sd_xbuf *xp;
19734 19512 int err = 0;
19735 19513 int retry_count = 0;
19736 19514 	int		retry_limit = SD_UNIT_ATTENTION_RETRY / 10;
19737 19515 struct sd_sense_info si;
19738 19516
19739 19517 ASSERT(pktp != NULL);
19740 19518 bp = (struct buf *)pktp->pkt_private;
19741 19519 ASSERT(bp != NULL);
19742 19520 xp = SD_GET_XBUF(bp);
19743 19521 ASSERT(xp != NULL);
19744 19522 un = SD_GET_UN(bp);
19745 19523 ASSERT(un != NULL);
19746 19524 ASSERT(!mutex_owned(SD_MUTEX(un)));
19747 19525 ASSERT(un->un_f_monitor_media_state);
19748 19526
19749 19527 si.ssi_severity = SCSI_ERR_INFO;
19750 19528 si.ssi_pfa_flag = FALSE;
19751 19529
19752 19530 /*
19753 19531 * When a reset is issued on a CDROM, it takes a long time to
19754 19532 	 * recover. The first few attempts to read capacity and other
19755 19533 	 * things related to handling the unit attention fail (with an
19756 19534 	 * ASC of 0x4 and an ASCQ of 0x1). In that case we want to do
19757 19535 	 * enough retries, while limiting the retries in other cases of
19758 19536 	 * genuine failure, such as no media in the drive.
19759 19537 */
19760 19538 while (retry_count++ < retry_limit) {
19761 19539 if ((err = sd_handle_mchange(un)) == 0) {
19762 19540 break;
19763 19541 }
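		/* On EAGAIN, extend the budget to the full unit attention limit. */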
19764 19542 if (err == EAGAIN) {
19765 19543 retry_limit = SD_UNIT_ATTENTION_RETRY;
19766 19544 }
19767 19545 /* Sleep for 0.5 sec. & try again */
19768 19546 delay(drv_usectohz(500000));
19769 19547 }
19770 19548
19771 19549 /*
19772 19550 * Dispatch (retry or fail) the original command here,
19773 19551 * along with appropriate console messages....
19774 19552 *
19775 19553 * Must grab the mutex before calling sd_retry_command,
19776 19554 * sd_print_sense_msg and sd_return_failed_command.
19777 19555 */
19778 19556 mutex_enter(SD_MUTEX(un));
19779 19557 if (err != SD_CMD_SUCCESS) {
19780 19558 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19781 19559 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
19782 19560 si.ssi_severity = SCSI_ERR_FATAL;
19783 19561 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
19784 19562 sd_return_failed_command(un, bp, EIO);
19785 19563 } else {
19786 19564 sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
19787 19565 &si, EIO, (clock_t)0, NULL);
19788 19566 }
19789 19567 mutex_exit(SD_MUTEX(un));
19790 19568 }
19791 19569
19792 19570
19793 19571
19794 19572 /*
19795 19573 * Function: sd_handle_mchange
19796 19574 *
19797 19575 * Description: Perform geometry validation & other recovery when CDROM
19798 19576  *		has been removed from the drive.
19799 19577 *
19800 19578 * Return Code: 0 for success
19801 19579 * errno-type return code of either sd_send_scsi_DOORLOCK() or
19802 19580 * sd_send_scsi_READ_CAPACITY()
19803 19581 *
19804 19582 * Context: Executes in a taskq() thread context
19805 19583 */
19806 19584
19807 19585 static int
19808 19586 sd_handle_mchange(struct sd_lun *un)
19809 19587 {
19810 19588 uint64_t capacity;
19811 19589 uint32_t lbasize;
19812 19590 int rval;
19813 19591 sd_ssc_t *ssc;
19814 19592
19815 19593 ASSERT(!mutex_owned(SD_MUTEX(un)));
19816 19594 ASSERT(un->un_f_monitor_media_state);
19817 19595
19818 19596 ssc = sd_ssc_init(un);
19819 19597 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
19820 19598 SD_PATH_DIRECT_PRIORITY);
19821 19599
19822 19600 if (rval != 0)
19823 19601 goto failed;
19824 19602
19825 19603 mutex_enter(SD_MUTEX(un));
19826 19604 sd_update_block_info(un, lbasize, capacity);
19827 19605
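	/* Refresh the capacity kstat to reflect the new media size. */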
19828 19606 if (un->un_errstats != NULL) {
19829 19607 struct sd_errstats *stp =
19830 19608 (struct sd_errstats *)un->un_errstats->ks_data;
19831 19609 stp->sd_capacity.value.ui64 = (uint64_t)
19832 19610 ((uint64_t)un->un_blockcount *
19833 19611 (uint64_t)un->un_tgt_blocksize);
19834 19612 }
19835 19613
19836 19614 /*
19837 19615 * Check if the media in the device is writable or not
19838 19616 */
19839 19617 if (ISCD(un)) {
19840 19618 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
19841 19619 }
19842 19620
19843 19621 /*
19844 19622 * Note: Maybe let the strategy/partitioning chain worry about getting
19845 19623 * valid geometry.
19846 19624 */
19847 19625 mutex_exit(SD_MUTEX(un));
19848 19626 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
19849 19627
19850 19628
19851 19629 if (cmlb_validate(un->un_cmlbhandle, 0,
19852 19630 (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
19853 19631 sd_ssc_fini(ssc);
19854 19632 return (EIO);
19855 19633 } else {
19856 19634 if (un->un_f_pkstats_enabled) {
19857 19635 sd_set_pstats(un);
19858 19636 SD_TRACE(SD_LOG_IO_PARTITION, un,
19859 19637 "sd_handle_mchange: un:0x%p pstats created and "
19860 19638 "set\n", un);
19861 19639 }
19862 19640 }
19863 19641
19864 19642 /*
19865 19643 * Try to lock the door
19866 19644 */
19867 19645 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
19868 19646 SD_PATH_DIRECT_PRIORITY);
19869 19647 failed:
19870 19648 if (rval != 0)
19871 19649 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19872 19650 sd_ssc_fini(ssc);
19873 19651 return (rval);
19874 19652 }
19875 19653
19876 19654
19877 19655 /*
19878 19656 * Function: sd_send_scsi_DOORLOCK
19879 19657 *
19880 19658 * Description: Issue the scsi DOOR LOCK command
19881 19659 *
19882 19660 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19883 19661 * structure for this target.
19884 19662 * flag - SD_REMOVAL_ALLOW
19885 19663 * SD_REMOVAL_PREVENT
19886 19664 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19887 19665 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19888 19666 * to use the USCSI "direct" chain and bypass the normal
19889 19667 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19890 19668 * command is issued as part of an error recovery action.
19891 19669 *
19892 19670 * Return Code: 0 - Success
19893 19671 * errno return code from sd_ssc_send()
19894 19672 *
19895 19673 * Context: Can sleep.
19896 19674 */
19897 19675
19898 19676 static int
19899 19677 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
19900 19678 {
19901 19679 struct scsi_extended_sense sense_buf;
19902 19680 union scsi_cdb cdb;
19903 19681 struct uscsi_cmd ucmd_buf;
19904 19682 int status;
19905 19683 struct sd_lun *un;
19906 19684
19907 19685 ASSERT(ssc != NULL);
19908 19686 un = ssc->ssc_un;
19909 19687 ASSERT(un != NULL);
19910 19688 ASSERT(!mutex_owned(SD_MUTEX(un)));
19911 19689
19912 19690 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
19913 19691
19914 19692 /* already determined doorlock is not supported, fake success */
19915 19693 if (un->un_f_doorlock_supported == FALSE) {
19916 19694 return (0);
19917 19695 }
19918 19696
19919 19697 /*
19920 19698 * If we are ejecting and see an SD_REMOVAL_PREVENT
19921 19699 * ignore the command so we can complete the eject
19922 19700 * operation.
19923 19701 */
19924 19702 if (flag == SD_REMOVAL_PREVENT) {
19925 19703 mutex_enter(SD_MUTEX(un));
19926 19704 if (un->un_f_ejecting == TRUE) {
19927 19705 mutex_exit(SD_MUTEX(un));
19928 19706 return (EAGAIN);
19929 19707 }
19930 19708 mutex_exit(SD_MUTEX(un));
19931 19709 }
19932 19710
19933 19711 bzero(&cdb, sizeof (cdb));
19934 19712 bzero(&ucmd_buf, sizeof (ucmd_buf));
19935 19713
19936 19714 cdb.scc_cmd = SCMD_DOORLOCK;
19937 19715 cdb.cdb_opaque[4] = (uchar_t)flag;
19938 19716
19939 19717 ucmd_buf.uscsi_cdb = (char *)&cdb;
19940 19718 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19941 19719 ucmd_buf.uscsi_bufaddr = NULL;
19942 19720 ucmd_buf.uscsi_buflen = 0;
19943 19721 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19944 19722 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19945 19723 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19946 19724 ucmd_buf.uscsi_timeout = 15;
19947 19725
19948 19726 SD_TRACE(SD_LOG_IO, un,
19949 19727 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19950 19728
19951 19729 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19952 19730 UIO_SYSSPACE, path_flag);
19953 19731
19954 19732 if (status == 0)
19955 19733 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19956 19734
19957 19735 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
19958 19736 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19959 19737 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
19960 19738 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19961 19739
19962 19740 /* fake success and skip subsequent doorlock commands */
19963 19741 un->un_f_doorlock_supported = FALSE;
19964 19742 return (0);
19965 19743 }
19966 19744
19967 19745 return (status);
19968 19746 }
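
For reference, a sketch of the 6-byte PREVENT ALLOW MEDIUM REMOVAL CDB the
routine above builds (opcode 0x1E, prevent flag in byte 4);
build_doorlock_cdb() is a hypothetical helper, not driver code:

#include <stdint.h>
#include <string.h>

#define	SCMD_DOORLOCK	0x1E	/* PREVENT ALLOW MEDIUM REMOVAL */

static void
build_doorlock_cdb(uint8_t cdb[6], int prevent)
{
	(void) memset(cdb, 0, 6);
	cdb[0] = SCMD_DOORLOCK;
	cdb[4] = prevent ? 1 : 0;	/* 1 = prevent removal, 0 = allow */
}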
19969 19747
19970 19748 /*
19971 19749 * Function: sd_send_scsi_READ_CAPACITY
19972 19750 *
19973 19751 * Description: This routine uses the scsi READ CAPACITY command to determine
19974 19752 * the device capacity in number of blocks and the device native
19975 19753 * block size. If this function returns a failure, then the
19976 19754 * values in *capp and *lbap are undefined. If the capacity
19977 19755 * returned is 0xffffffff then the lun is too large for a
19978 19756 * normal READ CAPACITY command and the results of a
19979 19757 * READ CAPACITY 16 will be used instead.
19980 19758 *
19981 19759 * Arguments: ssc - ssc contains ptr to soft state struct for the target
19982 19760 * capp - ptr to unsigned 64-bit variable to receive the
19983 19761 * capacity value from the command.
19984 19762  *		lbap - ptr to unsigned 32-bit variable to receive the
19985 19763 * block size value from the command
19986 19764 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19987 19765 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19988 19766 * to use the USCSI "direct" chain and bypass the normal
19989 19767 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19990 19768 * command is issued as part of an error recovery action.
19991 19769 *
19992 19770 * Return Code: 0 - Success
19993 19771 * EIO - IO error
19994 19772 * EACCES - Reservation conflict detected
19995 19773 * EAGAIN - Device is becoming ready
19996 19774 * errno return code from sd_ssc_send()
19997 19775 *
19998 19776 * Context: Can sleep. Blocks until command completes.
19999 19777 */
20000 19778
20001 19779 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
20002 19780
20003 19781 static int
20004 19782 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
20005 19783 int path_flag)
20006 19784 {
20007 19785 struct scsi_extended_sense sense_buf;
20008 19786 struct uscsi_cmd ucmd_buf;
20009 19787 union scsi_cdb cdb;
20010 19788 uint32_t *capacity_buf;
20011 19789 uint64_t capacity;
20012 19790 uint32_t lbasize;
20013 19791 uint32_t pbsize;
20014 19792 int status;
20015 19793 struct sd_lun *un;
20016 19794
20017 19795 ASSERT(ssc != NULL);
20018 19796
20019 19797 un = ssc->ssc_un;
20020 19798 ASSERT(un != NULL);
20021 19799 ASSERT(!mutex_owned(SD_MUTEX(un)));
20022 19800 ASSERT(capp != NULL);
20023 19801 ASSERT(lbap != NULL);
20024 19802
20025 19803 SD_TRACE(SD_LOG_IO, un,
20026 19804 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20027 19805
20028 19806 /*
20029 19807 * First send a READ_CAPACITY command to the target.
20030 19808 * (This command is mandatory under SCSI-2.)
20031 19809 *
20032 19810 * Set up the CDB for the READ_CAPACITY command. The Partial
20033 19811 * Medium Indicator bit is cleared. The address field must be
20034 19812 * zero if the PMI bit is zero.
20035 19813 */
20036 19814 bzero(&cdb, sizeof (cdb));
20037 19815 bzero(&ucmd_buf, sizeof (ucmd_buf));
20038 19816
20039 19817 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
[ 735 lines elided ]
20040 19818
20041 19819 cdb.scc_cmd = SCMD_READ_CAPACITY;
20042 19820
20043 19821 ucmd_buf.uscsi_cdb = (char *)&cdb;
20044 19822 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20045 19823 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
20046 19824 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
20047 19825 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20048 19826 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20049 19827 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20050 - ucmd_buf.uscsi_timeout = 60;
19828 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
20051 19829
20052 19830 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20053 19831 UIO_SYSSPACE, path_flag);
20054 19832
20055 19833 switch (status) {
20056 19834 case 0:
20057 19835 /* Return failure if we did not get valid capacity data. */
20058 19836 if (ucmd_buf.uscsi_resid != 0) {
20059 19837 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20060 19838 "sd_send_scsi_READ_CAPACITY received invalid "
20061 19839 "capacity data");
20062 19840 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20063 19841 return (EIO);
20064 19842 }
20065 19843 /*
20066 19844 * Read capacity and block size from the READ CAPACITY 10 data.
20067 19845 * This data may be adjusted later due to device specific
20068 19846 * issues.
20069 19847 *
20070 19848 * According to the SCSI spec, the READ CAPACITY 10
20071 19849 * command returns the following:
20072 19850 *
20073 19851 * bytes 0-3: Maximum logical block address available.
20074 19852 * (MSB in byte:0 & LSB in byte:3)
20075 19853 *
20076 19854 * bytes 4-7: Block length in bytes
20077 19855 * (MSB in byte:4 & LSB in byte:7)
20078 19856 *
20079 19857 */
20080 19858 capacity = BE_32(capacity_buf[0]);
20081 19859 lbasize = BE_32(capacity_buf[1]);
20082 19860
20083 19861 /*
20084 19862 * Done with capacity_buf
20085 19863 */
20086 19864 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20087 19865
20088 19866 /*
20089 19867 * if the reported capacity is set to all 0xf's, then
20090 19868 * this disk is too large and requires SBC-2 commands.
20091 19869 * Reissue the request using READ CAPACITY 16.
20092 19870 */
20093 19871 if (capacity == 0xffffffff) {
20094 19872 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20095 19873 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
20096 19874 &lbasize, &pbsize, path_flag);
20097 19875 if (status != 0) {
20098 19876 return (status);
20099 19877 } else {
20100 19878 goto rc16_done;
20101 19879 }
20102 19880 }
20103 19881 break; /* Success! */
20104 19882 case EIO:
20105 19883 switch (ucmd_buf.uscsi_status) {
20106 19884 case STATUS_RESERVATION_CONFLICT:
20107 19885 status = EACCES;
20108 19886 break;
20109 19887 case STATUS_CHECK:
20110 19888 /*
20111 19889 * Check condition; look for ASC/ASCQ of 0x04/0x01
20112 19890 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20113 19891 */
20114 19892 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20115 19893 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20116 19894 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20117 19895 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20118 19896 return (EAGAIN);
20119 19897 }
20120 19898 break;
20121 19899 default:
20122 19900 break;
20123 19901 }
20124 19902 /* FALLTHRU */
20125 19903 default:
20126 19904 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20127 19905 return (status);
20128 19906 }
20129 19907
20130 19908 /*
20131 19909 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20132 19910 * (2352 and 0 are common) so for these devices always force the value
20133 19911 * to 2048 as required by the ATAPI specs.
20134 19912 */
20135 19913 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20136 19914 lbasize = 2048;
20137 19915 }
20138 19916
20139 19917 /*
20140 19918 * Get the maximum LBA value from the READ CAPACITY data.
20141 19919 * Here we assume that the Partial Medium Indicator (PMI) bit
20142 19920 * was cleared when issuing the command. This means that the LBA
20143 19921 * returned from the device is the LBA of the last logical block
20144 19922 * on the logical unit. The actual logical block count will be
20145 19923 * this value plus one.
20146 19924 */
20147 19925 capacity += 1;
20148 19926
20149 19927 /*
20150 19928 * Currently, for removable media, the capacity is saved in terms
20151 19929 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20152 19930 */
20153 19931 if (un->un_f_has_removable_media)
20154 19932 capacity *= (lbasize / un->un_sys_blocksize);
20155 19933
20156 19934 rc16_done:
20157 19935
20158 19936 /*
20159 19937 * Copy the values from the READ CAPACITY command into the space
20160 19938 * provided by the caller.
20161 19939 */
20162 19940 *capp = capacity;
20163 19941 *lbap = lbasize;
20164 19942
20165 19943 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
20166 19944 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
20167 19945
20168 19946 /*
20169 19947 * Both the lbasize and capacity from the device must be nonzero,
20170 19948 * otherwise we assume that the values are not valid and return
20171 19949 * failure to the caller. (4203735)
20172 19950 */
20173 19951 if ((capacity == 0) || (lbasize == 0)) {
20174 19952 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20175 19953 "sd_send_scsi_READ_CAPACITY received invalid value "
20176 19954 "capacity %llu lbasize %d", capacity, lbasize);
20177 19955 return (EIO);
20178 19956 }
20179 19957 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20180 19958 return (0);
20181 19959 }
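
The byte layout described above can be decoded without the driver's BE_32()
macros. A self-contained sketch, assuming the raw 8-byte READ CAPACITY(10)
data is already in hand:

#include <stdint.h>

static int
decode_read_capacity10(const uint8_t d[8], uint64_t *blocks, uint32_t *blksize)
{
	uint32_t last_lba = ((uint32_t)d[0] << 24) | ((uint32_t)d[1] << 16) |
	    ((uint32_t)d[2] << 8) | d[3];

	if (last_lba == 0xffffffff)
		return (-1);	/* too large: use READ CAPACITY(16) instead */

	*blocks = (uint64_t)last_lba + 1;	/* last LBA -> block count */
	*blksize = ((uint32_t)d[4] << 24) | ((uint32_t)d[5] << 16) |
	    ((uint32_t)d[6] << 8) | d[7];
	return (0);
}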
20182 19960
20183 19961 /*
20184 19962 * Function: sd_send_scsi_READ_CAPACITY_16
20185 19963 *
20186 19964 * Description: This routine uses the scsi READ CAPACITY 16 command to
20187 19965 * determine the device capacity in number of blocks and the
20188 19966 * device native block size. If this function returns a failure,
20189 19967 * then the values in *capp and *lbap are undefined.
20190 19968 * This routine should be called by sd_send_scsi_READ_CAPACITY
20191 19969 * which will apply any device specific adjustments to capacity
20192 19970 * and lbasize. One exception is it is also called by
20193 19971 * sd_get_media_info_ext. In that function, there is no need to
20194 19972 * adjust the capacity and lbasize.
20195 19973 *
20196 19974 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20197 19975 * capp - ptr to unsigned 64-bit variable to receive the
20198 19976 * capacity value from the command.
20199 19977  *		lbap - ptr to unsigned 32-bit variable to receive the
20200 19978 * block size value from the command
20201 19979 * psp - ptr to unsigned 32-bit variable to receive the
20202 19980 * physical block size value from the command
20203 19981 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20204 19982 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20205 19983 * to use the USCSI "direct" chain and bypass the normal
20206 19984 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
20207 19985 * this command is issued as part of an error recovery
20208 19986 * action.
20209 19987 *
20210 19988 * Return Code: 0 - Success
20211 19989 * EIO - IO error
20212 19990 * EACCES - Reservation conflict detected
20213 19991 * EAGAIN - Device is becoming ready
20214 19992 * errno return code from sd_ssc_send()
20215 19993 *
20216 19994 * Context: Can sleep. Blocks until command completes.
20217 19995 */
20218 19996
20219 19997 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
20220 19998
20221 19999 static int
20222 20000 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
20223 20001 uint32_t *psp, int path_flag)
20224 20002 {
20225 20003 struct scsi_extended_sense sense_buf;
20226 20004 struct uscsi_cmd ucmd_buf;
20227 20005 union scsi_cdb cdb;
20228 20006 uint64_t *capacity16_buf;
20229 20007 uint64_t capacity;
20230 20008 uint32_t lbasize;
20231 20009 uint32_t pbsize;
20232 20010 uint32_t lbpb_exp;
20233 20011 int status;
20234 20012 struct sd_lun *un;
20235 20013
20236 20014 ASSERT(ssc != NULL);
20237 20015
20238 20016 un = ssc->ssc_un;
20239 20017 ASSERT(un != NULL);
20240 20018 ASSERT(!mutex_owned(SD_MUTEX(un)));
20241 20019 ASSERT(capp != NULL);
20242 20020 ASSERT(lbap != NULL);
20243 20021
20244 20022 SD_TRACE(SD_LOG_IO, un,
20245 20023 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20246 20024
20247 20025 /*
20248 20026 * First send a READ_CAPACITY_16 command to the target.
20249 20027 *
20250 20028 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
20251 20029 * Medium Indicator bit is cleared. The address field must be
20252 20030 * zero if the PMI bit is zero.
20253 20031 */
20254 20032 bzero(&cdb, sizeof (cdb));
20255 20033 bzero(&ucmd_buf, sizeof (ucmd_buf));
[ 195 lines elided ]
20256 20034
20257 20035 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20258 20036
20259 20037 ucmd_buf.uscsi_cdb = (char *)&cdb;
20260 20038 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
20261 20039 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
20262 20040 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
20263 20041 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20264 20042 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20265 20043 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20266 - ucmd_buf.uscsi_timeout = 60;
20044 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
20267 20045
20268 20046 /*
20269 20047 * Read Capacity (16) is a Service Action In command. One
20270 20048 * command byte (0x9E) is overloaded for multiple operations,
20271 20049 * with the second CDB byte specifying the desired operation
20272 20050 */
20273 20051 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20274 20052 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20275 20053
20276 20054 /*
20277 20055 * Fill in allocation length field
20278 20056 */
20279 20057 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
20280 20058
20281 20059 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20282 20060 UIO_SYSSPACE, path_flag);
20283 20061
20284 20062 switch (status) {
20285 20063 case 0:
20286 20064 /* Return failure if we did not get valid capacity data. */
20287 20065 if (ucmd_buf.uscsi_resid > 20) {
20288 20066 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20289 20067 "sd_send_scsi_READ_CAPACITY_16 received invalid "
20290 20068 "capacity data");
20291 20069 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20292 20070 return (EIO);
20293 20071 }
20294 20072
20295 20073 /*
20296 20074 * Read capacity and block size from the READ CAPACITY 16 data.
20297 20075 * This data may be adjusted later due to device specific
20298 20076 * issues.
20299 20077 *
[ 23 lines elided ]
20300 20078 * According to the SCSI spec, the READ CAPACITY 16
20301 20079 * command returns the following:
20302 20080 *
20303 20081 * bytes 0-7: Maximum logical block address available.
20304 20082 * (MSB in byte:0 & LSB in byte:7)
20305 20083 *
20306 20084 * bytes 8-11: Block length in bytes
20307 20085 * (MSB in byte:8 & LSB in byte:11)
20308 20086 *
20309 20087 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
20088 + *
20089 + * byte 14:
20090 + * bit 7: Thin-Provisioning Enabled
20091 + * bit 6: Thin-Provisioning Read Zeros
20310 20092 */
20311 20093 capacity = BE_64(capacity16_buf[0]);
20312 20094 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
20313 20095 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;
20314 20096
20097 + un->un_thin_flags = 0;
20098 + if (((uint8_t *)capacity16_buf)[14] & (1 << 7))
20099 + un->un_thin_flags |= SD_THIN_PROV_ENABLED;
20100 + if (((uint8_t *)capacity16_buf)[14] & (1 << 6))
20101 + un->un_thin_flags |= SD_THIN_PROV_READ_ZEROS;
20102 +
20315 20103 pbsize = lbasize << lbpb_exp;
20316 20104
20317 20105 /*
20318 20106 * Done with capacity16_buf
20319 20107 */
20320 20108 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20321 20109
20322 20110 /*
20323 20111 * if the reported capacity is set to all 0xf's, then
20324 20112 * this disk is too large. This could only happen with
20325 20113 * a device that supports LBAs larger than 64 bits which
20326 20114 * are not defined by any current T10 standards.
20327 20115 */
20328 20116 if (capacity == 0xffffffffffffffff) {
20329 20117 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20330 20118 "disk is too large");
20331 20119 return (EIO);
20332 20120 }
20333 20121 break; /* Success! */
20334 20122 case EIO:
20335 20123 switch (ucmd_buf.uscsi_status) {
20336 20124 case STATUS_RESERVATION_CONFLICT:
20337 20125 status = EACCES;
20338 20126 break;
20339 20127 case STATUS_CHECK:
20340 20128 /*
20341 20129 * Check condition; look for ASC/ASCQ of 0x04/0x01
20342 20130 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20343 20131 */
20344 20132 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20345 20133 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20346 20134 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20347 20135 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20348 20136 return (EAGAIN);
20349 20137 }
20350 20138 break;
20351 20139 default:
20352 20140 break;
20353 20141 }
20354 20142 /* FALLTHRU */
20355 20143 default:
20356 20144 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20357 20145 return (status);
20358 20146 }
20359 20147
20360 20148 /*
20361 20149 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20362 20150 * (2352 and 0 are common) so for these devices always force the value
20363 20151 * to 2048 as required by the ATAPI specs.
20364 20152 */
20365 20153 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20366 20154 lbasize = 2048;
20367 20155 }
20368 20156
20369 20157 /*
20370 20158 * Get the maximum LBA value from the READ CAPACITY 16 data.
20371 20159 * Here we assume that the Partial Medium Indicator (PMI) bit
20372 20160 * was cleared when issuing the command. This means that the LBA
20373 20161 * returned from the device is the LBA of the last logical block
20374 20162 * on the logical unit. The actual logical block count will be
20375 20163 * this value plus one.
20376 20164 */
20377 20165 capacity += 1;
20378 20166
20379 20167 /*
20380 20168 * Currently, for removable media, the capacity is saved in terms
20381 20169 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20382 20170 */
20383 20171 if (un->un_f_has_removable_media)
20384 20172 capacity *= (lbasize / un->un_sys_blocksize);
20385 20173
20386 20174 *capp = capacity;
20387 20175 *lbap = lbasize;
20388 20176 *psp = pbsize;
20389 20177
20390 20178 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
20391 20179 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20392 20180 capacity, lbasize, pbsize);
20393 20181
20394 20182 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
20395 20183 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20396 20184 "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20397 20185 	    "capacity %llu lbasize %d pbsize %d", capacity, lbasize, pbsize);
20398 20186 return (EIO);
20399 20187 }
20400 20188
20401 20189 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20402 20190 return (0);
20403 20191 }
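
A sketch of the same extraction against the raw 32-byte READ CAPACITY(16)
parameter data, including the byte-13 exponent and the byte-14
thin-provisioning bits the new code tests; the struct and function names are
illustrative, not the driver's:

#include <stdint.h>

struct rc16_info {
	uint64_t	last_lba;	/* bytes 0-7, big-endian */
	uint32_t	lbasize;	/* bytes 8-11, big-endian */
	uint32_t	pbsize;		/* lbasize << exponent (byte 13) */
	int		thin_enabled;	/* byte 14 bit 7 (TPE/LBPME) */
	int		thin_read_zeros; /* byte 14 bit 6 (TPRZ/LBPRZ) */
};

static void
decode_read_capacity16(const uint8_t d[32], struct rc16_info *out)
{
	int i;

	out->last_lba = 0;
	for (i = 0; i < 8; i++)
		out->last_lba = (out->last_lba << 8) | d[i];

	out->lbasize = ((uint32_t)d[8] << 24) | ((uint32_t)d[9] << 16) |
	    ((uint32_t)d[10] << 8) | d[11];

	out->pbsize = out->lbasize << (d[13] & 0x0f);
	out->thin_enabled = (d[14] >> 7) & 1;
	out->thin_read_zeros = (d[14] >> 6) & 1;
}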
20404 20192
20405 20193
20406 20194 /*
20407 20195 * Function: sd_send_scsi_START_STOP_UNIT
20408 20196 *
20409 20197 * Description: Issue a scsi START STOP UNIT command to the target.
20410 20198 *
20411 20199  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20412 20200 * structure for this target.
20413 20201 * pc_flag - SD_POWER_CONDITION
20414 20202 * SD_START_STOP
20415 20203 * flag - SD_TARGET_START
20416 20204 * SD_TARGET_STOP
20417 20205 * SD_TARGET_EJECT
20418 20206 * SD_TARGET_CLOSE
20419 20207 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20420 20208 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20421 20209 * to use the USCSI "direct" chain and bypass the normal
20422 20210 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20423 20211 * command is issued as part of an error recovery action.
20424 20212 *
20425 20213 * Return Code: 0 - Success
20426 20214 * EIO - IO error
20427 20215 * EACCES - Reservation conflict detected
20428 20216 * ENXIO - Not Ready, medium not present
20429 20217 * errno return code from sd_ssc_send()
20430 20218 *
20431 20219 * Context: Can sleep.
20432 20220 */
20433 20221
20434 20222 static int
20435 20223 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
20436 20224 int path_flag)
20437 20225 {
20438 20226 struct scsi_extended_sense sense_buf;
20439 20227 union scsi_cdb cdb;
20440 20228 struct uscsi_cmd ucmd_buf;
20441 20229 int status;
20442 20230 struct sd_lun *un;
20443 20231
20444 20232 ASSERT(ssc != NULL);
20445 20233 un = ssc->ssc_un;
20446 20234 ASSERT(un != NULL);
20447 20235 ASSERT(!mutex_owned(SD_MUTEX(un)));
20448 20236
20449 20237 SD_TRACE(SD_LOG_IO, un,
20450 20238 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
20451 20239
20452 20240 if (un->un_f_check_start_stop &&
20453 20241 (pc_flag == SD_START_STOP) &&
20454 20242 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
20455 20243 (un->un_f_start_stop_supported != TRUE)) {
20456 20244 return (0);
20457 20245 }
20458 20246
20459 20247 /*
20460 20248 * If we are performing an eject operation and
20461 20249 * we receive any command other than SD_TARGET_EJECT
20462 20250 * we should immediately return.
20463 20251 */
20464 20252 if (flag != SD_TARGET_EJECT) {
20465 20253 mutex_enter(SD_MUTEX(un));
20466 20254 if (un->un_f_ejecting == TRUE) {
20467 20255 mutex_exit(SD_MUTEX(un));
20468 20256 return (EAGAIN);
20469 20257 }
20470 20258 mutex_exit(SD_MUTEX(un));
20471 20259 }
20472 20260
20473 20261 bzero(&cdb, sizeof (cdb));
20474 20262 bzero(&ucmd_buf, sizeof (ucmd_buf));
20475 20263 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20476 20264
20477 20265 cdb.scc_cmd = SCMD_START_STOP;
[ 153 lines elided ]
20478 20266 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20479 20267 (uchar_t)(flag << 4) : (uchar_t)flag;
20480 20268
20481 20269 ucmd_buf.uscsi_cdb = (char *)&cdb;
20482 20270 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20483 20271 ucmd_buf.uscsi_bufaddr = NULL;
20484 20272 ucmd_buf.uscsi_buflen = 0;
20485 20273 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20486 20274 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20487 20275 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20488 - ucmd_buf.uscsi_timeout = 200;
20276 + ucmd_buf.uscsi_timeout = 3 * un->un_uscsi_timeout;
20489 20277
20490 20278 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20491 20279 UIO_SYSSPACE, path_flag);
20492 20280
20493 20281 switch (status) {
20494 20282 case 0:
20495 20283 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20496 20284 break; /* Success! */
20497 20285 case EIO:
20498 20286 switch (ucmd_buf.uscsi_status) {
20499 20287 case STATUS_RESERVATION_CONFLICT:
20500 20288 status = EACCES;
20501 20289 break;
20502 20290 case STATUS_CHECK:
20503 20291 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
20504 20292 switch (scsi_sense_key(
20505 20293 (uint8_t *)&sense_buf)) {
20506 20294 case KEY_ILLEGAL_REQUEST:
20507 20295 status = ENOTSUP;
20508 20296 break;
20509 20297 case KEY_NOT_READY:
20510 20298 if (scsi_sense_asc(
20511 20299 (uint8_t *)&sense_buf)
20512 20300 == 0x3A) {
20513 20301 status = ENXIO;
20514 20302 }
20515 20303 break;
20516 20304 default:
20517 20305 break;
20518 20306 }
20519 20307 }
20520 20308 break;
20521 20309 default:
20522 20310 break;
20523 20311 }
20524 20312 break;
20525 20313 default:
20526 20314 break;
20527 20315 }
20528 20316
20529 20317 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");
20530 20318
20531 20319 return (status);
20532 20320 }
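
The byte-4 encoding above, in isolation: a power condition occupies the high
nibble, while plain start/stop flags sit in the low bits (START = bit 0,
LOEJ = bit 1). build_start_stop_cdb() is a hypothetical helper:

#include <stdint.h>
#include <string.h>

#define	SCMD_START_STOP	0x1B

static void
build_start_stop_cdb(uint8_t cdb[6], int power_condition, uint8_t flag)
{
	(void) memset(cdb, 0, 6);
	cdb[0] = SCMD_START_STOP;
	cdb[4] = power_condition ? (uint8_t)(flag << 4) : flag;
}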
20533 20321
20534 20322
20535 20323 /*
20536 20324 * Function: sd_start_stop_unit_callback
20537 20325 *
20538 20326 * Description: timeout(9F) callback to begin recovery process for a
20539 20327 * device that has spun down.
20540 20328 *
20541 20329 * Arguments: arg - pointer to associated softstate struct.
20542 20330 *
20543 20331 * Context: Executes in a timeout(9F) thread context
20544 20332 */
20545 20333
20546 20334 static void
20547 20335 sd_start_stop_unit_callback(void *arg)
20548 20336 {
20549 20337 struct sd_lun *un = arg;
20550 20338 ASSERT(un != NULL);
20551 20339 ASSERT(!mutex_owned(SD_MUTEX(un)));
20552 20340
20553 20341 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
20554 20342
20555 20343 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
20556 20344 }
20557 20345
20558 20346
20559 20347 /*
20560 20348 * Function: sd_start_stop_unit_task
20561 20349 *
20562 20350 * Description: Recovery procedure when a drive is spun down.
20563 20351 *
20564 20352 * Arguments: arg - pointer to associated softstate struct.
20565 20353 *
20566 20354 * Context: Executes in a taskq() thread context
20567 20355 */
20568 20356
20569 20357 static void
20570 20358 sd_start_stop_unit_task(void *arg)
20571 20359 {
20572 20360 struct sd_lun *un = arg;
20573 20361 sd_ssc_t *ssc;
20574 20362 int power_level;
20575 20363 int rval;
20576 20364
20577 20365 ASSERT(un != NULL);
20578 20366 ASSERT(!mutex_owned(SD_MUTEX(un)));
20579 20367
20580 20368 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
20581 20369
20582 20370 /*
20583 20371 * Some unformatted drives report not ready error, no need to
20584 20372 * restart if format has been initiated.
20585 20373 */
20586 20374 mutex_enter(SD_MUTEX(un));
20587 20375 if (un->un_f_format_in_progress == TRUE) {
20588 20376 mutex_exit(SD_MUTEX(un));
20589 20377 return;
20590 20378 }
20591 20379 mutex_exit(SD_MUTEX(un));
20592 20380
20593 20381 ssc = sd_ssc_init(un);
20594 20382 /*
20595 20383 * When a START STOP command is issued from here, it is part of a
20596 20384 * failure recovery operation and must be issued before any other
20597 20385 * commands, including any pending retries. Thus it must be sent
20598 20386 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
20599 20387 * succeeds or not, we will start I/O after the attempt.
20600 20388 * If power condition is supported and the current power level
20601 20389 * is capable of performing I/O, we should set the power condition
20602 20390 * to that level. Otherwise, set the power condition to ACTIVE.
20603 20391 */
20604 20392 if (un->un_f_power_condition_supported) {
20605 20393 mutex_enter(SD_MUTEX(un));
20606 20394 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
20607 20395 power_level = sd_pwr_pc.ran_perf[un->un_power_level]
20608 20396 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
20609 20397 mutex_exit(SD_MUTEX(un));
20610 20398 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
20611 20399 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
20612 20400 } else {
20613 20401 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
20614 20402 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
20615 20403 }
20616 20404
20617 20405 if (rval != 0)
20618 20406 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20619 20407 sd_ssc_fini(ssc);
20620 20408 /*
20621 20409 * The above call blocks until the START_STOP_UNIT command completes.
20622 20410 * Now that it has completed, we must re-try the original IO that
20623 20411 * received the NOT READY condition in the first place. There are
20624 20412 * three possible conditions here:
20625 20413 *
20626 20414 * (1) The original IO is on un_retry_bp.
20627 20415 * (2) The original IO is on the regular wait queue, and un_retry_bp
20628 20416 * is NULL.
20629 20417 * (3) The original IO is on the regular wait queue, and un_retry_bp
20630 20418 * points to some other, unrelated bp.
20631 20419 *
20632 20420 * For each case, we must call sd_start_cmds() with un_retry_bp
20633 20421 * as the argument. If un_retry_bp is NULL, this will initiate
20634 20422 * processing of the regular wait queue. If un_retry_bp is not NULL,
20635 20423 * then this will process the bp on un_retry_bp. That may or may not
20636 20424 * be the original IO, but that does not matter: the important thing
20637 20425 * is to keep the IO processing going at this point.
20638 20426 *
20639 20427 * Note: This is a very specific error recovery sequence associated
20640 20428 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20641 20429 * serialize the I/O with completion of the spin-up.
20642 20430 */
20643 20431 mutex_enter(SD_MUTEX(un));
20644 20432 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
20645 20433 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20646 20434 un, un->un_retry_bp);
20647 20435 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
20648 20436 sd_start_cmds(un, un->un_retry_bp);
20649 20437 mutex_exit(SD_MUTEX(un));
20650 20438
20651 20439 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
20652 20440 }
20653 20441
20654 20442
20655 20443 /*
20656 20444 * Function: sd_send_scsi_INQUIRY
20657 20445 *
20658 20446 * Description: Issue the scsi INQUIRY command.
20659 20447 *
20660 20448 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20661 20449 * structure for this target.
20662 20450  *		bufaddr - buffer to receive the INQUIRY data
20663 20451  *		buflen - size of the receive buffer
20664 20452  *		evpd - EVPD bit: when set, request a vital product data page
20665 20453  *		page_code - VPD page to request when evpd is set
20666 20454  *		residp - ptr to return the residual byte count, may be NULL
20667 20455 *
20668 20456 * Return Code: 0 - Success
20669 20457 * errno return code from sd_ssc_send()
20670 20458 *
20671 20459 * Context: Can sleep. Does not return until command is completed.
20672 20460 */
20673 20461
20674 20462 static int
20675 20463 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
20676 20464 uchar_t evpd, uchar_t page_code, size_t *residp)
20677 20465 {
20678 20466 union scsi_cdb cdb;
20679 20467 struct uscsi_cmd ucmd_buf;
20680 20468 int status;
20681 20469 struct sd_lun *un;
20682 20470
20683 20471 ASSERT(ssc != NULL);
20684 20472 un = ssc->ssc_un;
20685 20473 ASSERT(un != NULL);
20686 20474 ASSERT(!mutex_owned(SD_MUTEX(un)));
20687 20475 ASSERT(bufaddr != NULL);
20688 20476
20689 20477 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
20690 20478
20691 20479 bzero(&cdb, sizeof (cdb));
20692 20480 bzero(&ucmd_buf, sizeof (ucmd_buf));
20693 20481 bzero(bufaddr, buflen);
20694 20482
20695 20483 cdb.scc_cmd = SCMD_INQUIRY;
20696 20484 cdb.cdb_opaque[1] = evpd;
[ 198 lines elided ]
20697 20485 cdb.cdb_opaque[2] = page_code;
20698 20486 FORMG0COUNT(&cdb, buflen);
20699 20487
20700 20488 ucmd_buf.uscsi_cdb = (char *)&cdb;
20701 20489 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20702 20490 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20703 20491 ucmd_buf.uscsi_buflen = buflen;
20704 20492 ucmd_buf.uscsi_rqbuf = NULL;
20705 20493 ucmd_buf.uscsi_rqlen = 0;
20706 20494 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
20707 - ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
20495 + ucmd_buf.uscsi_timeout = 2 * un->un_uscsi_timeout;
20708 20496
20709 20497 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20710 20498 UIO_SYSSPACE, SD_PATH_DIRECT);
20711 20499
20712 20500 /*
20713 20501 * Only handle status == 0, the upper-level caller
20714 20502 * will put different assessment based on the context.
20715 20503 */
20716 20504 if (status == 0)
20717 20505 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20718 20506
20719 20507 if ((status == 0) && (residp != NULL)) {
20720 20508 *residp = ucmd_buf.uscsi_resid;
20721 20509 }
20722 20510
20723 20511 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
20724 20512
20725 20513 return (status);
20726 20514 }
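
A sketch of the 6-byte INQUIRY CDB assembled above: EVPD in byte 1 selects
vital product data, byte 2 names the VPD page, and (as in the legacy group-0
form the driver uses via FORMG0COUNT) byte 4 carries the allocation length.
The helper is hypothetical:

#include <stdint.h>
#include <string.h>

#define	SCMD_INQUIRY	0x12

static void
build_inquiry_cdb(uint8_t cdb[6], int evpd, uint8_t page, uint8_t alloc_len)
{
	(void) memset(cdb, 0, 6);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd ? 1 : 0;
	cdb[2] = page;		/* meaningful only when EVPD is set */
	cdb[4] = alloc_len;	/* group-0 form: single-byte length */
}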
20727 20515
20728 20516
20729 20517 /*
20730 20518 * Function: sd_send_scsi_TEST_UNIT_READY
20731 20519 *
20732 20520 * Description: Issue the scsi TEST UNIT READY command.
20733 20521 * This routine can be told to set the flag USCSI_DIAGNOSE to
20734 20522 * prevent retrying failed commands. Use this when the intent
20735 20523 * is either to check for device readiness, to clear a Unit
20736 20524 * Attention, or to clear any outstanding sense data.
20737 20525 * However under specific conditions the expected behavior
20738 20526 * is for retries to bring a device ready, so use the flag
20739 20527 * with caution.
20740 20528 *
20741 20529 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20742 20530 * structure for this target.
20743 20531 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20744 20532 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20745 20533  *			0: don't check for media present, do retries on cmd.
20746 20534 *
20747 20535 * Return Code: 0 - Success
20748 20536 * EIO - IO error
20749 20537 * EACCES - Reservation conflict detected
20750 20538 * ENXIO - Not Ready, medium not present
20751 20539 * errno return code from sd_ssc_send()
20752 20540 *
20753 20541 * Context: Can sleep. Does not return until command is completed.
20754 20542 */
20755 20543
20756 20544 static int
20757 20545 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
20758 20546 {
20759 20547 struct scsi_extended_sense sense_buf;
20760 20548 union scsi_cdb cdb;
20761 20549 struct uscsi_cmd ucmd_buf;
20762 20550 int status;
20763 20551 struct sd_lun *un;
20764 20552
20765 20553 ASSERT(ssc != NULL);
20766 20554 un = ssc->ssc_un;
20767 20555 ASSERT(un != NULL);
20768 20556 ASSERT(!mutex_owned(SD_MUTEX(un)));
20769 20557
20770 20558 SD_TRACE(SD_LOG_IO, un,
20771 20559 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
20772 20560
20773 20561 /*
20774 20562 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20775 20563 * timeouts when they receive a TUR and the queue is not empty. Check
20776 20564 * the configuration flag set during attach (indicating the drive has
20777 20565 * this firmware bug) and un_ncmds_in_transport before issuing the
20778 20566 	 * TUR. If there are pending commands, return success; this is a bit
20779 20567 	 * arbitrary but is OK for non-removables (i.e. the eliteI disks) and
20780 20568 	 * non-clustering configurations.
20782 20570 */
20783 20571 if (un->un_f_cfg_tur_check == TRUE) {
20784 20572 mutex_enter(SD_MUTEX(un));
20785 20573 if (un->un_ncmds_in_transport != 0) {
20786 20574 mutex_exit(SD_MUTEX(un));
20787 20575 return (0);
20788 20576 }
20789 20577 mutex_exit(SD_MUTEX(un));
20790 20578 }
20791 20579
20792 20580 bzero(&cdb, sizeof (cdb));
20793 20581 bzero(&ucmd_buf, sizeof (ucmd_buf));
20794 20582 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20795 20583
20796 20584 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20797 20585
20798 20586 ucmd_buf.uscsi_cdb = (char *)&cdb;
20799 20587 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
[ 82 lines elided ]
20800 20588 ucmd_buf.uscsi_bufaddr = NULL;
20801 20589 ucmd_buf.uscsi_buflen = 0;
20802 20590 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20803 20591 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20804 20592 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20805 20593
20806 20594 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20807 20595 if ((flag & SD_DONT_RETRY_TUR) != 0) {
20808 20596 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20809 20597 }
20810 - ucmd_buf.uscsi_timeout = 60;
20598 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
20811 20599
20812 20600 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20813 20601 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20814 20602 SD_PATH_STANDARD));
20815 20603
20816 20604 switch (status) {
20817 20605 case 0:
20818 20606 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20819 20607 break; /* Success! */
20820 20608 case EIO:
20821 20609 switch (ucmd_buf.uscsi_status) {
20822 20610 case STATUS_RESERVATION_CONFLICT:
20823 20611 status = EACCES;
20824 20612 break;
20825 20613 case STATUS_CHECK:
20826 20614 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20827 20615 break;
20828 20616 }
20829 20617 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20830 20618 (scsi_sense_key((uint8_t *)&sense_buf) ==
20831 20619 KEY_NOT_READY) &&
20832 20620 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20833 20621 status = ENXIO;
20834 20622 }
20835 20623 break;
20836 20624 default:
20837 20625 break;
20838 20626 }
20839 20627 break;
20840 20628 default:
20841 20629 break;
20842 20630 }
20843 20631
20844 20632 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20845 20633
20846 20634 return (status);
20847 20635 }
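
The CHECK CONDITION handling above keys off fixed-format sense data: the sense
key lives in the low nibble of byte 2 and the ASC in byte 12, which is what
scsi_sense_key()/scsi_sense_asc() return for this format. A minimal sketch of
the "medium not present" probe, assuming a fixed-format sense buffer:

#include <stdint.h>

static int
medium_not_present(const uint8_t *sense, int sense_len)
{
	if (sense_len < 13)
		return (0);
	return (((sense[2] & 0x0f) == 0x02) &&	/* KEY_NOT_READY */
	    (sense[12] == 0x3a));		/* ASC: medium not present */
}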
20848 20636
20849 20637 /*
20850 20638 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20851 20639 *
20852 20640 * Description: Issue the scsi PERSISTENT RESERVE IN command.
20853 20641 *
20854 20642 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20855 20643 * structure for this target.
20856 20644 *
20857 20645 * Return Code: 0 - Success
20858 20646 * EACCES
20859 20647 * ENOTSUP
20860 20648 * errno return code from sd_ssc_send()
20861 20649 *
20862 20650 * Context: Can sleep. Does not return until command is completed.
20863 20651 */
20864 20652
20865 20653 static int
20866 20654 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
20867 20655 uint16_t data_len, uchar_t *data_bufp)
20868 20656 {
20869 20657 struct scsi_extended_sense sense_buf;
20870 20658 union scsi_cdb cdb;
20871 20659 struct uscsi_cmd ucmd_buf;
20872 20660 int status;
20873 20661 int no_caller_buf = FALSE;
20874 20662 struct sd_lun *un;
20875 20663
20876 20664 ASSERT(ssc != NULL);
20877 20665 un = ssc->ssc_un;
20878 20666 ASSERT(un != NULL);
20879 20667 ASSERT(!mutex_owned(SD_MUTEX(un)));
20880 20668 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
20881 20669
20882 20670 SD_TRACE(SD_LOG_IO, un,
20883 20671 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
20884 20672
20885 20673 bzero(&cdb, sizeof (cdb));
20886 20674 bzero(&ucmd_buf, sizeof (ucmd_buf));
20887 20675 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20888 20676 if (data_bufp == NULL) {
20889 20677 /* Allocate a default buf if the caller did not give one */
20890 20678 ASSERT(data_len == 0);
20891 20679 data_len = MHIOC_RESV_KEY_SIZE;
20892 20680 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
20893 20681 no_caller_buf = TRUE;
20894 20682 }
20895 20683
20896 20684 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
|
↓ open down ↓ |
76 lines elided |
↑ open up ↑ |
20897 20685 cdb.cdb_opaque[1] = usr_cmd;
20898 20686 FORMG1COUNT(&cdb, data_len);
20899 20687
20900 20688 ucmd_buf.uscsi_cdb = (char *)&cdb;
20901 20689 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20902 20690 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
20903 20691 ucmd_buf.uscsi_buflen = data_len;
20904 20692 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20905 20693 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20906 20694 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20907 - ucmd_buf.uscsi_timeout = 60;
20695 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
20908 20696
20909 20697 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20910 20698 UIO_SYSSPACE, SD_PATH_STANDARD);
20911 20699
20912 20700 switch (status) {
20913 20701 case 0:
20914 20702 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20915 20703
20916 20704 break; /* Success! */
20917 20705 case EIO:
20918 20706 switch (ucmd_buf.uscsi_status) {
20919 20707 case STATUS_RESERVATION_CONFLICT:
20920 20708 status = EACCES;
20921 20709 break;
20922 20710 case STATUS_CHECK:
20923 20711 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20924 20712 (scsi_sense_key((uint8_t *)&sense_buf) ==
20925 20713 KEY_ILLEGAL_REQUEST)) {
20926 20714 status = ENOTSUP;
20927 20715 }
20928 20716 break;
20929 20717 default:
20930 20718 break;
20931 20719 }
20932 20720 break;
20933 20721 default:
20934 20722 break;
20935 20723 }
20936 20724
20937 20725 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
20938 20726
20939 20727 if (no_caller_buf == TRUE) {
20940 20728 kmem_free(data_bufp, data_len);
20941 20729 }
20942 20730
20943 20731 return (status);
20944 20732 }
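
For SD_READ_KEYS, the returned data is a 4-byte generation counter, a 4-byte
additional length, and then a packed array of 8-byte registered keys, all
big-endian per SPC. A user-space sketch of walking such a payload;
print_prin_keys() is illustrative only:

#include <stdint.h>
#include <stdio.h>

static void
print_prin_keys(const uint8_t *d, uint32_t buflen)
{
	uint32_t add_len, off;

	if (buflen < 8)
		return;
	add_len = ((uint32_t)d[4] << 24) | ((uint32_t)d[5] << 16) |
	    ((uint32_t)d[6] << 8) | d[7];
	for (off = 8; off + 8 <= buflen && off - 8 < add_len; off += 8) {
		(void) printf("key: %02x%02x%02x%02x%02x%02x%02x%02x\n",
		    d[off], d[off + 1], d[off + 2], d[off + 3],
		    d[off + 4], d[off + 5], d[off + 6], d[off + 7]);
	}
}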
20945 20733
20946 20734
20947 20735 /*
20948 20736 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
20949 20737 *
20950 20738 * Description: This routine is the driver entry point for handling CD-ROM
20951 20739 * multi-host persistent reservation requests (MHIOCGRP_INKEYS,
20952 20740 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the
20953 20741 * device.
20954 20742 *
20955 20743 * Arguments: ssc - ssc contains un - pointer to soft state struct
20956 20744 * for the target.
20957 20745 * usr_cmd SCSI-3 reservation facility command (one of
20958 20746 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
20959 20747 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR)
20960 20748 * usr_bufp - user provided pointer register, reserve descriptor or
20961 20749 * preempt and abort structure (mhioc_register_t,
20962 20750 * mhioc_resv_desc_t, mhioc_preemptandabort_t)
20963 20751 *
20964 20752 * Return Code: 0 - Success
20965 20753 * EACCES
20966 20754 * ENOTSUP
20967 20755 * errno return code from sd_ssc_send()
20968 20756 *
20969 20757 * Context: Can sleep. Does not return until command is completed.
20970 20758 */
20971 20759
20972 20760 static int
20973 20761 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
20974 20762 uchar_t *usr_bufp)
20975 20763 {
20976 20764 struct scsi_extended_sense sense_buf;
20977 20765 union scsi_cdb cdb;
20978 20766 struct uscsi_cmd ucmd_buf;
20979 20767 int status;
20980 20768 uchar_t data_len = sizeof (sd_prout_t);
20981 20769 sd_prout_t *prp;
20982 20770 struct sd_lun *un;
20983 20771
20984 20772 ASSERT(ssc != NULL);
20985 20773 un = ssc->ssc_un;
20986 20774 ASSERT(un != NULL);
20987 20775 ASSERT(!mutex_owned(SD_MUTEX(un)));
20988 20776 ASSERT(data_len == 24); /* required by scsi spec */
20989 20777
20990 20778 SD_TRACE(SD_LOG_IO, un,
20991 20779 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);
20992 20780
20993 20781 if (usr_bufp == NULL) {
20994 20782 return (EINVAL);
20995 20783 }
20996 20784
20997 20785 bzero(&cdb, sizeof (cdb));
20998 20786 bzero(&ucmd_buf, sizeof (ucmd_buf));
20999 20787 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21000 20788 prp = kmem_zalloc(data_len, KM_SLEEP);
21001 20789
21002 20790 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
|
↓ open down ↓ |
85 lines elided |
↑ open up ↑ |
21003 20791 cdb.cdb_opaque[1] = usr_cmd;
21004 20792 FORMG1COUNT(&cdb, data_len);
21005 20793
21006 20794 ucmd_buf.uscsi_cdb = (char *)&cdb;
21007 20795 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
21008 20796 ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
21009 20797 ucmd_buf.uscsi_buflen = data_len;
21010 20798 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21011 20799 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21012 20800 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21013 - ucmd_buf.uscsi_timeout = 60;
20801 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
21014 20802
21015 20803 switch (usr_cmd) {
21016 20804 case SD_SCSI3_REGISTER: {
21017 20805 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;
21018 20806
21019 20807 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21020 20808 bcopy(ptr->newkey.key, prp->service_key,
21021 20809 MHIOC_RESV_KEY_SIZE);
21022 20810 prp->aptpl = ptr->aptpl;
21023 20811 break;
21024 20812 }
21025 20813 case SD_SCSI3_CLEAR: {
21026 20814 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21027 20815
21028 20816 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21029 20817 break;
21030 20818 }
21031 20819 case SD_SCSI3_RESERVE:
21032 20820 case SD_SCSI3_RELEASE: {
21033 20821 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21034 20822
21035 20823 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21036 20824 prp->scope_address = BE_32(ptr->scope_specific_addr);
21037 20825 cdb.cdb_opaque[2] = ptr->type;
21038 20826 break;
21039 20827 }
21040 20828 case SD_SCSI3_PREEMPTANDABORT: {
21041 20829 mhioc_preemptandabort_t *ptr =
21042 20830 (mhioc_preemptandabort_t *)usr_bufp;
21043 20831
21044 20832 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21045 20833 bcopy(ptr->victim_key.key, prp->service_key,
21046 20834 MHIOC_RESV_KEY_SIZE);
21047 20835 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
21048 20836 cdb.cdb_opaque[2] = ptr->resvdesc.type;
21049 20837 ucmd_buf.uscsi_flags |= USCSI_HEAD;
21050 20838 break;
21051 20839 }
21052 20840 case SD_SCSI3_REGISTERANDIGNOREKEY:
21053 20841 {
21054 20842 mhioc_registerandignorekey_t *ptr;
21055 20843 ptr = (mhioc_registerandignorekey_t *)usr_bufp;
21056 20844 bcopy(ptr->newkey.key,
21057 20845 prp->service_key, MHIOC_RESV_KEY_SIZE);
21058 20846 prp->aptpl = ptr->aptpl;
21059 20847 break;
21060 20848 }
21061 20849 default:
21062 20850 ASSERT(FALSE);
21063 20851 break;
21064 20852 }
21065 20853
21066 20854 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21067 20855 UIO_SYSSPACE, SD_PATH_STANDARD);
21068 20856
21069 20857 switch (status) {
21070 20858 case 0:
21071 20859 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21072 20860 break; /* Success! */
21073 20861 case EIO:
21074 20862 switch (ucmd_buf.uscsi_status) {
21075 20863 case STATUS_RESERVATION_CONFLICT:
21076 20864 status = EACCES;
21077 20865 break;
21078 20866 case STATUS_CHECK:
21079 20867 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
21080 20868 (scsi_sense_key((uint8_t *)&sense_buf) ==
21081 20869 KEY_ILLEGAL_REQUEST)) {
21082 20870 status = ENOTSUP;
21083 20871 }
21084 20872 break;
21085 20873 default:
21086 20874 break;
21087 20875 }
21088 20876 break;
21089 20877 default:
21090 20878 break;
21091 20879 }
21092 20880
21093 20881 kmem_free(prp, data_len);
21094 20882 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
21095 20883 return (status);
21096 20884 }
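
A sketch of the 24-byte parameter list the routine above populates through
sd_prout_t: reservation key in bytes 0-7, service action key in bytes 8-15,
big-endian scope-specific address in bytes 16-19, and APTPL in bit 0 of
byte 20. fill_prout_list() is a hypothetical helper:

#include <stdint.h>
#include <string.h>

static void
fill_prout_list(uint8_t parms[24], const uint8_t res_key[8],
    const uint8_t svc_key[8], uint32_t scope_addr, int aptpl)
{
	(void) memset(parms, 0, 24);
	(void) memcpy(&parms[0], res_key, 8);
	(void) memcpy(&parms[8], svc_key, 8);
	parms[16] = (uint8_t)(scope_addr >> 24);	/* big-endian */
	parms[17] = (uint8_t)(scope_addr >> 16);
	parms[18] = (uint8_t)(scope_addr >> 8);
	parms[19] = (uint8_t)scope_addr;
	parms[20] = aptpl ? 0x01 : 0x00;
}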
21097 20885
21098 20886
21099 20887 /*
21100 20888 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
21101 20889 *
21102 20890 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
21103 20891 *
21104 20892 * Arguments: un - pointer to the target's soft state struct
21105 20893 * dkc - pointer to the callback structure
21106 20894 *
21107 20895 * Return Code: 0 - success
21108 20896 * errno-type error code
21109 20897 *
21110 20898 * Context: kernel thread context only.
21111 20899 *
21112 20900 * _______________________________________________________________
21113 20901 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE |
21114 20902 * |FLUSH_VOLATILE| | operation |
21115 20903 * |______________|______________|_________________________________|
21116 20904 * | 0 | NULL | Synchronous flush on both |
21117 20905 * | | | volatile and non-volatile cache |
21118 20906 * |______________|______________|_________________________________|
21119 20907 * | 1 | NULL | Synchronous flush on volatile |
21120 20908 * | | | cache; disk drivers may suppress|
21121 20909 * | | | flush if disk table indicates |
21122 20910 * | | | non-volatile cache |
21123 20911 * |______________|______________|_________________________________|
21124 20912 * | 0 | !NULL | Asynchronous flush on both |
21125 20913 * | | | volatile and non-volatile cache;|
21126 20914 * |______________|______________|_________________________________|
21127 20915 * | 1 | !NULL | Asynchronous flush on volatile |
21128 20916 * | | | cache; disk drivers may suppress|
21129 20917 * | | | flush if disk table indicates |
21130 20918 * | | | non-volatile cache |
21131 20919 * |______________|______________|_________________________________|
21132 20920 *
21133 20921 */
21134 20922
21135 20923 static int
21136 20924 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
21137 20925 {
21138 20926 struct sd_uscsi_info *uip;
21139 20927 struct uscsi_cmd *uscmd;
21140 20928 union scsi_cdb *cdb;
21141 20929 struct buf *bp;
21142 20930 int rval = 0;
21143 20931 int is_async;
21144 20932
21145 20933 SD_TRACE(SD_LOG_IO, un,
21146 20934 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
21147 20935
21148 20936 ASSERT(un != NULL);
21149 20937 ASSERT(!mutex_owned(SD_MUTEX(un)));
21150 20938
21151 20939 if (dkc == NULL || dkc->dkc_callback == NULL) {
21152 20940 is_async = FALSE;
21153 20941 } else {
21154 20942 is_async = TRUE;
21155 20943 }
21156 20944
21157 20945 mutex_enter(SD_MUTEX(un));
21158 20946 /* check whether cache flush should be suppressed */
21159 20947 if (un->un_f_suppress_cache_flush == TRUE) {
21160 20948 mutex_exit(SD_MUTEX(un));
21161 20949 /*
21162 20950 * suppress the cache flush if the device is told to do
21163 20951 * so by sd.conf or disk table
21164 20952 */
21165 20953 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: "
21166 20954 		    "skip the cache flush since suppress_cache_flush is %d!\n",
21167 20955 		    un->un_f_suppress_cache_flush);
21168 20956
21169 20957 if (is_async == TRUE) {
21170 20958 /* invoke callback for asynchronous flush */
21171 20959 (*dkc->dkc_callback)(dkc->dkc_cookie, 0);
21172 20960 }
21173 20961 return (rval);
21174 20962 }
21175 20963 mutex_exit(SD_MUTEX(un));
21176 20964
21177 20965 /*
21178 20966 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
21179 20967 * set properly
21180 20968 */
21181 20969 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
21182 20970 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
21183 20971
21184 20972 mutex_enter(SD_MUTEX(un));
21185 20973 if (dkc != NULL && un->un_f_sync_nv_supported &&
21186 20974 (dkc->dkc_flag & FLUSH_VOLATILE)) {
21187 20975 /*
21188 20976 * if the device supports SYNC_NV bit, turn on
21189 20977 * the SYNC_NV bit to only flush volatile cache
21190 20978 */
21191 20979 cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
21192 20980 }
21193 20981 mutex_exit(SD_MUTEX(un));
21194 20982
21195 20983 /*
21196 20984 * First get some memory for the uscsi_cmd struct and cdb
21197 20985 * and initialize for SYNCHRONIZE_CACHE cmd.
[ 174 lines elided ]
21198 20986 */
21199 20987 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
21200 20988 uscmd->uscsi_cdblen = CDB_GROUP1;
21201 20989 uscmd->uscsi_cdb = (caddr_t)cdb;
21202 20990 uscmd->uscsi_bufaddr = NULL;
21203 20991 uscmd->uscsi_buflen = 0;
21204 20992 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
21205 20993 uscmd->uscsi_rqlen = SENSE_LENGTH;
21206 20994 uscmd->uscsi_rqresid = SENSE_LENGTH;
21207 20995 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
21208 - uscmd->uscsi_timeout = sd_io_time;
20996 + uscmd->uscsi_timeout = un->un_cmd_timeout;
21209 20997
21210 20998 /*
21211 20999 * Allocate an sd_uscsi_info struct and fill it with the info
21212 21000 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
21213 21001 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
21214 21002 * since we allocate the buf here in this function, we do not
21215 21003 * need to preserve the prior contents of b_private.
21216 21004 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
21217 21005 */
21218 21006 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
21219 21007 uip->ui_flags = SD_PATH_DIRECT;
21220 21008 uip->ui_cmdp = uscmd;
21221 21009
21222 21010 bp = getrbuf(KM_SLEEP);
21223 21011 bp->b_private = uip;
21224 21012
21225 21013 /*
21226 21014 * Setup buffer to carry uscsi request.
21227 21015 */
21228 21016 bp->b_flags = B_BUSY;
21229 21017 bp->b_bcount = 0;
21230 21018 bp->b_blkno = 0;
21231 21019
21232 21020 if (is_async == TRUE) {
21233 21021 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
21234 21022 uip->ui_dkc = *dkc;
21235 21023 }
21236 21024
21237 21025 bp->b_edev = SD_GET_DEV(un);
21238 21026 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */
21239 21027
21240 21028 /*
21241 21029 * Unset un_f_sync_cache_required flag
21242 21030 */
21243 21031 mutex_enter(SD_MUTEX(un));
21244 21032 un->un_f_sync_cache_required = FALSE;
21245 21033 mutex_exit(SD_MUTEX(un));
21246 21034
21247 21035 (void) sd_uscsi_strategy(bp);
21248 21036
21249 21037 /*
21250 21038 * If synchronous request, wait for completion
21251 21039 * If async just return and let b_iodone callback
21252 21040 * cleanup.
21253 21041 * NOTE: On return, u_ncmds_in_driver will be decremented,
21254 21042 * but it was also incremented in sd_uscsi_strategy(), so
21255 21043 * we should be ok.
21256 21044 */
21257 21045 if (is_async == FALSE) {
21258 21046 (void) biowait(bp);
21259 21047 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
21260 21048 }
21261 21049
21262 21050 return (rval);
21263 21051 }
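
From user space, the synchronous path above is reached with the
DKIOCFLUSHWRITECACHE ioctl; a NULL argument requests a synchronous flush of
both caches, per the table in the function comment. A minimal sketch (the
device path is only an example):

#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, DKIOCFLUSHWRITECACHE, NULL) < 0)
		perror("DKIOCFLUSHWRITECACHE");
	(void) close(fd);
	return (0);
}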
21264 21052
21265 21053
21266 21054 static int
21267 21055 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
21268 21056 {
21269 21057 struct sd_uscsi_info *uip;
21270 21058 struct uscsi_cmd *uscmd;
21271 21059 uint8_t *sense_buf;
21272 21060 struct sd_lun *un;
21273 21061 int status;
21274 21062 union scsi_cdb *cdb;
21275 21063
21276 21064 uip = (struct sd_uscsi_info *)(bp->b_private);
21277 21065 ASSERT(uip != NULL);
21278 21066
21279 21067 uscmd = uip->ui_cmdp;
21280 21068 ASSERT(uscmd != NULL);
21281 21069
21282 21070 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
21283 21071 ASSERT(sense_buf != NULL);
21284 21072
21285 21073 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
21286 21074 ASSERT(un != NULL);
21287 21075
21288 21076 cdb = (union scsi_cdb *)uscmd->uscsi_cdb;
21289 21077
21290 21078 status = geterror(bp);
21291 21079 switch (status) {
21292 21080 case 0:
21293 21081 break; /* Success! */
21294 21082 case EIO:
21295 21083 switch (uscmd->uscsi_status) {
21296 21084 case STATUS_RESERVATION_CONFLICT:
21297 21085 /* Ignore reservation conflict */
21298 21086 status = 0;
21299 21087 goto done;
21300 21088
21301 21089 case STATUS_CHECK:
21302 21090 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
21303 21091 (scsi_sense_key(sense_buf) ==
21304 21092 KEY_ILLEGAL_REQUEST)) {
21305 21093 /* Ignore Illegal Request error */
21306 21094 				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
21307 21095 mutex_enter(SD_MUTEX(un));
21308 21096 un->un_f_sync_nv_supported = FALSE;
21309 21097 mutex_exit(SD_MUTEX(un));
21310 21098 status = 0;
21311 21099 SD_TRACE(SD_LOG_IO, un,
21312 21100 "un_f_sync_nv_supported \
21313 21101 is set to false.\n");
21314 21102 goto done;
21315 21103 }
21316 21104
21317 21105 mutex_enter(SD_MUTEX(un));
21318 21106 un->un_f_sync_cache_supported = FALSE;
21319 21107 mutex_exit(SD_MUTEX(un));
21320 21108 SD_TRACE(SD_LOG_IO, un,
21321 21109 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
21322 21110 un_f_sync_cache_supported set to false \
21323 21111 with asc = %x, ascq = %x\n",
21324 21112 scsi_sense_asc(sense_buf),
21325 21113 scsi_sense_ascq(sense_buf));
21326 21114 status = ENOTSUP;
21327 21115 goto done;
21328 21116 }
21329 21117 break;
21330 21118 default:
21331 21119 break;
21332 21120 }
21333 21121 /* FALLTHRU */
21334 21122 default:
21335 21123 /*
21336 21124 * Turn on the un_f_sync_cache_required flag
21337 21125 * since the SYNC CACHE command failed
21338 21126 */
21339 21127 mutex_enter(SD_MUTEX(un));
21340 21128 un->un_f_sync_cache_required = TRUE;
21341 21129 mutex_exit(SD_MUTEX(un));
21342 21130
21343 21131 /*
21344 21132 * Don't log an error message if this device
21345 21133 * has removable media.
21346 21134 */
21347 21135 if (!un->un_f_has_removable_media) {
21348 21136 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
21349 21137 "SYNCHRONIZE CACHE command failed (%d)\n", status);
21350 21138 }
21351 21139 break;
21352 21140 }
21353 21141
21354 21142 done:
21355 - if (uip->ui_dkc.dkc_callback != NULL) {
21143 + if (uip->ui_dkc.dkc_callback != NULL)
21356 21144 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
21357 - }
21358 21145
21359 21146 ASSERT((bp->b_flags & B_REMAPPED) == 0);
21360 21147 freerbuf(bp);
21361 21148 kmem_free(uip, sizeof (struct sd_uscsi_info));
21362 21149 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
21363 21150 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
21364 21151 kmem_free(uscmd, sizeof (struct uscsi_cmd));
21365 21152
21366 21153 return (status);
21367 21154 }
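The SD_SYNC_NV_BIT test in the CHECK CONDITION branch above distinguishes flushes that only asked for the non-volatile cache: if the drive rejects that CDB form with ILLEGAL REQUEST, un_f_sync_nv_supported is cleared and plain flushes are used from then on. A condensed sketch of the issuing side's CDB setup (mirroring sd_send_scsi_SYNCHRONIZE_CACHE; not a complete command setup):

    static void
    build_sync_cache_cdb(union scsi_cdb *cdb, boolean_t sync_nv)
    {
            bzero(cdb, sizeof (*cdb));
            cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
            if (sync_nv)
                    cdb->cdb_un.tag |= SD_SYNC_NV_BIT;  /* byte 1 SYNC_NV */
    }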
21368 21155
21156 +/*
21157 + * Issues a single SCSI UNMAP command with a prepared UNMAP parameter list.
21158 + * Returns zero on success, or the non-zero command error code on failure.
21159 + */
21160 +static int
21161 +sd_send_scsi_UNMAP_issue_one(sd_ssc_t *ssc, unmap_param_hdr_t *uph,
21162 + uint64_t num_descr, uint64_t bytes)
21163 +{
21164 + struct sd_lun *un = ssc->ssc_un;
21165 + struct scsi_extended_sense sense_buf;
21166 + union scsi_cdb cdb;
21167 + struct uscsi_cmd ucmd_buf;
21168 + int status;
21169 + const uint64_t param_size = sizeof (unmap_param_hdr_t) +
21170 + num_descr * sizeof (unmap_blk_descr_t);
21369 21171
21172 + uph->uph_data_len = BE_16(param_size - 2);
21173 + uph->uph_descr_data_len = BE_16(param_size - 8);
21174 +
21175 + bzero(&cdb, sizeof (cdb));
21176 + bzero(&ucmd_buf, sizeof (ucmd_buf));
21177 + bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21178 +
21179 + cdb.scc_cmd = SCMD_UNMAP;
21180 + FORMG1COUNT(&cdb, param_size);
21181 +
21182 + ucmd_buf.uscsi_cdb = (char *)&cdb;
21183 + ucmd_buf.uscsi_cdblen = (uchar_t)CDB_GROUP1;
21184 + ucmd_buf.uscsi_bufaddr = (caddr_t)uph;
21185 + ucmd_buf.uscsi_buflen = param_size;
21186 + ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21187 + ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21188 + ucmd_buf.uscsi_flags = USCSI_WRITE | USCSI_RQENABLE | USCSI_SILENT;
21189 + ucmd_buf.uscsi_timeout = un->un_cmd_timeout;
21190 +
21191 + status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE,
21192 + SD_PATH_STANDARD);
21193 +
21194 + switch (status) {
21195 + case 0:
21196 + sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21197 +
21198 + if (un->un_unmapstats) {
21199 + atomic_inc_64(&un->un_unmapstats->us_cmds.value.ui64);
21200 + atomic_add_64(&un->un_unmapstats->us_extents.value.ui64,
21201 + num_descr);
21202 + atomic_add_64(&un->un_unmapstats->us_bytes.value.ui64,
21203 + bytes);
21204 + }
21205 + break; /* Success! */
21206 + case EIO:
21207 + if (un->un_unmapstats)
21208 + atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
21209 + switch (ucmd_buf.uscsi_status) {
21210 + case STATUS_RESERVATION_CONFLICT:
21211 + status = EACCES;
21212 + break;
21213 + default:
21214 + break;
21215 + }
21216 + break;
21217 + default:
21218 + if (un->un_unmapstats)
21219 + atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
21220 + break;
21221 + }
21222 +
21223 + return (status);
21224 +}
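A worked example of the two length fields set at the top of this function, assuming the standard UNMAP parameter list layout (8-byte header, 16-byte block descriptors):

    /*
     * For num_descr == 2:
     *
     *      param_size         = 8 + 2 * 16 = 40   total bytes sent
     *      uph_data_len       = 40 - 2     = 38   bytes after that field
     *      uph_descr_data_len = 40 - 8     = 32   descriptor bytes only
     *
     * Both fields are big-endian on the wire, hence BE_16() above;
     * FORMG1COUNT() places the same param_size in the CDB as the
     * parameter list length.
     */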
21225 +
21370 21226 /*
21227 + * Returns a pointer to the i'th block descriptor inside an UNMAP param list.
21228 + */
21229 +static inline unmap_blk_descr_t *
21230 +UNMAP_blk_descr_i(void *buf, uint64_t i)
21231 +{
21232 + return ((unmap_blk_descr_t *)((uint8_t *)buf +
21233 + sizeof (unmap_param_hdr_t) + (i * sizeof (unmap_blk_descr_t))));
21234 +}
21235 +
21236 +/*
21237 + * Takes the list of extents from sd_send_scsi_UNMAP, chops it up, prepares
21238 + * UNMAP block descriptors and issues individual SCSI UNMAP commands. While
21239 + * doing so we consult the block limits to determine at most how many
21240 + * extents and LBAs we can UNMAP in one command.
21242 + * If a command fails for whatever reason, extent list processing is aborted
21242 + * and the failed command's status is returned. Otherwise returns 0 on
21243 + * success.
21244 + */
21245 +static int
21246 +sd_send_scsi_UNMAP_issue(dev_t dev, sd_ssc_t *ssc, const dkioc_free_list_t *dfl)
21247 +{
21248 + struct sd_lun *un = ssc->ssc_un;
21249 + unmap_param_hdr_t *uph;
21250 + sd_blk_limits_t *lim = &un->un_blk_lim;
21251 + int rval = 0;
21252 + int partition;
21253 + /* partition offset & length in system blocks */
21254 + diskaddr_t part_off_sysblks = 0, part_len_sysblks = 0;
21255 + uint64_t part_off, part_len;
21256 + uint64_t descr_cnt_lim, byte_cnt_lim;
21257 + uint64_t descr_issued = 0, bytes_issued = 0;
21258 +
21259 + uph = kmem_zalloc(SD_UNMAP_PARAM_LIST_MAXSZ, KM_SLEEP);
21260 +
21261 + partition = SDPART(dev);
21262 + (void) cmlb_partinfo(un->un_cmlbhandle, partition, &part_len_sysblks,
21263 + &part_off_sysblks, NULL, NULL, (void *)SD_PATH_DIRECT);
21264 + part_off = SD_SYSBLOCKS2BYTES(part_off_sysblks);
21265 + part_len = SD_SYSBLOCKS2BYTES(part_len_sysblks);
21266 +
21267 + ASSERT(un->un_blk_lim.lim_max_unmap_lba_cnt != 0);
21268 + ASSERT(un->un_blk_lim.lim_max_unmap_descr_cnt != 0);
21269 +	/* Spec says 0xffffffff is a special value, so compute maximums. */
21270 + byte_cnt_lim = lim->lim_max_unmap_lba_cnt < UINT32_MAX ?
21271 + (uint64_t)lim->lim_max_unmap_lba_cnt * un->un_tgt_blocksize :
21272 + UINT64_MAX;
21273 + descr_cnt_lim = MIN(lim->lim_max_unmap_descr_cnt, SD_UNMAP_MAX_DESCR);
21274 +
21275 + for (size_t i = 0; i < dfl->dfl_num_exts; i++) {
21276 + const dkioc_free_list_ext_t *ext = &dfl->dfl_exts[i];
21277 + uint64_t ext_start = ext->dfle_start;
21278 + uint64_t ext_length = ext->dfle_length;
21279 +
21280 + while (ext_length > 0) {
21281 + unmap_blk_descr_t *ubd;
21282 + /* Respect device limit on LBA count per command */
21283 + uint64_t len = MIN(MIN(ext_length, byte_cnt_lim -
21284 + bytes_issued), SD_TGTBLOCKS2BYTES(un, UINT32_MAX));
21285 +
21286 + /* check partition limits */
21287 + if (ext_start + len > part_len) {
21288 + rval = SET_ERROR(EINVAL);
21289 + goto out;
21290 + }
21291 +#ifdef DEBUG
21292 + if (dfl->dfl_ck_func)
21293 + dfl->dfl_ck_func(dfl->dfl_offset + ext_start,
21294 + len, dfl->dfl_ck_arg);
21295 +#endif
21296 + ASSERT3U(descr_issued, <, descr_cnt_lim);
21297 + ASSERT3U(bytes_issued, <, byte_cnt_lim);
21298 + ubd = UNMAP_blk_descr_i(uph, descr_issued);
21299 +
21300 + /* adjust in-partition addresses to be device-global */
21301 + ubd->ubd_lba = BE_64(SD_BYTES2TGTBLOCKS(un,
21302 + dfl->dfl_offset + ext_start + part_off));
21303 + ubd->ubd_lba_cnt = BE_32(SD_BYTES2TGTBLOCKS(un, len));
21304 +
21305 + descr_issued++;
21306 + bytes_issued += len;
21307 +
21308 + /* Issue command when device limits reached */
21309 + if (descr_issued == descr_cnt_lim ||
21310 + bytes_issued == byte_cnt_lim) {
21311 + rval = sd_send_scsi_UNMAP_issue_one(ssc, uph,
21312 + descr_issued, bytes_issued);
21313 + if (rval != 0)
21314 + goto out;
21315 + descr_issued = 0;
21316 + bytes_issued = 0;
21317 + }
21318 +
21319 + ext_start += len;
21320 + ext_length -= len;
21321 + }
21322 + }
21323 +
21324 + if (descr_issued > 0) {
21325 + /* issue last command */
21326 + rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, descr_issued,
21327 + bytes_issued);
21328 + }
21329 +
21330 +out:
21331 + kmem_free(uph, SD_UNMAP_PARAM_LIST_MAXSZ);
21332 + return (rval);
21333 +}
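To make the splitting concrete, a standalone user-space sketch under an assumed 4 GiB per-command byte limit (real limits come from the Block Limits VPD page); it carves one oversized extent the same way the loop above does, minus descriptor batching:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint64_t ext_len = 10ULL << 30;         /* one 10 GiB extent */
            uint64_t byte_cnt_lim = 4ULL << 30;     /* assumed per-cmd cap */
            uint64_t off = 0;
            int cmd = 0;

            while (ext_len > 0) {
                    uint64_t len =
                        (ext_len < byte_cnt_lim) ? ext_len : byte_cnt_lim;

                    printf("cmd %d: UNMAP bytes [%llu, %llu)\n", ++cmd,
                        (unsigned long long)off,
                        (unsigned long long)(off + len));
                    off += len;
                    ext_len -= len;
            }
            return (0);
    }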
21334 +
21335 +/*
21336 + * Issues one or several UNMAP commands based on a list of extents to be
21337 + * unmapped. The internal multi-command processing is hidden, as the exact
21338 + * number of commands and extents per command is limited by both SCSI
21339 + * command syntax and device limits (as expressed in the SCSI Block Limits
21340 + * VPD page and un_blk_lim in struct sd_lun).
21341 + * Returns zero on success, or the error code of the first failed SCSI UNMAP
21342 + * command.
21343 + */
21344 +static int
21345 +sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, int flag)
21346 +{
21347 + struct sd_lun *un = ssc->ssc_un;
21348 + int rval = 0;
21349 +
21350 + ASSERT(!mutex_owned(SD_MUTEX(un)));
21351 + ASSERT(dfl != NULL);
21352 +
21353 + /* Per spec, any of these conditions signals lack of UNMAP support. */
21354 + if (!(un->un_thin_flags & SD_THIN_PROV_ENABLED) ||
21355 + un->un_blk_lim.lim_max_unmap_descr_cnt == 0 ||
21356 + un->un_blk_lim.lim_max_unmap_lba_cnt == 0) {
21357 + return (SET_ERROR(ENOTSUP));
21358 + }
21359 +
21360 + /* For userspace calls we must copy in. */
21361 + if (!(flag & FKIOCTL) && (dfl = dfl_copyin(dfl, flag, KM_SLEEP)) ==
21362 + NULL)
21363 + return (SET_ERROR(EFAULT));
21364 +
21365 + rval = sd_send_scsi_UNMAP_issue(dev, ssc, dfl);
21366 +
21367 + if (!(flag & FKIOCTL)) {
21368 + dfl_free(dfl);
21369 + dfl = NULL;
21370 + }
21371 +
21372 + return (rval);
21373 +}
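For reference, the non-FKIOCTL path above services user-space callers of the DKIOCFREE ioctl. A hedged sketch of such a caller freeing one extent (start and length are byte offsets, matching the byte-based extents the issue path converts to target blocks); error handling omitted, and free_one_extent() is our name:

    #include <sys/types.h>
    #include <sys/dkio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <stropts.h>

    static int
    free_one_extent(int fd, uint64_t start, uint64_t length)
    {
            dkioc_free_list_t *dfl;
            int rv;

            dfl = calloc(1, DFL_SZ(1));     /* header plus one extent */
            dfl->dfl_num_exts = 1;
            dfl->dfl_exts[0].dfle_start = start;
            dfl->dfl_exts[0].dfle_length = length;

            /* ENOTSUP here means the LU did not advertise UNMAP */
            rv = ioctl(fd, DKIOCFREE, dfl);
            free(dfl);
            return (rv);
    }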
21374 +
21375 +/*
21371 21376 * Function: sd_send_scsi_GET_CONFIGURATION
21372 21377 *
21373 21378 * Description: Issues the get configuration command to the device.
21374 21379 	 *		Called from sd_check_for_writable_cd & sd_get_media_info;
21375 21380 	 *		caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
21376 21381 * Arguments: ssc
21377 21382 * ucmdbuf
21378 21383 * rqbuf
21379 21384 * rqbuflen
21380 21385 * bufaddr
21381 21386 * buflen
21382 21387 * path_flag
21383 21388 *
21384 21389 * Return Code: 0 - Success
21385 21390 * errno return code from sd_ssc_send()
21386 21391 *
21387 21392 * Context: Can sleep. Does not return until command is completed.
21388 21393 *
21389 21394 */
21390 21395
21391 21396 static int
21392 21397 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21393 21398 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21394 21399 int path_flag)
21395 21400 {
21396 21401 char cdb[CDB_GROUP1];
21397 21402 int status;
21398 21403 struct sd_lun *un;
21399 21404
21400 21405 ASSERT(ssc != NULL);
21401 21406 un = ssc->ssc_un;
21402 21407 ASSERT(un != NULL);
21403 21408 ASSERT(!mutex_owned(SD_MUTEX(un)));
21404 21409 ASSERT(bufaddr != NULL);
21405 21410 ASSERT(ucmdbuf != NULL);
21406 21411 ASSERT(rqbuf != NULL);
21407 21412
21408 21413 SD_TRACE(SD_LOG_IO, un,
21409 21414 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
21410 21415
21411 21416 bzero(cdb, sizeof (cdb));
21412 21417 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21413 21418 bzero(rqbuf, rqbuflen);
21414 21419 bzero(bufaddr, buflen);
21415 21420
21416 21421 /*
21417 21422 * Set up cdb field for the get configuration command.
21418 21423 */
21419 21424 cdb[0] = SCMD_GET_CONFIGURATION;
21420 21425 cdb[1] = 0x02; /* Requested Type */
21421 21426 cdb[8] = SD_PROFILE_HEADER_LEN;
21422 21427 ucmdbuf->uscsi_cdb = cdb;
21423 21428 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21424 21429 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21425 21430 ucmdbuf->uscsi_buflen = buflen;
21426 - ucmdbuf->uscsi_timeout = sd_io_time;
21431 + ucmdbuf->uscsi_timeout = un->un_uscsi_timeout;
21427 21432 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21428 21433 ucmdbuf->uscsi_rqlen = rqbuflen;
21429 21434 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21430 21435
21431 21436 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21432 21437 UIO_SYSSPACE, path_flag);
21433 21438
21434 21439 switch (status) {
21435 21440 case 0:
21436 21441 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21437 21442 break; /* Success! */
21438 21443 case EIO:
21439 21444 switch (ucmdbuf->uscsi_status) {
21440 21445 case STATUS_RESERVATION_CONFLICT:
21441 21446 status = EACCES;
21442 21447 break;
21443 21448 default:
21444 21449 break;
21445 21450 }
21446 21451 break;
21447 21452 default:
21448 21453 break;
21449 21454 }
21450 21455
21451 21456 if (status == 0) {
21452 21457 SD_DUMP_MEMORY(un, SD_LOG_IO,
21453 21458 "sd_send_scsi_GET_CONFIGURATION: data",
21454 21459 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21455 21460 }
21456 21461
21457 21462 SD_TRACE(SD_LOG_IO, un,
21458 21463 "sd_send_scsi_GET_CONFIGURATION: exit\n");
21459 21464
21460 21465 return (status);
21461 21466 }
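Since the caller must pass buflen == SD_PROFILE_HEADER_LEN, what comes back is just the 8-byte feature header. A small sketch of extracting the current profile from it (big-endian bytes 6-7 per MMC; the helper name is ours):

    static uint16_t
    get_config_current_profile(const uchar_t *hdr)
    {
            /* hdr[0..3] = data length (BE), hdr[6..7] = current profile */
            return ((uint16_t)((hdr[6] << 8) | hdr[7]));
    }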
21462 21467
21463 21468 /*
21464 21469 * Function: sd_send_scsi_feature_GET_CONFIGURATION
21465 21470 *
21466 21471 * Description: Issues the get configuration command to the device to
21467 21472 * retrieve a specific feature. Called from
21468 21473 * sd_check_for_writable_cd & sd_set_mmc_caps.
21469 21474 * Arguments: ssc
21470 21475 * ucmdbuf
21471 21476 * rqbuf
21472 21477 * rqbuflen
21473 21478 * bufaddr
21474 21479 * buflen
21475 21480 * feature
21476 21481 *
21477 21482 * Return Code: 0 - Success
21478 21483 * errno return code from sd_ssc_send()
21479 21484 *
21480 21485 * Context: Can sleep. Does not return until command is completed.
21481 21486 *
21482 21487 */
21483 21488 static int
21484 21489 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21485 21490 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21486 21491 char feature, int path_flag)
21487 21492 {
21488 21493 char cdb[CDB_GROUP1];
21489 21494 int status;
21490 21495 struct sd_lun *un;
21491 21496
21492 21497 ASSERT(ssc != NULL);
21493 21498 un = ssc->ssc_un;
21494 21499 ASSERT(un != NULL);
21495 21500 ASSERT(!mutex_owned(SD_MUTEX(un)));
21496 21501 ASSERT(bufaddr != NULL);
21497 21502 ASSERT(ucmdbuf != NULL);
21498 21503 ASSERT(rqbuf != NULL);
21499 21504
21500 21505 SD_TRACE(SD_LOG_IO, un,
21501 21506 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
21502 21507
21503 21508 bzero(cdb, sizeof (cdb));
21504 21509 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21505 21510 bzero(rqbuf, rqbuflen);
21506 21511 bzero(bufaddr, buflen);
21507 21512
21508 21513 /*
21509 21514 * Set up cdb field for the get configuration command.
21510 21515 */
21511 21516 cdb[0] = SCMD_GET_CONFIGURATION;
21512 21517 cdb[1] = 0x02; /* Requested Type */
21513 21518 cdb[3] = feature;
21514 21519 cdb[8] = buflen;
21515 21520 ucmdbuf->uscsi_cdb = cdb;
21516 21521 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21517 21522 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21518 21523 ucmdbuf->uscsi_buflen = buflen;
21519 - ucmdbuf->uscsi_timeout = sd_io_time;
21524 + ucmdbuf->uscsi_timeout = un->un_uscsi_timeout;
21520 21525 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21521 21526 ucmdbuf->uscsi_rqlen = rqbuflen;
21522 21527 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21523 21528
21524 21529 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21525 21530 UIO_SYSSPACE, path_flag);
21526 21531
21527 21532 switch (status) {
21528 21533 case 0:
21529 21534
21530 21535 break; /* Success! */
21531 21536 case EIO:
21532 21537 switch (ucmdbuf->uscsi_status) {
21533 21538 case STATUS_RESERVATION_CONFLICT:
21534 21539 status = EACCES;
21535 21540 break;
21536 21541 default:
21537 21542 break;
21538 21543 }
21539 21544 break;
21540 21545 default:
21541 21546 break;
21542 21547 }
21543 21548
21544 21549 if (status == 0) {
21545 21550 SD_DUMP_MEMORY(un, SD_LOG_IO,
21546 21551 "sd_send_scsi_feature_GET_CONFIGURATION: data",
21547 21552 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21548 21553 }
21549 21554
21550 21555 SD_TRACE(SD_LOG_IO, un,
21551 21556 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
21552 21557
21553 21558 return (status);
21554 21559 }
21555 21560
21556 21561
21557 21562 /*
21558 21563 * Function: sd_send_scsi_MODE_SENSE
21559 21564 *
21560 21565 * Description: Utility function for issuing a scsi MODE SENSE command.
21561 21566 * Note: This routine uses a consistent implementation for Group0,
21562 21567 * Group1, and Group2 commands across all platforms. ATAPI devices
21563 21568 	 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21564 21569 *
21565 21570 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21566 21571 * structure for this target.
21567 21572 	 *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
21568 21573 	 *			  CDB_GROUP[1|2] (10 byte)).
21569 21574 * bufaddr - buffer for page data retrieved from the target.
21570 21575 * buflen - size of page to be retrieved.
21571 21576 * page_code - page code of data to be retrieved from the target.
21572 21577 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21573 21578 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21574 21579 * to use the USCSI "direct" chain and bypass the normal
21575 21580 * command waitq.
21576 21581 *
21577 21582 * Return Code: 0 - Success
21578 21583 * errno return code from sd_ssc_send()
21579 21584 *
21580 21585 * Context: Can sleep. Does not return until command is completed.
21581 21586 */
21582 21587
21583 21588 static int
21584 21589 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21585 21590 size_t buflen, uchar_t page_code, int path_flag)
21586 21591 {
21587 21592 struct scsi_extended_sense sense_buf;
21588 21593 union scsi_cdb cdb;
21589 21594 struct uscsi_cmd ucmd_buf;
21590 21595 int status;
21591 21596 int headlen;
21592 21597 struct sd_lun *un;
21593 21598
21594 21599 ASSERT(ssc != NULL);
21595 21600 un = ssc->ssc_un;
21596 21601 ASSERT(un != NULL);
21597 21602 ASSERT(!mutex_owned(SD_MUTEX(un)));
21598 21603 ASSERT(bufaddr != NULL);
21599 21604 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21600 21605 (cdbsize == CDB_GROUP2));
21601 21606
21602 21607 SD_TRACE(SD_LOG_IO, un,
21603 21608 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
21604 21609
21605 21610 bzero(&cdb, sizeof (cdb));
21606 21611 bzero(&ucmd_buf, sizeof (ucmd_buf));
21607 21612 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21608 21613 bzero(bufaddr, buflen);
21609 21614
21610 21615 if (cdbsize == CDB_GROUP0) {
21611 21616 cdb.scc_cmd = SCMD_MODE_SENSE;
21612 21617 cdb.cdb_opaque[2] = page_code;
21613 21618 FORMG0COUNT(&cdb, buflen);
21614 21619 headlen = MODE_HEADER_LENGTH;
21615 21620 } else {
21616 21621 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
21617 21622 cdb.cdb_opaque[2] = page_code;
21618 21623 FORMG1COUNT(&cdb, buflen);
21619 21624 headlen = MODE_HEADER_LENGTH_GRP2;
21620 21625 }
21621 21626
21622 21627 ASSERT(headlen <= buflen);
21623 21628 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21624 21629
21625 21630 ucmd_buf.uscsi_cdb = (char *)&cdb;
21626 21631 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21627 21632 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21628 21633 ucmd_buf.uscsi_buflen = buflen;
21629 21634 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21630 21635 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21631 21636 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21632 - ucmd_buf.uscsi_timeout = 60;
21637 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
21633 21638
21634 21639 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21635 21640 UIO_SYSSPACE, path_flag);
21636 21641
21637 21642 switch (status) {
21638 21643 case 0:
21639 21644 /*
21640 21645 * sr_check_wp() uses 0x3f page code and check the header of
21641 21646 * mode page to determine if target device is write-protected.
21642 21647 * But some USB devices return 0 bytes for 0x3f page code. For
21643 21648 * this case, make sure that mode page header is returned at
21644 21649 * least.
21645 21650 */
21646 21651 if (buflen - ucmd_buf.uscsi_resid < headlen) {
21647 21652 status = EIO;
21648 21653 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
21649 21654 "mode page header is not returned");
21650 21655 }
21651 21656 break; /* Success! */
21652 21657 case EIO:
21653 21658 switch (ucmd_buf.uscsi_status) {
21654 21659 case STATUS_RESERVATION_CONFLICT:
21655 21660 status = EACCES;
21656 21661 break;
21657 21662 default:
21658 21663 break;
21659 21664 }
21660 21665 break;
21661 21666 default:
21662 21667 break;
21663 21668 }
21664 21669
21665 21670 if (status == 0) {
21666 21671 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
21667 21672 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21668 21673 }
21669 21674 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
21670 21675
21671 21676 return (status);
21672 21677 }
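A sketch of the header check the USB note above alludes to: sr_check_wp() reads the WP bit from the device-specific byte of the mode parameter header, which is why at least headlen bytes must come back. Byte offsets follow the standard 6- and 10-byte mode header layouts; the helper name is ours:

    static int
    mode_hdr_write_protected(const uchar_t *hdr, int cdbsize)
    {
            /* device-specific byte: offset 2 (6-byte), 3 (10-byte) */
            int devspec = (cdbsize == CDB_GROUP0) ? hdr[2] : hdr[3];

            return ((devspec & 0x80) != 0);         /* bit 7 = WP */
    }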
21673 21678
21674 21679
21675 21680 /*
21676 21681 * Function: sd_send_scsi_MODE_SELECT
21677 21682 *
21678 21683 * Description: Utility function for issuing a scsi MODE SELECT command.
21679 21684 * Note: This routine uses a consistent implementation for Group0,
21680 21685 * Group1, and Group2 commands across all platforms. ATAPI devices
21681 21686 	 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21682 21687 *
21683 21688 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21684 21689 * structure for this target.
21685 21690 	 *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
21686 21691 	 *			  CDB_GROUP[1|2] (10 byte)).
21687 21692 * bufaddr - buffer for page data retrieved from the target.
21688 21693 * buflen - size of page to be retrieved.
21689 21694 	 *		save_page - boolean to determine if SP bit should be set.
21690 21695 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21691 21696 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21692 21697 * to use the USCSI "direct" chain and bypass the normal
21693 21698 * command waitq.
21694 21699 *
21695 21700 * Return Code: 0 - Success
21696 21701 * errno return code from sd_ssc_send()
21697 21702 *
21698 21703 * Context: Can sleep. Does not return until command is completed.
21699 21704 */
21700 21705
21701 21706 static int
21702 21707 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21703 21708 size_t buflen, uchar_t save_page, int path_flag)
21704 21709 {
21705 21710 struct scsi_extended_sense sense_buf;
21706 21711 union scsi_cdb cdb;
21707 21712 struct uscsi_cmd ucmd_buf;
21708 21713 int status;
21709 21714 struct sd_lun *un;
21710 21715
21711 21716 ASSERT(ssc != NULL);
21712 21717 un = ssc->ssc_un;
21713 21718 ASSERT(un != NULL);
21714 21719 ASSERT(!mutex_owned(SD_MUTEX(un)));
21715 21720 ASSERT(bufaddr != NULL);
21716 21721 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21717 21722 (cdbsize == CDB_GROUP2));
21718 21723
21719 21724 SD_TRACE(SD_LOG_IO, un,
21720 21725 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
21721 21726
21722 21727 bzero(&cdb, sizeof (cdb));
21723 21728 bzero(&ucmd_buf, sizeof (ucmd_buf));
21724 21729 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21725 21730
21726 21731 /* Set the PF bit for many third party drives */
21727 21732 cdb.cdb_opaque[1] = 0x10;
21728 21733
21729 21734 /* Set the savepage(SP) bit if given */
21730 21735 if (save_page == SD_SAVE_PAGE) {
21731 21736 cdb.cdb_opaque[1] |= 0x01;
21732 21737 }
21733 21738
21734 21739 if (cdbsize == CDB_GROUP0) {
21735 21740 cdb.scc_cmd = SCMD_MODE_SELECT;
21736 21741 FORMG0COUNT(&cdb, buflen);
21737 21742 } else {
21738 21743 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
21739 21744 FORMG1COUNT(&cdb, buflen);
21740 21745 }
21741 21746
21742 21747 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21743 21748
21744 21749 ucmd_buf.uscsi_cdb = (char *)&cdb;
21745 21750 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21746 21751 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21747 21752 ucmd_buf.uscsi_buflen = buflen;
21748 21753 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21749 21754 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21750 21755 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21751 - ucmd_buf.uscsi_timeout = 60;
21756 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
21752 21757
21753 21758 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21754 21759 UIO_SYSSPACE, path_flag);
21755 21760
21756 21761 switch (status) {
21757 21762 case 0:
21758 21763 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21759 21764 break; /* Success! */
21760 21765 case EIO:
21761 21766 switch (ucmd_buf.uscsi_status) {
21762 21767 case STATUS_RESERVATION_CONFLICT:
21763 21768 status = EACCES;
21764 21769 break;
21765 21770 default:
21766 21771 break;
21767 21772 }
21768 21773 break;
21769 21774 default:
21770 21775 break;
21771 21776 }
21772 21777
21773 21778 if (status == 0) {
21774 21779 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21775 21780 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21776 21781 }
21777 21782 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21778 21783
21779 21784 return (status);
21780 21785 }
21781 21786
21782 21787
21783 21788 /*
21784 21789 * Function: sd_send_scsi_RDWR
21785 21790 *
21786 21791 * Description: Issue a scsi READ or WRITE command with the given parameters.
21787 21792 *
21788 21793 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21789 21794 * structure for this target.
21790 21795 * cmd: SCMD_READ or SCMD_WRITE
21791 21796 * bufaddr: Address of caller's buffer to receive the RDWR data
21792 21797 	 *		buflen:  Length of caller's buffer to receive the RDWR data.
21793 21798 * start_block: Block number for the start of the RDWR operation.
21794 21799 * (Assumes target-native block size.)
21795 21800 	 *		residp:  Pointer to variable to receive the residual of the
21796 21801 	 *		RDWR operation (may be NULL if no residual requested).
21797 21802 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21798 21803 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21799 21804 * to use the USCSI "direct" chain and bypass the normal
21800 21805 * command waitq.
21801 21806 *
21802 21807 * Return Code: 0 - Success
21803 21808 * errno return code from sd_ssc_send()
21804 21809 *
21805 21810 * Context: Can sleep. Does not return until command is completed.
21806 21811 */
21807 21812
21808 21813 static int
21809 21814 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
21810 21815 size_t buflen, daddr_t start_block, int path_flag)
21811 21816 {
21812 21817 struct scsi_extended_sense sense_buf;
21813 21818 union scsi_cdb cdb;
21814 21819 struct uscsi_cmd ucmd_buf;
21815 21820 uint32_t block_count;
21816 21821 int status;
21817 21822 int cdbsize;
21818 21823 uchar_t flag;
21819 21824 struct sd_lun *un;
21820 21825
21821 21826 ASSERT(ssc != NULL);
21822 21827 un = ssc->ssc_un;
21823 21828 ASSERT(un != NULL);
21824 21829 ASSERT(!mutex_owned(SD_MUTEX(un)));
21825 21830 ASSERT(bufaddr != NULL);
21826 21831 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));
21827 21832
21828 21833 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);
21829 21834
21830 21835 if (un->un_f_tgt_blocksize_is_valid != TRUE) {
21831 21836 return (EINVAL);
21832 21837 }
21833 21838
21834 21839 mutex_enter(SD_MUTEX(un));
21835 21840 block_count = SD_BYTES2TGTBLOCKS(un, buflen);
21836 21841 mutex_exit(SD_MUTEX(un));
21837 21842
21838 21843 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;
21839 21844
21840 21845 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
21841 21846 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
21842 21847 bufaddr, buflen, start_block, block_count);
21843 21848
21844 21849 bzero(&cdb, sizeof (cdb));
21845 21850 bzero(&ucmd_buf, sizeof (ucmd_buf));
21846 21851 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21847 21852
21848 21853 /* Compute CDB size to use */
21849 21854 if (start_block > 0xffffffff)
21850 21855 cdbsize = CDB_GROUP4;
21851 21856 else if ((start_block & 0xFFE00000) ||
21852 21857 (un->un_f_cfg_is_atapi == TRUE))
21853 21858 cdbsize = CDB_GROUP1;
21854 21859 else
21855 21860 cdbsize = CDB_GROUP0;
21856 21861
21857 21862 switch (cdbsize) {
21858 21863 case CDB_GROUP0: /* 6-byte CDBs */
21859 21864 cdb.scc_cmd = cmd;
21860 21865 FORMG0ADDR(&cdb, start_block);
21861 21866 FORMG0COUNT(&cdb, block_count);
21862 21867 break;
21863 21868 case CDB_GROUP1: /* 10-byte CDBs */
21864 21869 cdb.scc_cmd = cmd | SCMD_GROUP1;
21865 21870 FORMG1ADDR(&cdb, start_block);
21866 21871 FORMG1COUNT(&cdb, block_count);
21867 21872 break;
21868 21873 case CDB_GROUP4: /* 16-byte CDBs */
21869 21874 cdb.scc_cmd = cmd | SCMD_GROUP4;
21870 21875 FORMG4LONGADDR(&cdb, (uint64_t)start_block);
21871 21876 FORMG4COUNT(&cdb, block_count);
21872 21877 break;
21873 21878 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */
21874 21879 default:
21875 21880 /* All others reserved */
21876 21881 return (EINVAL);
21877 21882 }
21878 21883
21879 21884 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */
21880 21885 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21881 21886
21882 21887 ucmd_buf.uscsi_cdb = (char *)&cdb;
21883 21888 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21884 21889 ucmd_buf.uscsi_bufaddr = bufaddr;
21885 21890 ucmd_buf.uscsi_buflen = buflen;
21886 21891 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21887 21892 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21888 21893 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
21889 - ucmd_buf.uscsi_timeout = 60;
21894 + ucmd_buf.uscsi_timeout = un->un_cmd_timeout;
21890 21895 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21891 21896 UIO_SYSSPACE, path_flag);
21892 21897
21893 21898 switch (status) {
21894 21899 case 0:
21895 21900 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21896 21901 break; /* Success! */
21897 21902 case EIO:
21898 21903 switch (ucmd_buf.uscsi_status) {
21899 21904 case STATUS_RESERVATION_CONFLICT:
21900 21905 status = EACCES;
21901 21906 break;
21902 21907 default:
21903 21908 break;
21904 21909 }
21905 21910 break;
21906 21911 default:
21907 21912 break;
21908 21913 }
21909 21914
21910 21915 if (status == 0) {
21911 21916 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
21912 21917 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21913 21918 }
21914 21919
21915 21920 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");
21916 21921
21917 21922 return (status);
21918 21923 }
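The CDB-size selection above is worth spelling out: a 6-byte CDB carries only a 21-bit LBA, so mask 0xFFE00000 catches any start block that needs the 10-byte form, and anything past 32 bits needs the 16-byte form. The same decision in isolation:

    static int
    rdwr_cdb_group(uint64_t lba, boolean_t is_atapi)
    {
            if (lba > 0xffffffff)
                    return (CDB_GROUP4);    /* 16-byte, 64-bit LBA */
            if ((lba & 0xFFE00000) != 0 || is_atapi)
                    return (CDB_GROUP1);    /* 10-byte, 32-bit LBA */
            return (CDB_GROUP0);            /* 6-byte, 21-bit LBA */
    }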
21919 21924
21920 21925
21921 21926 /*
21922 21927 * Function: sd_send_scsi_LOG_SENSE
21923 21928 *
21924 21929 * Description: Issue a scsi LOG_SENSE command with the given parameters.
21925 21930 *
21926 21931 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21927 21932 * structure for this target.
21928 21933 *
21929 21934 * Return Code: 0 - Success
21930 21935 * errno return code from sd_ssc_send()
21931 21936 *
21932 21937 * Context: Can sleep. Does not return until command is completed.
21933 21938 */
21934 21939
21935 21940 static int
21936 21941 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
21937 21942 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, int path_flag)
21938 21943 {
21939 21944 struct scsi_extended_sense sense_buf;
21940 21945 union scsi_cdb cdb;
21941 21946 struct uscsi_cmd ucmd_buf;
21942 21947 int status;
21943 21948 struct sd_lun *un;
21944 21949
21945 21950 ASSERT(ssc != NULL);
21946 21951 un = ssc->ssc_un;
21947 21952 ASSERT(un != NULL);
21948 21953 ASSERT(!mutex_owned(SD_MUTEX(un)));
21949 21954
21950 21955 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
21951 21956
21952 21957 bzero(&cdb, sizeof (cdb));
21953 21958 bzero(&ucmd_buf, sizeof (ucmd_buf));
21954 21959 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21955 21960
21956 21961 cdb.scc_cmd = SCMD_LOG_SENSE_G1;
21957 21962 cdb.cdb_opaque[2] = (page_control << 6) | page_code;
21958 21963 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
21959 21964 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
21960 21965 FORMG1COUNT(&cdb, buflen);
21961 21966
21962 21967 ucmd_buf.uscsi_cdb = (char *)&cdb;
21963 21968 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
21964 21969 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21965 21970 ucmd_buf.uscsi_buflen = buflen;
21966 21971 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21967 21972 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21968 21973 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21969 - ucmd_buf.uscsi_timeout = 60;
21974 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
21970 21975
21971 21976 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21972 21977 UIO_SYSSPACE, path_flag);
21973 21978
21974 21979 switch (status) {
21975 21980 case 0:
21976 21981 break;
21977 21982 case EIO:
21978 21983 switch (ucmd_buf.uscsi_status) {
21979 21984 case STATUS_RESERVATION_CONFLICT:
21980 21985 status = EACCES;
21981 21986 break;
21982 21987 case STATUS_CHECK:
21983 21988 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
21984 21989 (scsi_sense_key((uint8_t *)&sense_buf) ==
21985 21990 KEY_ILLEGAL_REQUEST) &&
21986 21991 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
21987 21992 /*
21988 21993 * ASC 0x24: INVALID FIELD IN CDB
21989 21994 */
21990 21995 switch (page_code) {
21991 21996 case START_STOP_CYCLE_PAGE:
21992 21997 /*
21993 21998 * The start stop cycle counter is
21994 21999 * implemented as page 0x31 in earlier
21995 22000 * generation disks. In new generation
21996 22001 * disks the start stop cycle counter is
21997 22002 * implemented as page 0xE. To properly
21998 22003 				 * handle this case, if an attempt for
21999 22004 				 * log page 0xE is made and fails, we
22000 22005 * will try again using page 0x31.
22001 22006 *
22002 22007 * Network storage BU committed to
22003 22008 * maintain the page 0x31 for this
22004 22009 * purpose and will not have any other
22005 22010 * page implemented with page code 0x31
22006 22011 * until all disks transition to the
22007 22012 * standard page.
22008 22013 */
22009 22014 mutex_enter(SD_MUTEX(un));
22010 22015 un->un_start_stop_cycle_page =
22011 22016 START_STOP_CYCLE_VU_PAGE;
22012 22017 cdb.cdb_opaque[2] =
22013 22018 (char)(page_control << 6) |
22014 22019 un->un_start_stop_cycle_page;
22015 22020 mutex_exit(SD_MUTEX(un));
22016 22021 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22017 22022 status = sd_ssc_send(
22018 22023 ssc, &ucmd_buf, FKIOCTL,
22019 22024 UIO_SYSSPACE, path_flag);
22020 22025
22021 22026 break;
22022 22027 case TEMPERATURE_PAGE:
22023 22028 status = ENOTTY;
22024 22029 break;
22025 22030 default:
22026 22031 break;
22027 22032 }
22028 22033 }
22029 22034 break;
22030 22035 default:
22031 22036 break;
22032 22037 }
22033 22038 break;
22034 22039 default:
22035 22040 break;
22036 22041 }
22037 22042
22038 22043 if (status == 0) {
22039 22044 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22040 22045 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
22041 22046 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
22042 22047 }
22043 22048
22044 22049 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
22045 22050
22046 22051 return (status);
22047 22052 }
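A worked example of the byte-2 packing above: page_control 1 (cumulative values) with page_code 0x0E packs as (1 << 6) | 0x0E == 0x4E; this is also the byte the retry path rewrites when it falls back to the vendor-unique page 0x31. The packing in isolation (helper name is ours):

    static uchar_t
    log_sense_byte2(uchar_t page_control, uchar_t page_code)
    {
            return ((page_control << 6) | page_code);   /* e.g. 0x4E */
    }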
22048 22053
22049 22054
22050 22055 /*
22051 22056 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
22052 22057 *
22053 22058 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
22054 22059 *
22055 22060 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
22056 22061 * structure for this target.
22057 22062 * bufaddr
22058 22063 * buflen
22059 22064 * class_req
22060 22065 *
22061 22066 * Return Code: 0 - Success
22062 22067 * errno return code from sd_ssc_send()
22063 22068 *
22064 22069 * Context: Can sleep. Does not return until command is completed.
22065 22070 */
22066 22071
22067 22072 static int
22068 22073 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
22069 22074 size_t buflen, uchar_t class_req)
22070 22075 {
22071 22076 union scsi_cdb cdb;
22072 22077 struct uscsi_cmd ucmd_buf;
22073 22078 int status;
22074 22079 struct sd_lun *un;
22075 22080
22076 22081 ASSERT(ssc != NULL);
22077 22082 un = ssc->ssc_un;
22078 22083 ASSERT(un != NULL);
22079 22084 ASSERT(!mutex_owned(SD_MUTEX(un)));
22080 22085 ASSERT(bufaddr != NULL);
22081 22086
22082 22087 SD_TRACE(SD_LOG_IO, un,
22083 22088 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);
22084 22089
22085 22090 bzero(&cdb, sizeof (cdb));
22086 22091 bzero(&ucmd_buf, sizeof (ucmd_buf));
22087 22092 bzero(bufaddr, buflen);
22088 22093
22089 22094 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
22090 22095 cdb.cdb_opaque[1] = 1; /* polled */
22091 22096 cdb.cdb_opaque[4] = class_req;
22092 22097 FORMG1COUNT(&cdb, buflen);
22093 22098
22094 22099 ucmd_buf.uscsi_cdb = (char *)&cdb;
22095 22100 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22096 22101 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22097 22102 ucmd_buf.uscsi_buflen = buflen;
22098 22103 ucmd_buf.uscsi_rqbuf = NULL;
22099 22104 ucmd_buf.uscsi_rqlen = 0;
22100 22105 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
22101 - ucmd_buf.uscsi_timeout = 60;
22106 + ucmd_buf.uscsi_timeout = un->un_uscsi_timeout;
22102 22107
22103 22108 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22104 22109 UIO_SYSSPACE, SD_PATH_DIRECT);
22105 22110
22106 22111 /*
22107 22112 	 * Only handle status == 0; the upper-level caller
22108 22113 	 * will make a different assessment based on the context.
22109 22114 */
22110 22115 if (status == 0) {
22111 22116 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22112 22117
22113 22118 if (ucmd_buf.uscsi_resid != 0) {
22114 22119 status = EIO;
22115 22120 }
22116 22121 }
22117 22122
22118 22123 SD_TRACE(SD_LOG_IO, un,
22119 22124 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");
22120 22125
22121 22126 return (status);
22122 22127 }
22123 22128
22124 22129
22125 22130 static boolean_t
22126 22131 sd_gesn_media_data_valid(uchar_t *data)
22127 22132 {
22128 22133 uint16_t len;
22129 22134
22130 22135 len = (data[1] << 8) | data[0];
22131 22136 return ((len >= 6) &&
22132 22137 ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
22133 22138 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
22134 22139 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
22135 22140 }
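For reference, the bytes tested above are the GET EVENT STATUS NOTIFICATION header as this check consumes it (a sketch of the layout, not a full MMC decode):

    /*
     *      data[0..1]      event descriptor length (must cover >= 6 bytes)
     *      data[2]         bit 7 = NEA (no event available),
     *                      low bits = notification class of this event
     *      data[3]         supported event class bitmask; the media class
     *                      bit must be set
     */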
22136 22141
22137 22142
22138 22143 /*
22139 22144 * Function: sdioctl
22140 22145 *
22141 22146 * Description: Driver's ioctl(9e) entry point function.
22142 22147 *
22143 22148 * Arguments: dev - device number
22144 22149 * cmd - ioctl operation to be performed
22145 22150 * arg - user argument, contains data to be set or reference
22146 22151 * parameter for get
22147 22152 * flag - bit flag, indicating open settings, 32/64 bit type
22148 22153 * cred_p - user credential pointer
22149 22154 * rval_p - calling process return value (OPT)
22150 22155 *
22151 22156 * Return Code: EINVAL
22152 22157 * ENOTTY
22153 22158 * ENXIO
22154 22159 * EIO
22155 22160 * EFAULT
22156 22161 * ENOTSUP
22157 22162 * EPERM
22158 22163 *
22159 22164 * Context: Called from the device switch at normal priority.
22160 22165 */
22161 22166
22162 22167 static int
22163 22168 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
22164 22169 {
22165 22170 struct sd_lun *un = NULL;
22166 22171 int err = 0;
22167 22172 int i = 0;
22168 22173 cred_t *cr;
22169 22174 int tmprval = EINVAL;
22170 22175 boolean_t is_valid;
22171 22176 sd_ssc_t *ssc;
22172 22177
22173 22178 /*
22174 22179 * All device accesses go thru sdstrategy where we check on suspend
22175 22180 * status
22176 22181 */
22177 22182 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22178 22183 return (ENXIO);
22179 22184 }
22180 22185
22181 22186 ASSERT(!mutex_owned(SD_MUTEX(un)));
22182 22187
22183 - /* Initialize sd_ssc_t for internal uscsi commands */
22184 - ssc = sd_ssc_init(un);
22185 -
22186 - is_valid = SD_IS_VALID_LABEL(un);
22187 -
22188 22188 /*
22189 22189 * Moved this wait from sd_uscsi_strategy to here for
22190 22190 * reasons of deadlock prevention. Internal driver commands,
22191 22191 * specifically those to change a devices power level, result
22192 22192 * in a call to sd_uscsi_strategy.
22193 22193 */
22194 22194 mutex_enter(SD_MUTEX(un));
22195 22195 while ((un->un_state == SD_STATE_SUSPENDED) ||
22196 - (un->un_state == SD_STATE_PM_CHANGING)) {
22196 + (un->un_state == SD_STATE_PM_CHANGING) ||
22197 + (un->un_state == SD_STATE_ATTACHING)) {
22197 22198 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
22198 22199 }
22200 +
22201 + if (un->un_state == SD_STATE_ATTACH_FAILED) {
22202 + mutex_exit(SD_MUTEX(un));
22203 + SD_ERROR(SD_LOG_READ_WRITE, un,
22204 + "sdioctl: attach failed\n");
22205 + return (EIO);
22206 + }
22207 +
22199 22208 /*
22200 22209 * Twiddling the counter here protects commands from now
22201 22210 * through to the top of sd_uscsi_strategy. Without the
22202 22211 * counter inc. a power down, for example, could get in
22203 22212 * after the above check for state is made and before
22204 22213 * execution gets to the top of sd_uscsi_strategy.
22205 22214 * That would cause problems.
22206 22215 */
22207 22216 un->un_ncmds_in_driver++;
22217 + mutex_exit(SD_MUTEX(un));
22208 22218
22219 + /* Initialize sd_ssc_t for internal uscsi commands */
22220 + ssc = sd_ssc_init(un);
22221 +
22222 + is_valid = SD_IS_VALID_LABEL(un);
22223 +
22224 + mutex_enter(SD_MUTEX(un));
22225 +
22209 22226 if (!is_valid &&
22210 22227 (flag & (FNDELAY | FNONBLOCK))) {
22211 22228 switch (cmd) {
22212 22229 case DKIOCGGEOM: /* SD_PATH_DIRECT */
22213 22230 case DKIOCGVTOC:
22214 22231 case DKIOCGEXTVTOC:
22215 22232 case DKIOCGAPART:
22216 22233 case DKIOCPARTINFO:
22217 22234 case DKIOCEXTPARTINFO:
22218 22235 case DKIOCSGEOM:
22219 22236 case DKIOCSAPART:
22220 22237 case DKIOCGETEFI:
22221 22238 case DKIOCPARTITION:
22222 22239 case DKIOCSVTOC:
22223 22240 case DKIOCSEXTVTOC:
22224 22241 case DKIOCSETEFI:
22225 22242 case DKIOCGMBOOT:
22226 22243 case DKIOCSMBOOT:
22227 22244 case DKIOCG_PHYGEOM:
22228 22245 case DKIOCG_VIRTGEOM:
22229 -#if defined(__i386) || defined(__amd64)
22230 22246 case DKIOCSETEXTPART:
22231 -#endif
22232 22247 /* let cmlb handle it */
22233 22248 goto skip_ready_valid;
22234 -
22235 22249 case CDROMPAUSE:
22236 22250 case CDROMRESUME:
22237 22251 case CDROMPLAYMSF:
22238 22252 case CDROMPLAYTRKIND:
22239 22253 case CDROMREADTOCHDR:
22240 22254 case CDROMREADTOCENTRY:
22241 22255 case CDROMSTOP:
22242 22256 case CDROMSTART:
22243 22257 case CDROMVOLCTRL:
22244 22258 case CDROMSUBCHNL:
22245 22259 case CDROMREADMODE2:
22246 22260 case CDROMREADMODE1:
22247 22261 case CDROMREADOFFSET:
22248 22262 case CDROMSBLKMODE:
22249 22263 case CDROMGBLKMODE:
22250 22264 case CDROMGDRVSPEED:
22251 22265 case CDROMSDRVSPEED:
22252 22266 case CDROMCDDA:
22253 22267 case CDROMCDXA:
22254 22268 case CDROMSUBCODE:
22255 22269 if (!ISCD(un)) {
22256 22270 un->un_ncmds_in_driver--;
22257 22271 ASSERT(un->un_ncmds_in_driver >= 0);
22272 + if (un->un_f_detach_waiting)
22273 + cv_signal(&un->un_detach_cv);
22258 22274 mutex_exit(SD_MUTEX(un));
22259 22275 err = ENOTTY;
22260 22276 goto done_without_assess;
22261 22277 }
22262 22278 break;
22263 22279 case FDEJECT:
22264 22280 case DKIOCEJECT:
22265 22281 case CDROMEJECT:
22266 22282 if (!un->un_f_eject_media_supported) {
22267 22283 un->un_ncmds_in_driver--;
22268 22284 ASSERT(un->un_ncmds_in_driver >= 0);
22285 + if (un->un_f_detach_waiting)
22286 + cv_signal(&un->un_detach_cv);
22269 22287 mutex_exit(SD_MUTEX(un));
22270 22288 err = ENOTTY;
22271 22289 goto done_without_assess;
22272 22290 }
22273 22291 break;
22274 22292 case DKIOCFLUSHWRITECACHE:
22275 22293 mutex_exit(SD_MUTEX(un));
22276 22294 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22277 22295 if (err != 0) {
22278 22296 mutex_enter(SD_MUTEX(un));
22279 22297 un->un_ncmds_in_driver--;
22280 22298 ASSERT(un->un_ncmds_in_driver >= 0);
22299 + if (un->un_f_detach_waiting)
22300 + cv_signal(&un->un_detach_cv);
22281 22301 mutex_exit(SD_MUTEX(un));
22282 22302 err = EIO;
22283 22303 goto done_quick_assess;
22284 22304 }
22285 22305 mutex_enter(SD_MUTEX(un));
22286 22306 /* FALLTHROUGH */
22287 22307 case DKIOCREMOVABLE:
22288 22308 case DKIOCHOTPLUGGABLE:
22289 22309 case DKIOCINFO:
22290 22310 case DKIOCGMEDIAINFO:
22291 22311 case DKIOCGMEDIAINFOEXT:
22292 22312 case DKIOCSOLIDSTATE:
22293 22313 case MHIOCENFAILFAST:
22294 22314 case MHIOCSTATUS:
22295 22315 case MHIOCTKOWN:
22296 22316 case MHIOCRELEASE:
22297 22317 case MHIOCGRP_INKEYS:
22298 22318 case MHIOCGRP_INRESV:
22299 22319 case MHIOCGRP_REGISTER:
22300 22320 case MHIOCGRP_CLEAR:
22301 22321 case MHIOCGRP_RESERVE:
22302 22322 case MHIOCGRP_PREEMPTANDABORT:
22303 22323 case MHIOCGRP_REGISTERANDIGNOREKEY:
22304 22324 case CDROMCLOSETRAY:
22305 22325 case USCSICMD:
22306 22326 goto skip_ready_valid;
22307 22327 default:
22308 22328 break;
22309 22329 }
22310 22330
22311 22331 mutex_exit(SD_MUTEX(un));
22312 22332 err = sd_ready_and_valid(ssc, SDPART(dev));
22313 22333 mutex_enter(SD_MUTEX(un));
22314 22334
22315 22335 if (err != SD_READY_VALID) {
22316 22336 switch (cmd) {
22317 22337 case DKIOCSTATE:
22318 22338 case CDROMGDRVSPEED:
22319 22339 case CDROMSDRVSPEED:
22320 22340 case FDEJECT: /* for eject command */
22321 22341 case DKIOCEJECT:
22322 22342 case CDROMEJECT:
22323 22343 case DKIOCREMOVABLE:
22324 22344 case DKIOCHOTPLUGGABLE:
22325 22345 break;
22326 22346 default:
22327 22347 if (un->un_f_has_removable_media) {
22328 22348 err = ENXIO;
22329 22349 } else {
22330 22350 /* Do not map SD_RESERVED_BY_OTHERS to EIO */
22331 22351 if (err == SD_RESERVED_BY_OTHERS) {
22332 22352 err = EACCES;
22333 22353 } else {
22334 22354 err = EIO;
22335 22355 }
22336 22356 }
22337 22357 un->un_ncmds_in_driver--;
22338 22358 ASSERT(un->un_ncmds_in_driver >= 0);
22359 + if (un->un_f_detach_waiting)
22360 + cv_signal(&un->un_detach_cv);
22339 22361 mutex_exit(SD_MUTEX(un));
22340 22362
22341 22363 goto done_without_assess;
22342 22364 }
22343 22365 }
22344 22366 }
22345 22367
22346 22368 skip_ready_valid:
22347 22369 mutex_exit(SD_MUTEX(un));
22348 22370
22349 22371 switch (cmd) {
22350 22372 case DKIOCINFO:
22351 22373 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
22352 22374 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
22353 22375 break;
22354 22376
22355 22377 case DKIOCGMEDIAINFO:
22356 22378 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
22357 22379 err = sd_get_media_info(dev, (caddr_t)arg, flag);
22358 22380 break;
22359 22381
22360 22382 case DKIOCGMEDIAINFOEXT:
22361 22383 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
22362 22384 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
22363 22385 break;
22364 22386
22365 22387 case DKIOCGGEOM:
22366 22388 case DKIOCGVTOC:
22367 22389 case DKIOCGEXTVTOC:
22368 22390 case DKIOCGAPART:
22369 22391 case DKIOCPARTINFO:
22370 22392 case DKIOCEXTPARTINFO:
22371 22393 case DKIOCSGEOM:
22372 22394 case DKIOCSAPART:
22373 22395 case DKIOCGETEFI:
22374 22396 case DKIOCPARTITION:
22375 22397 case DKIOCSVTOC:
22376 22398 case DKIOCSEXTVTOC:
22377 22399 case DKIOCSETEFI:
22378 22400 case DKIOCGMBOOT:
22379 22401 case DKIOCSMBOOT:
22380 22402 case DKIOCG_PHYGEOM:
22381 22403 case DKIOCG_VIRTGEOM:
22382 -#if defined(__i386) || defined(__amd64)
22383 22404 case DKIOCSETEXTPART:
22384 -#endif
22385 22405 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);
22386 22406
22387 22407 /* TUR should spin up */
22388 22408
22389 22409 if (un->un_f_has_removable_media)
22390 22410 err = sd_send_scsi_TEST_UNIT_READY(ssc,
22391 22411 SD_CHECK_FOR_MEDIA);
22392 22412
22393 22413 else
22394 22414 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22395 22415
22396 22416 if (err != 0)
22397 22417 goto done_with_assess;
22398 22418
22399 22419 err = cmlb_ioctl(un->un_cmlbhandle, dev,
22400 22420 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);
22401 22421
22402 22422 if ((err == 0) &&
22403 22423 ((cmd == DKIOCSETEFI) ||
22404 22424 ((un->un_f_pkstats_enabled) &&
22405 22425 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
22406 22426 cmd == DKIOCSEXTVTOC)))) {
22407 22427
22408 22428 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
22409 22429 (void *)SD_PATH_DIRECT);
22410 22430 if ((tmprval == 0) && un->un_f_pkstats_enabled) {
22411 22431 sd_set_pstats(un);
22412 22432 SD_TRACE(SD_LOG_IO_PARTITION, un,
22413 22433 "sd_ioctl: un:0x%p pstats created and "
22414 22434 "set\n", un);
22415 22435 }
22416 22436 }
22417 22437
22418 22438 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
22419 22439 ((cmd == DKIOCSETEFI) && (tmprval == 0))) {
22420 22440
22421 22441 mutex_enter(SD_MUTEX(un));
22422 22442 if (un->un_f_devid_supported &&
22423 22443 (un->un_f_opt_fab_devid == TRUE)) {
22424 22444 if (un->un_devid == NULL) {
22425 22445 sd_register_devid(ssc, SD_DEVINFO(un),
22426 22446 SD_TARGET_IS_UNRESERVED);
22427 22447 } else {
22428 22448 /*
22429 22449 * The device id for this disk
22430 22450 * has been fabricated. The
22431 22451 * device id must be preserved
22432 22452 * by writing it back out to
22433 22453 * disk.
22434 22454 */
22435 22455 if (sd_write_deviceid(ssc) != 0) {
22436 22456 ddi_devid_free(un->un_devid);
22437 22457 un->un_devid = NULL;
22438 22458 }
22439 22459 }
22440 22460 }
22441 22461 mutex_exit(SD_MUTEX(un));
22442 22462 }
22443 22463
22444 22464 break;
22445 22465
22446 22466 case DKIOCLOCK:
22447 22467 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
22448 22468 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
22449 22469 SD_PATH_STANDARD);
22450 22470 goto done_with_assess;
22451 22471
22452 22472 case DKIOCUNLOCK:
22453 22473 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
22454 22474 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
22455 22475 SD_PATH_STANDARD);
22456 22476 goto done_with_assess;
22457 22477
22458 22478 case DKIOCSTATE: {
22459 22479 enum dkio_state state;
22460 22480 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");
22461 22481
22462 22482 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
22463 22483 err = EFAULT;
22464 22484 } else {
22465 22485 err = sd_check_media(dev, state);
22466 22486 if (err == 0) {
22467 22487 if (ddi_copyout(&un->un_mediastate, (void *)arg,
22468 22488 sizeof (int), flag) != 0)
22469 22489 err = EFAULT;
22470 22490 }
22471 22491 }
22472 22492 break;
22473 22493 }
22474 22494
22475 22495 case DKIOCREMOVABLE:
22476 22496 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
22477 22497 i = un->un_f_has_removable_media ? 1 : 0;
22478 22498 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22479 22499 err = EFAULT;
22480 22500 } else {
22481 22501 err = 0;
22482 22502 }
22483 22503 break;
22484 22504
22485 22505 case DKIOCSOLIDSTATE:
22486 22506 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n");
22487 22507 i = un->un_f_is_solid_state ? 1 : 0;
22488 22508 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22489 22509 err = EFAULT;
22490 22510 } else {
22491 22511 err = 0;
22492 22512 }
22493 22513 break;
22494 22514
22495 22515 case DKIOCHOTPLUGGABLE:
22496 22516 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
22497 22517 i = un->un_f_is_hotpluggable ? 1 : 0;
22498 22518 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22499 22519 err = EFAULT;
22500 22520 } else {
22501 22521 err = 0;
22502 22522 }
22503 22523 break;
22504 22524
22505 22525 case DKIOCREADONLY:
22506 22526 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n");
22507 22527 i = 0;
22508 22528 if ((ISCD(un) && !un->un_f_mmc_writable_media) ||
22509 22529 (sr_check_wp(dev) != 0)) {
22510 22530 i = 1;
22511 22531 }
22512 22532 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22513 22533 err = EFAULT;
22514 22534 } else {
22515 22535 err = 0;
22516 22536 }
22517 22537 break;
22518 22538
22519 22539 case DKIOCGTEMPERATURE:
22520 22540 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
22521 22541 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
22522 22542 break;
22523 22543
22524 22544 case MHIOCENFAILFAST:
22525 22545 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
22526 22546 if ((err = drv_priv(cred_p)) == 0) {
22527 22547 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
22528 22548 }
22529 22549 break;
22530 22550
22531 22551 case MHIOCTKOWN:
22532 22552 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
22533 22553 if ((err = drv_priv(cred_p)) == 0) {
22534 22554 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
22535 22555 }
22536 22556 break;
22537 22557
22538 22558 case MHIOCRELEASE:
22539 22559 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
22540 22560 if ((err = drv_priv(cred_p)) == 0) {
22541 22561 err = sd_mhdioc_release(dev);
22542 22562 }
22543 22563 break;
22544 22564
22545 22565 case MHIOCSTATUS:
22546 22566 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
22547 22567 if ((err = drv_priv(cred_p)) == 0) {
22548 22568 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
22549 22569 case 0:
22550 22570 err = 0;
22551 22571 break;
22552 22572 case EACCES:
22553 22573 *rval_p = 1;
22554 22574 err = 0;
22555 22575 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22556 22576 break;
22557 22577 default:
22558 22578 err = EIO;
22559 22579 goto done_with_assess;
22560 22580 }
22561 22581 }
22562 22582 break;
22563 22583
22564 22584 case MHIOCQRESERVE:
22565 22585 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
22566 22586 if ((err = drv_priv(cred_p)) == 0) {
22567 22587 err = sd_reserve_release(dev, SD_RESERVE);
22568 22588 }
22569 22589 break;
22570 22590
22571 22591 case MHIOCREREGISTERDEVID:
22572 22592 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
22573 22593 if (drv_priv(cred_p) == EPERM) {
22574 22594 err = EPERM;
22575 22595 } else if (!un->un_f_devid_supported) {
22576 22596 err = ENOTTY;
22577 22597 } else {
22578 22598 err = sd_mhdioc_register_devid(dev);
22579 22599 }
22580 22600 break;
22581 22601
22582 22602 case MHIOCGRP_INKEYS:
22583 22603 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
22584 22604 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22585 22605 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22586 22606 err = ENOTSUP;
22587 22607 } else {
22588 22608 err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
22589 22609 flag);
22590 22610 }
22591 22611 }
22592 22612 break;
22593 22613
22594 22614 case MHIOCGRP_INRESV:
22595 22615 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
22596 22616 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22597 22617 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22598 22618 err = ENOTSUP;
22599 22619 } else {
22600 22620 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
22601 22621 }
22602 22622 }
22603 22623 break;
22604 22624
22605 22625 case MHIOCGRP_REGISTER:
22606 22626 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
22607 22627 if ((err = drv_priv(cred_p)) != EPERM) {
22608 22628 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22609 22629 err = ENOTSUP;
22610 22630 } else if (arg != NULL) {
22611 22631 mhioc_register_t reg;
22612 22632 if (ddi_copyin((void *)arg, ®,
22613 22633 sizeof (mhioc_register_t), flag) != 0) {
22614 22634 err = EFAULT;
22615 22635 } else {
22616 22636 err =
22617 22637 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22618 22638 ssc, SD_SCSI3_REGISTER,
22619 22639 (uchar_t *)®);
22620 22640 if (err != 0)
22621 22641 goto done_with_assess;
22622 22642 }
22623 22643 }
22624 22644 }
22625 22645 break;
22626 22646
22627 22647 case MHIOCGRP_CLEAR:
22628 22648 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n");
22629 22649 if ((err = drv_priv(cred_p)) != EPERM) {
22630 22650 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22631 22651 err = ENOTSUP;
22632 22652 } else if (arg != NULL) {
22633 22653 mhioc_register_t reg;
22634 22654 if (ddi_copyin((void *)arg, ®,
22635 22655 sizeof (mhioc_register_t), flag) != 0) {
22636 22656 err = EFAULT;
22637 22657 } else {
22638 22658 err =
22639 22659 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22640 22660 ssc, SD_SCSI3_CLEAR,
22641 22661 (uchar_t *)®);
22642 22662 if (err != 0)
22643 22663 goto done_with_assess;
22644 22664 }
22645 22665 }
22646 22666 }
22647 22667 break;
22648 22668
22649 22669 case MHIOCGRP_RESERVE:
22650 22670 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
22651 22671 if ((err = drv_priv(cred_p)) != EPERM) {
22652 22672 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22653 22673 err = ENOTSUP;
22654 22674 } else if (arg != NULL) {
22655 22675 mhioc_resv_desc_t resv_desc;
22656 22676 if (ddi_copyin((void *)arg, &resv_desc,
22657 22677 sizeof (mhioc_resv_desc_t), flag) != 0) {
22658 22678 err = EFAULT;
22659 22679 } else {
22660 22680 err =
22661 22681 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22662 22682 ssc, SD_SCSI3_RESERVE,
22663 22683 (uchar_t *)&resv_desc);
22664 22684 if (err != 0)
22665 22685 goto done_with_assess;
22666 22686 }
22667 22687 }
22668 22688 }
22669 22689 break;
22670 22690
22671 22691 case MHIOCGRP_PREEMPTANDABORT:
22672 22692 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
22673 22693 if ((err = drv_priv(cred_p)) != EPERM) {
22674 22694 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22675 22695 err = ENOTSUP;
22676 22696 } else if (arg != NULL) {
22677 22697 mhioc_preemptandabort_t preempt_abort;
22678 22698 if (ddi_copyin((void *)arg, &preempt_abort,
22679 22699 sizeof (mhioc_preemptandabort_t),
22680 22700 flag) != 0) {
22681 22701 err = EFAULT;
22682 22702 } else {
22683 22703 err =
22684 22704 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22685 22705 ssc, SD_SCSI3_PREEMPTANDABORT,
22686 22706 (uchar_t *)&preempt_abort);
22687 22707 if (err != 0)
22688 22708 goto done_with_assess;
22689 22709 }
22690 22710 }
22691 22711 }
22692 22712 break;
22693 22713
22694 22714 case MHIOCGRP_REGISTERANDIGNOREKEY:
22695 22715 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
22696 22716 if ((err = drv_priv(cred_p)) != EPERM) {
22697 22717 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22698 22718 err = ENOTSUP;
22699 22719 } else if (arg != NULL) {
22700 22720 mhioc_registerandignorekey_t r_and_i;
22701 22721 if (ddi_copyin((void *)arg, (void *)&r_and_i,
22702 22722 sizeof (mhioc_registerandignorekey_t),
22703 22723 flag) != 0) {
22704 22724 err = EFAULT;
22705 22725 } else {
22706 22726 err =
22707 22727 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22708 22728 ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
22709 22729 (uchar_t *)&r_and_i);
22710 22730 if (err != 0)
22711 22731 goto done_with_assess;
22712 22732 }
22713 22733 }
22714 22734 }
22715 22735 break;
22716 22736
22717 22737 case USCSICMD:
22718 22738 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
22719 22739 cr = ddi_get_cred();
22720 22740 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
22721 22741 err = EPERM;
22722 22742 } else {
22723 22743 enum uio_seg uioseg;
22724 22744
22725 22745 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
22726 22746 UIO_USERSPACE;
22727 22747 if (un->un_f_format_in_progress == TRUE) {
22728 22748 err = EAGAIN;
22729 22749 break;
22730 22750 }
22731 22751
22732 22752 err = sd_ssc_send(ssc,
22733 22753 (struct uscsi_cmd *)arg,
22734 22754 flag, uioseg, SD_PATH_STANDARD);
22735 22755 if (err != 0)
22736 22756 goto done_with_assess;
22737 22757 else
22738 22758 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22739 22759 }
22740 22760 break;
22741 22761
22742 22762 case CDROMPAUSE:
22743 22763 case CDROMRESUME:
22744 22764 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
22745 22765 if (!ISCD(un)) {
22746 22766 err = ENOTTY;
22747 22767 } else {
22748 22768 err = sr_pause_resume(dev, cmd);
22749 22769 }
22750 22770 break;
22751 22771
22752 22772 case CDROMPLAYMSF:
22753 22773 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
22754 22774 if (!ISCD(un)) {
22755 22775 err = ENOTTY;
22756 22776 } else {
22757 22777 err = sr_play_msf(dev, (caddr_t)arg, flag);
22758 22778 }
22759 22779 break;
22760 22780
22761 22781 case CDROMPLAYTRKIND:
22762 22782 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
22763 -#if defined(__i386) || defined(__amd64)
22764 22783 /*
22765 22784 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
22766 22785 */
22767 22786 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22768 -#else
22769 - if (!ISCD(un)) {
22770 -#endif
22771 22787 err = ENOTTY;
22772 22788 } else {
22773 22789 err = sr_play_trkind(dev, (caddr_t)arg, flag);
22774 22790 }
22775 22791 break;
22776 22792
22777 22793 case CDROMREADTOCHDR:
22778 22794 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
22779 22795 if (!ISCD(un)) {
22780 22796 err = ENOTTY;
22781 22797 } else {
22782 22798 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
22783 22799 }
22784 22800 break;
22785 22801
22786 22802 case CDROMREADTOCENTRY:
22787 22803 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
22788 22804 if (!ISCD(un)) {
22789 22805 err = ENOTTY;
22790 22806 } else {
22791 22807 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
22792 22808 }
22793 22809 break;
22794 22810
22795 22811 case CDROMSTOP:
22796 22812 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
22797 22813 if (!ISCD(un)) {
22798 22814 err = ENOTTY;
22799 22815 } else {
22800 22816 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22801 22817 SD_TARGET_STOP, SD_PATH_STANDARD);
22802 22818 goto done_with_assess;
22803 22819 }
22804 22820 break;
22805 22821
22806 22822 case CDROMSTART:
22807 22823 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
22808 22824 if (!ISCD(un)) {
22809 22825 err = ENOTTY;
22810 22826 } else {
22811 22827 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22812 22828 SD_TARGET_START, SD_PATH_STANDARD);
22813 22829 goto done_with_assess;
22814 22830 }
22815 22831 break;
22816 22832
22817 22833 case CDROMCLOSETRAY:
22818 22834 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
22819 22835 if (!ISCD(un)) {
22820 22836 err = ENOTTY;
22821 22837 } else {
22822 22838 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22823 22839 SD_TARGET_CLOSE, SD_PATH_STANDARD);
22824 22840 goto done_with_assess;
22825 22841 }
22826 22842 break;
22827 22843
22828 22844 case FDEJECT: /* for eject command */
22829 22845 case DKIOCEJECT:
22830 22846 case CDROMEJECT:
22831 22847 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
22832 22848 if (!un->un_f_eject_media_supported) {
22833 22849 err = ENOTTY;
22834 22850 } else {
22835 22851 err = sr_eject(dev);
22836 22852 }
22837 22853 break;
22838 22854
22839 22855 case CDROMVOLCTRL:
22840 22856 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
22841 22857 if (!ISCD(un)) {
22842 22858 err = ENOTTY;
22843 22859 } else {
22844 22860 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
22845 22861 }
22846 22862 break;
22847 22863
22848 22864 case CDROMSUBCHNL:
22849 22865 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
22850 22866 if (!ISCD(un)) {
22851 22867 err = ENOTTY;
22852 22868 } else {
22853 22869 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
22854 22870 }
22855 22871 break;
22856 22872
22857 22873 case CDROMREADMODE2:
22858 22874 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
22859 22875 if (!ISCD(un)) {
22860 22876 err = ENOTTY;
22861 22877 } else if (un->un_f_cfg_is_atapi == TRUE) {
22862 22878 /*
22863 22879 * If the drive supports READ CD, use that instead of
22864 22880 * switching the LBA size via a MODE SELECT
22865 22881 * Block Descriptor
22866 22882 */
22867 22883 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
22868 22884 } else {
22869 22885 err = sr_read_mode2(dev, (caddr_t)arg, flag);
22870 22886 }
22871 22887 break;
22872 22888
22873 22889 case CDROMREADMODE1:
22874 22890 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
22875 22891 if (!ISCD(un)) {
22876 22892 err = ENOTTY;
22877 22893 } else {
22878 22894 err = sr_read_mode1(dev, (caddr_t)arg, flag);
22879 22895 }
22880 22896 break;
22881 22897
22882 22898 case CDROMREADOFFSET:
22883 22899 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
22884 22900 if (!ISCD(un)) {
22885 22901 err = ENOTTY;
22886 22902 } else {
22887 22903 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
22888 22904 flag);
22889 22905 }
22890 22906 break;
22891 22907
22892 22908 case CDROMSBLKMODE:
22893 22909 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
22894 22910 /*
22895 22911 		 * There is no means of changing the block size on ATAPI
22896 22912 		 * drives, so return ENOTTY if the drive type is ATAPI.
22897 22913 */
22898 22914 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22899 22915 err = ENOTTY;
22900 22916 } else if (un->un_f_mmc_cap == TRUE) {
22901 22917
22902 22918 /*
22903 22919 * MMC Devices do not support changing the
22904 22920 * logical block size
22905 22921 *
22906 22922 * Note: EINVAL is being returned instead of ENOTTY to
22907 22923 			 * maintain consistency with the original mmc
22908 22924 * driver update.
22909 22925 */
22910 22926 err = EINVAL;
22911 22927 } else {
22912 22928 mutex_enter(SD_MUTEX(un));
22913 22929 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
22914 22930 (un->un_ncmds_in_transport > 0)) {
22915 22931 mutex_exit(SD_MUTEX(un));
22916 22932 err = EINVAL;
22917 22933 } else {
22918 22934 mutex_exit(SD_MUTEX(un));
22919 22935 err = sr_change_blkmode(dev, cmd, arg, flag);
22920 22936 }
22921 22937 }
22922 22938 break;
22923 22939
22924 22940 case CDROMGBLKMODE:
22925 22941 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
22926 22942 if (!ISCD(un)) {
22927 22943 err = ENOTTY;
22928 22944 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
22929 22945 (un->un_f_blockcount_is_valid != FALSE)) {
22930 22946 /*
22931 22947 			 * The drive is an ATAPI drive, so return the target
22932 22948 			 * block size, since we cannot change the blocksize
22933 22949 * blocksize on ATAPI drives. Used primarily to detect
22934 22950 * if an ATAPI cdrom is present.
22935 22951 */
22936 22952 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
22937 22953 sizeof (int), flag) != 0) {
22938 22954 err = EFAULT;
22939 22955 } else {
22940 22956 err = 0;
22941 22957 }
22942 22958
22943 22959 } else {
22944 22960 /*
22945 22961 * Drive supports changing block sizes via a Mode
22946 22962 * Select.
22947 22963 */
22948 22964 err = sr_change_blkmode(dev, cmd, arg, flag);
22949 22965 }
22950 22966 break;
22951 22967
22952 22968 case CDROMGDRVSPEED:
22953 22969 case CDROMSDRVSPEED:
22954 22970 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
22955 22971 if (!ISCD(un)) {
22956 22972 err = ENOTTY;
22957 22973 } else if (un->un_f_mmc_cap == TRUE) {
22958 22974 /*
22959 22975 			 * Note: In the future the driver implementation
22960 22976 			 * for getting and setting cd speed should
22961 22977 			 * entail:
22962 22978 * 1) If non-mmc try the Toshiba mode page
22963 22979 * (sr_change_speed)
22964 22980 * 2) If mmc but no support for Real Time Streaming try
22965 22981 * the SET CD SPEED (0xBB) command
22966 22982 * (sr_atapi_change_speed)
22967 22983 * 3) If mmc and support for Real Time Streaming
22968 22984 * try the GET PERFORMANCE and SET STREAMING
22969 22985 * commands (not yet implemented, 4380808)
22970 22986 */
22971 22987 /*
22972 22988 * As per recent MMC spec, CD-ROM speed is variable
22973 22989 * and changes with LBA. Since there is no such
22974 22990 			 * thing as drive speed now, fail this ioctl.
22975 22991 *
22976 22992 			 * Note: EINVAL is returned for consistency with the
22977 22993 			 * original implementation, which included support for getting
22978 22994 * the drive speed of mmc devices but not setting
22979 22995 * the drive speed. Thus EINVAL would be returned
22980 22996 * if a set request was made for an mmc device.
22981 22997 * We no longer support get or set speed for
22982 22998 * mmc but need to remain consistent with regard
22983 22999 * to the error code returned.
22984 23000 */
22985 23001 err = EINVAL;
22986 23002 } else if (un->un_f_cfg_is_atapi == TRUE) {
22987 23003 err = sr_atapi_change_speed(dev, cmd, arg, flag);
22988 23004 } else {
22989 23005 err = sr_change_speed(dev, cmd, arg, flag);
22990 23006 }
22991 23007 break;
22992 23008
22993 23009 case CDROMCDDA:
22994 23010 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
22995 23011 if (!ISCD(un)) {
22996 23012 err = ENOTTY;
22997 23013 } else {
22998 23014 err = sr_read_cdda(dev, (void *)arg, flag);
22999 23015 }
23000 23016 break;
23001 23017
23002 23018 case CDROMCDXA:
23003 23019 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
23004 23020 if (!ISCD(un)) {
23005 23021 err = ENOTTY;
23006 23022 } else {
23007 23023 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
23008 23024 }
23009 23025 break;
23010 23026
23011 23027 case CDROMSUBCODE:
23012 23028 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
23013 23029 if (!ISCD(un)) {
23014 23030 err = ENOTTY;
23015 23031 } else {
23016 23032 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
23017 23033 }
23018 23034 break;
23019 23035
23020 23036
23021 23037 #ifdef SDDEBUG
23022 23038 /* RESET/ABORTS testing ioctls */
23023 23039 case DKIOCRESET: {
23024 23040 int reset_level;
23025 23041
23026 23042 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
23027 23043 err = EFAULT;
23028 23044 } else {
23029 23045 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
23030 23046 			    "reset_level = 0x%x\n", reset_level);
23031 23047 if (scsi_reset(SD_ADDRESS(un), reset_level)) {
23032 23048 err = 0;
23033 23049 } else {
23034 23050 err = EIO;
23035 23051 }
23036 23052 }
23037 23053 break;
23038 23054 }
23039 23055
23040 23056 case DKIOCABORT:
23041 23057 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
23042 23058 if (scsi_abort(SD_ADDRESS(un), NULL)) {
23043 23059 err = 0;
23044 23060 } else {
23045 23061 err = EIO;
23046 23062 }
23047 23063 break;
23048 23064 #endif
23049 23065
23050 23066 #ifdef SD_FAULT_INJECTION
23051 23067 /* SDIOC FaultInjection testing ioctls */
23052 23068 case SDIOCSTART:
23053 23069 case SDIOCSTOP:
23054 23070 case SDIOCINSERTPKT:
23055 23071 case SDIOCINSERTXB:
23056 23072 case SDIOCINSERTUN:
23057 23073 case SDIOCINSERTARQ:
23058 23074 case SDIOCPUSH:
23059 23075 case SDIOCRETRIEVE:
23060 23076 case SDIOCRUN:
23077 + case SDIOCINSERTTRAN:
23061 23078 		SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
23062 23079 "SDIOC detected cmd:0x%X:\n", cmd);
23063 23080 /* call error generator */
23064 - sd_faultinjection_ioctl(cmd, arg, un);
23065 - err = 0;
23081 + err = sd_faultinjection_ioctl(cmd, arg, un);
23066 23082 break;
23067 23083
23068 23084 #endif /* SD_FAULT_INJECTION */
23069 23085
23070 23086 case DKIOCFLUSHWRITECACHE:
23071 23087 {
23072 23088 struct dk_callback *dkc = (struct dk_callback *)arg;
23073 23089
23074 23090 mutex_enter(SD_MUTEX(un));
23075 23091 if (!un->un_f_sync_cache_supported ||
23076 23092 !un->un_f_write_cache_enabled) {
23077 23093 err = un->un_f_sync_cache_supported ?
23078 23094 0 : ENOTSUP;
23079 23095 mutex_exit(SD_MUTEX(un));
23080 23096 if ((flag & FKIOCTL) && dkc != NULL &&
23081 23097 dkc->dkc_callback != NULL) {
23082 23098 (*dkc->dkc_callback)(dkc->dkc_cookie,
23083 23099 err);
23084 23100 /*
23085 23101 * Did callback and reported error.
23086 23102 * Since we did a callback, ioctl
23087 23103 * should return 0.
23088 23104 */
23089 23105 err = 0;
23090 23106 }
23091 23107 break;
23092 23108 }
23093 23109 mutex_exit(SD_MUTEX(un));
23094 23110
23095 23111 if ((flag & FKIOCTL) && dkc != NULL &&
23096 23112 dkc->dkc_callback != NULL) {
23097 23113 /* async SYNC CACHE request */
23098 23114 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
23099 23115 } else {
23100 23116 /* synchronous SYNC CACHE request */
23101 23117 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
23102 23118 }
23103 23119 }
23104 23120 break;
23105 23121
23122 + case DKIOCFREE:
23123 + {
23124 + dkioc_free_list_t *dfl = (dkioc_free_list_t *)arg;
23125 +
23126 + /* bad userspace ioctls shouldn't panic */
23127 + if (dfl == NULL && !(flag & FKIOCTL)) {
23128 + err = SET_ERROR(EINVAL);
23129 + break;
23130 + }
23131 + /* synchronous UNMAP request */
23132 + err = sd_send_scsi_UNMAP(dev, ssc, dfl, flag);
23133 + }
23134 + break;
23135 +
23106 23136 case DKIOCGETWCE: {
23107 23137
23108 23138 int wce;
23109 23139
23110 23140 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
23111 23141 break;
23112 23142 }
23113 23143
23114 23144 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
23115 23145 err = EFAULT;
23116 23146 }
23117 23147 break;
23118 23148 }
23119 23149
23120 23150 case DKIOCSETWCE: {
23121 23151
23122 23152 int wce, sync_supported;
23123 23153 int cur_wce = 0;
23124 23154
23125 23155 if (!un->un_f_cache_mode_changeable) {
23126 23156 err = EINVAL;
23127 23157 break;
23128 23158 }
23129 23159
23130 23160 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
23131 23161 err = EFAULT;
23132 23162 break;
23133 23163 }
23134 23164
23135 23165 /*
23136 23166 * Synchronize multiple threads trying to enable
23137 23167 * or disable the cache via the un_f_wcc_cv
23138 23168 * condition variable.
23139 23169 */
23140 23170 mutex_enter(SD_MUTEX(un));
23141 23171
23142 23172 /*
23143 23173 * Don't allow the cache to be enabled if the
23144 23174 * config file has it disabled.
23145 23175 */
23146 23176 if (un->un_f_opt_disable_cache && wce) {
23147 23177 mutex_exit(SD_MUTEX(un));
23148 23178 err = EINVAL;
23149 23179 break;
23150 23180 }
23151 23181
23152 23182 /*
23153 23183 * Wait for write cache change in progress
23154 23184 * bit to be clear before proceeding.
23155 23185 */
23156 23186 while (un->un_f_wcc_inprog)
23157 23187 cv_wait(&un->un_wcc_cv, SD_MUTEX(un));
23158 23188
23159 23189 un->un_f_wcc_inprog = 1;
23160 23190
23161 23191 mutex_exit(SD_MUTEX(un));
23162 23192
23163 23193 /*
23164 23194 * Get the current write cache state
23165 23195 */
23166 23196 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) {
23167 23197 mutex_enter(SD_MUTEX(un));
23168 23198 un->un_f_wcc_inprog = 0;
23169 23199 cv_broadcast(&un->un_wcc_cv);
23170 23200 mutex_exit(SD_MUTEX(un));
23171 23201 break;
23172 23202 }
23173 23203
23174 23204 mutex_enter(SD_MUTEX(un));
23175 23205 un->un_f_write_cache_enabled = (cur_wce != 0);
23176 23206
23177 23207 if (un->un_f_write_cache_enabled && wce == 0) {
23178 23208 /*
23179 23209 * Disable the write cache. Don't clear
23180 23210 * un_f_write_cache_enabled until after
23181 23211 * the mode select and flush are complete.
23182 23212 */
23183 23213 sync_supported = un->un_f_sync_cache_supported;
23184 23214
23185 23215 /*
23186 23216 * If cache flush is suppressed, we assume that the
23187 23217 * controller firmware will take care of managing the
23188 23218 * write cache for us: no need to explicitly
23189 23219 * disable it.
23190 23220 */
23191 23221 if (!un->un_f_suppress_cache_flush) {
23192 23222 mutex_exit(SD_MUTEX(un));
23193 23223 if ((err = sd_cache_control(ssc,
23194 23224 SD_CACHE_NOCHANGE,
23195 23225 SD_CACHE_DISABLE)) == 0 &&
23196 23226 sync_supported) {
23197 23227 err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
23198 23228 NULL);
23199 23229 }
23200 23230 } else {
23201 23231 mutex_exit(SD_MUTEX(un));
23202 23232 }
23203 23233
23204 23234 mutex_enter(SD_MUTEX(un));
23205 23235 if (err == 0) {
23206 23236 un->un_f_write_cache_enabled = 0;
23207 23237 }
23208 23238
23209 23239 } else if (!un->un_f_write_cache_enabled && wce != 0) {
23210 23240 /*
23211 23241 * Set un_f_write_cache_enabled first, so there is
23212 23242 * no window where the cache is enabled, but the
23213 23243 * bit says it isn't.
23214 23244 */
23215 23245 un->un_f_write_cache_enabled = 1;
23216 23246
23217 23247 /*
23218 23248 * If cache flush is suppressed, we assume that the
23219 23249 * controller firmware will take care of managing the
23220 23250 * write cache for us: no need to explicitly
23221 23251 * enable it.
23222 23252 */
23223 23253 if (!un->un_f_suppress_cache_flush) {
23224 23254 mutex_exit(SD_MUTEX(un));
23225 23255 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
23226 23256 SD_CACHE_ENABLE);
23227 23257 } else {
23228 23258 mutex_exit(SD_MUTEX(un));
23229 23259 }
23230 23260
23231 23261 mutex_enter(SD_MUTEX(un));
23232 23262
23233 23263 if (err) {
23234 23264 un->un_f_write_cache_enabled = 0;
23235 23265 }
23236 23266 }
23237 23267
23238 23268 un->un_f_wcc_inprog = 0;
23239 23269 cv_broadcast(&un->un_wcc_cv);
23240 23270 mutex_exit(SD_MUTEX(un));
23241 23271 break;
23242 23272 }
23243 23273
23244 23274 default:
23245 23275 err = ENOTTY;
23246 23276 break;
23247 23277 }
23248 23278 mutex_enter(SD_MUTEX(un));
23249 23279 un->un_ncmds_in_driver--;
23250 23280 ASSERT(un->un_ncmds_in_driver >= 0);
23281 + if (un->un_f_detach_waiting)
23282 + cv_signal(&un->un_detach_cv);
23251 23283 mutex_exit(SD_MUTEX(un));
23252 23284
23253 23285
23254 23286 done_without_assess:
23255 23287 sd_ssc_fini(ssc);
23256 23288
23257 23289 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23258 23290 return (err);
23259 23291
23260 23292 done_with_assess:
23261 23293 mutex_enter(SD_MUTEX(un));
23262 23294 un->un_ncmds_in_driver--;
23263 23295 ASSERT(un->un_ncmds_in_driver >= 0);
23296 + if (un->un_f_detach_waiting)
23297 + cv_signal(&un->un_detach_cv);
23264 23298 mutex_exit(SD_MUTEX(un));
23265 23299
23266 23300 done_quick_assess:
23267 23301 if (err != 0)
23268 23302 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23269 23303 /* Uninitialize sd_ssc_t pointer */
23270 23304 sd_ssc_fini(ssc);
23271 23305
23272 23306 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23273 23307 return (err);
23274 23308 }
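
The new DKIOCFREE case above takes a dkioc_free_list_t describing the extents
to unmap and hands it to sd_send_scsi_UNMAP(). A minimal userland sketch of
issuing the ioctl follows; the device path is hypothetical, and the DFL_SZ()
helper and extent layout are assumed to match sys/dkioc_free_util.h as of this
change:

	#include <sys/types.h>
	#include <sys/dkio.h>
	#include <sys/dkioc_free_util.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <stropts.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* One extent: unmap 1 MiB at byte offset 0 (illustrative). */
		dkioc_free_list_t *dfl = calloc(1, DFL_SZ(1));
		int fd, rv = 1;

		if (dfl == NULL)
			return (1);
		dfl->dfl_num_exts = 1;	/* dfl_flags/dfl_offset left zero */
		dfl->dfl_exts[0].dfle_start = 0;
		dfl->dfl_exts[0].dfle_length = 1024 * 1024;

		/* Hypothetical raw device path. */
		if ((fd = open("/dev/rdsk/c0t0d0s0", O_RDWR)) >= 0) {
			if (ioctl(fd, DKIOCFREE, dfl) == 0)
				rv = 0;
			else
				perror("DKIOCFREE");
			(void) close(fd);
		}
		free(dfl);
		return (rv);
	}

In-kernel (FKIOCTL) callers pass the list directly, which is why the NULL
check in the case above only rejects a NULL argument coming from userland.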
23275 23309
23276 23310
23277 23311 /*
23278 23312 * Function: sd_dkio_ctrl_info
23279 23313 *
23280 23314 * Description: This routine is the driver entry point for handling controller
23281 23315 * information ioctl requests (DKIOCINFO).
23282 23316 *
23283 23317 * Arguments: dev - the device number
23284 23318 * arg - pointer to user provided dk_cinfo structure
23285 23319 * specifying the controller type and attributes.
23286 23320 * flag - this argument is a pass through to ddi_copyxxx()
23287 23321 * directly from the mode argument of ioctl().
23288 23322 *
23289 23323 * Return Code: 0
23290 23324 * EFAULT
23291 23325 * ENXIO
23292 23326 */
23293 23327
23294 23328 static int
23295 23329 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
23296 23330 {
23297 23331 struct sd_lun *un = NULL;
23298 23332 struct dk_cinfo *info;
23299 23333 dev_info_t *pdip;
23300 23334 int lun, tgt;
23301 23335
23302 23336 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23303 23337 return (ENXIO);
23304 23338 }
23305 23339
23306 23340 info = (struct dk_cinfo *)
23307 23341 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);
23308 23342
23309 23343 switch (un->un_ctype) {
23310 23344 case CTYPE_CDROM:
23311 23345 info->dki_ctype = DKC_CDROM;
23312 23346 break;
23313 23347 default:
23314 23348 info->dki_ctype = DKC_SCSI_CCS;
23315 23349 break;
23316 23350 }
23317 23351 pdip = ddi_get_parent(SD_DEVINFO(un));
23318 23352 info->dki_cnum = ddi_get_instance(pdip);
23319 23353 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
23320 23354 (void) strcpy(info->dki_cname, ddi_get_name(pdip));
23321 23355 } else {
23322 23356 (void) strncpy(info->dki_cname, ddi_node_name(pdip),
23323 23357 DK_DEVLEN - 1);
23324 23358 }
23325 23359
23326 23360 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23327 23361 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
23328 23362 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23329 23363 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);
23330 23364
23331 23365 /* Unit Information */
23332 23366 info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
23333 23367 info->dki_slave = ((tgt << 3) | lun);
23334 23368 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
23335 23369 DK_DEVLEN - 1);
23336 23370 info->dki_flags = DKI_FMTVOL;
23337 23371 info->dki_partition = SDPART(dev);
23338 23372
23339 23373 /* Max Transfer size of this device in blocks */
23340 23374 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
23341 23375 info->dki_addr = 0;
23342 23376 info->dki_space = 0;
23343 23377 info->dki_prio = 0;
23344 23378 info->dki_vec = 0;
23345 23379
23346 23380 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
23347 23381 kmem_free(info, sizeof (struct dk_cinfo));
23348 23382 return (EFAULT);
23349 23383 } else {
23350 23384 kmem_free(info, sizeof (struct dk_cinfo));
23351 23385 return (0);
23352 23386 }
23353 23387 }
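
A userland consumer of DKIOCINFO simply hands in a dk_cinfo for the driver to
fill, as sd_dkio_ctrl_info() does above. A minimal sketch, using a
hypothetical device path:

	#include <sys/dkio.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stropts.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct dk_cinfo ci;
		int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);

		if (fd >= 0 && ioctl(fd, DKIOCINFO, &ci) == 0) {
			(void) printf("ctlr %s%d, unit %d, maxxfer %u blocks\n",
			    ci.dki_cname, ci.dki_cnum, ci.dki_unit,
			    ci.dki_maxtransfer);
		}
		if (fd >= 0)
			(void) close(fd);
		return (0);
	}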
23354 23388
23355 23389 /*
23356 23390 * Function: sd_get_media_info_com
23357 23391 *
23358 23392 * Description: This routine returns the information required to populate
23359 23393 * the fields for the dk_minfo/dk_minfo_ext structures.
23360 23394 *
23361 23395 * Arguments: dev - the device number
23362 23396 * dki_media_type - media_type
23363 23397 * dki_lbsize - logical block size
23364 23398 * dki_capacity - capacity in blocks
23365 23399 * dki_pbsize - physical block size (if requested)
23366 23400 *
23367 23401 * Return Code: 0
23368 - * EACCESS
23402 + * EACCES
23369 23403 * EFAULT
23370 23404 * ENXIO
23371 23405 * EIO
23372 23406 */
23373 23407 static int
23374 23408 sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
23375 23409 diskaddr_t *dki_capacity, uint_t *dki_pbsize)
23376 23410 {
23377 23411 struct sd_lun *un = NULL;
23378 23412 struct uscsi_cmd com;
23379 23413 struct scsi_inquiry *sinq;
23380 23414 u_longlong_t media_capacity;
23381 23415 uint64_t capacity;
23382 23416 uint_t lbasize;
23383 23417 uint_t pbsize;
23384 23418 uchar_t *out_data;
23385 23419 uchar_t *rqbuf;
23386 23420 int rval = 0;
23387 23421 int rtn;
23388 23422 sd_ssc_t *ssc;
23389 23423
23390 23424 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
23391 23425 (un->un_state == SD_STATE_OFFLINE)) {
23392 23426 return (ENXIO);
23393 23427 }
23394 23428
23395 23429 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");
23396 23430
23397 23431 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
23398 23432 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
23399 23433 ssc = sd_ssc_init(un);
23400 23434
23401 23435 /* Issue a TUR to determine if the drive is ready with media present */
23402 23436 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
23403 23437 if (rval == ENXIO) {
23404 23438 goto done;
23405 23439 } else if (rval != 0) {
23406 23440 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23407 23441 }
23408 23442
23409 23443 /* Now get configuration data */
23410 23444 if (ISCD(un)) {
23411 23445 *dki_media_type = DK_CDROM;
23412 23446
23413 23447 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
23414 23448 if (un->un_f_mmc_cap == TRUE) {
23415 23449 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
23416 23450 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
23417 23451 SD_PATH_STANDARD);
23418 23452
23419 23453 if (rtn) {
23420 23454 /*
23421 23455 * We ignore all failures for CD and need to
23422 23456 				 * put the assessment before the processing code
23423 23457 				 * to avoid a missing assessment for FMA.
23424 23458 */
23425 23459 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23426 23460 /*
23427 23461 * Failed for other than an illegal request
23428 23462 * or command not supported
23429 23463 */
23430 23464 if ((com.uscsi_status == STATUS_CHECK) &&
23431 23465 (com.uscsi_rqstatus == STATUS_GOOD)) {
23432 23466 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
23433 23467 (rqbuf[12] != 0x20)) {
23434 23468 rval = EIO;
23435 23469 goto no_assessment;
23436 23470 }
23437 23471 }
23438 23472 } else {
23439 23473 /*
23440 23474 * The GET CONFIGURATION command succeeded
23441 23475 * so set the media type according to the
23442 23476 * returned data
23443 23477 */
23444 23478 *dki_media_type = out_data[6];
23445 23479 *dki_media_type <<= 8;
23446 23480 *dki_media_type |= out_data[7];
23447 23481 }
23448 23482 }
23449 23483 } else {
23450 23484 /*
23451 23485 * The profile list is not available, so we attempt to identify
23452 23486 * the media type based on the inquiry data
23453 23487 */
23454 23488 sinq = un->un_sd->sd_inq;
23455 23489 if ((sinq->inq_dtype == DTYPE_DIRECT) ||
23456 23490 (sinq->inq_dtype == DTYPE_OPTICAL)) {
23457 23491 /* This is a direct access device or optical disk */
23458 23492 *dki_media_type = DK_FIXED_DISK;
23459 23493
23460 23494 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
23461 23495 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
23462 23496 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
23463 23497 *dki_media_type = DK_ZIP;
23464 23498 } else if (
23465 23499 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
23466 23500 *dki_media_type = DK_JAZ;
23467 23501 }
23468 23502 }
23469 23503 } else {
23470 23504 /*
23471 23505 * Not a CD, direct access or optical disk so return
23472 23506 * unknown media
23473 23507 */
23474 23508 *dki_media_type = DK_UNKNOWN;
23475 23509 }
23476 23510 }
23477 23511
23478 23512 /*
23479 23513 * Now read the capacity so we can provide the lbasize,
23480 23514 * pbsize and capacity.
23481 23515 */
23482 23516 if (dki_pbsize && un->un_f_descr_format_supported) {
23483 23517 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
23484 23518 &pbsize, SD_PATH_DIRECT);
23485 23519
23486 - /*
23487 - * Override the physical blocksize if the instance already
23488 - * has a larger value.
23489 - */
23490 - pbsize = MAX(pbsize, un->un_phy_blocksize);
23520 + if (un->un_f_sdconf_phy_blocksize) /* keep sd.conf's pbs */
23521 + pbsize = un->un_phy_blocksize;
23522 + else /* override the pbs if the instance has a larger value */
23523 + pbsize = MAX(pbsize, un->un_phy_blocksize);
23491 23524 }
23492 23525
23493 23526 if (dki_pbsize == NULL || rval != 0 ||
23494 23527 !un->un_f_descr_format_supported) {
23495 23528 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
23496 23529 SD_PATH_DIRECT);
23497 23530
23498 23531 switch (rval) {
23499 23532 case 0:
23500 23533 if (un->un_f_enable_rmw &&
23501 23534 un->un_phy_blocksize != 0) {
23502 23535 pbsize = un->un_phy_blocksize;
23503 23536 } else {
23504 23537 pbsize = lbasize;
23505 23538 }
23506 23539 media_capacity = capacity;
23507 23540
23508 23541 /*
23509 23542 * sd_send_scsi_READ_CAPACITY() reports capacity in
23510 23543 * un->un_sys_blocksize chunks. So we need to convert
23511 23544 * it into cap.lbsize chunks.
23512 23545 */
23513 23546 if (un->un_f_has_removable_media) {
23514 23547 media_capacity *= un->un_sys_blocksize;
23515 23548 media_capacity /= lbasize;
23516 23549 }
23517 23550 break;
23518 23551 case EACCES:
23519 23552 rval = EACCES;
23520 23553 goto done;
23521 23554 default:
23522 23555 rval = EIO;
23523 23556 goto done;
23524 23557 }
23525 23558 } else {
23526 23559 if (un->un_f_enable_rmw &&
23527 23560 !ISP2(pbsize % DEV_BSIZE)) {
23528 23561 pbsize = SSD_SECSIZE;
23529 23562 } else if (!ISP2(lbasize % DEV_BSIZE) ||
23530 23563 !ISP2(pbsize % DEV_BSIZE)) {
23531 23564 pbsize = lbasize = DEV_BSIZE;
23532 23565 }
23533 23566 media_capacity = capacity;
23534 23567 }
23535 23568
23536 23569 /*
23537 23570 * If lun is expanded dynamically, update the un structure.
23538 23571 */
23539 23572 mutex_enter(SD_MUTEX(un));
23540 23573 if ((un->un_f_blockcount_is_valid == TRUE) &&
23541 23574 (un->un_f_tgt_blocksize_is_valid == TRUE) &&
23542 23575 (capacity > un->un_blockcount)) {
23543 23576 un->un_f_expnevent = B_FALSE;
23544 23577 sd_update_block_info(un, lbasize, capacity);
23545 23578 }
23546 23579 mutex_exit(SD_MUTEX(un));
23547 23580
23548 23581 *dki_lbsize = lbasize;
23549 23582 *dki_capacity = media_capacity;
23550 23583 if (dki_pbsize)
23551 23584 *dki_pbsize = pbsize;
23552 23585
23553 23586 done:
23554 23587 if (rval != 0) {
23555 23588 if (rval == EIO)
23556 23589 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23557 23590 else
23558 23591 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23559 23592 }
23560 23593 no_assessment:
23561 23594 sd_ssc_fini(ssc);
23562 23595 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
23563 23596 kmem_free(rqbuf, SENSE_LENGTH);
23564 23597 return (rval);
23565 23598 }
23566 23599
23567 23600 /*
23568 23601 * Function: sd_get_media_info
23569 23602 *
23570 23603 * Description: This routine is the driver entry point for handling ioctl
23571 23604 * requests for the media type or command set profile used by the
23572 23605 * drive to operate on the media (DKIOCGMEDIAINFO).
23573 23606 *
23574 23607 * Arguments: dev - the device number
23575 23608 * arg - pointer to user provided dk_minfo structure
23576 23609 * specifying the media type, logical block size and
23577 23610 * drive capacity.
23578 23611 * flag - this argument is a pass through to ddi_copyxxx()
23579 23612 * directly from the mode argument of ioctl().
23580 23613 *
23581 23614 * Return Code: returns the value from sd_get_media_info_com
23582 23615 */
23583 23616 static int
23584 23617 sd_get_media_info(dev_t dev, caddr_t arg, int flag)
23585 23618 {
23586 23619 struct dk_minfo mi;
23587 23620 int rval;
23588 23621
23589 23622 rval = sd_get_media_info_com(dev, &mi.dki_media_type,
23590 23623 &mi.dki_lbsize, &mi.dki_capacity, NULL);
23591 23624
23592 23625 if (rval)
23593 23626 return (rval);
23594 23627 if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
23595 23628 rval = EFAULT;
23596 23629 return (rval);
23597 23630 }
23598 23631
23599 23632 /*
23600 23633 * Function: sd_get_media_info_ext
23601 23634 *
23602 23635 * Description: This routine is the driver entry point for handling ioctl
23603 23636 * requests for the media type or command set profile used by the
23604 23637 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The
23605 23638  * difference between this ioctl and DKIOCGMEDIAINFO is that the
23606 23639  * return value of this ioctl contains both the logical block
23607 23640  * size and the physical block size.
23608 23641 *
23609 23642 *
23610 23643 * Arguments: dev - the device number
23611 23644 * arg - pointer to user provided dk_minfo_ext structure
23612 23645 * specifying the media type, logical block size,
23613 23646 * physical block size and disk capacity.
23614 23647 * flag - this argument is a pass through to ddi_copyxxx()
23615 23648 * directly from the mode argument of ioctl().
23616 23649 *
23617 23650 * Return Code: returns the value from sd_get_media_info_com
23618 23651 */
23619 23652 static int
23620 23653 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
23621 23654 {
23622 23655 struct dk_minfo_ext mie;
23623 23656 int rval = 0;
23624 23657
23625 23658 rval = sd_get_media_info_com(dev, &mie.dki_media_type,
23626 23659 &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);
23627 23660
23628 23661 if (rval)
23629 23662 return (rval);
23630 23663 if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
23631 23664 rval = EFAULT;
23632 23665 return (rval);
23633 23666
23634 23667 }
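
Both wrappers reduce to sd_get_media_info_com(); the only difference visible
to userland is the structure copied out. A minimal sketch of the extended
form, using a hypothetical device path:

	#include <sys/dkio.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stropts.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct dk_minfo_ext mie;
		int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);

		if (fd >= 0 && ioctl(fd, DKIOCGMEDIAINFOEXT, &mie) == 0) {
			(void) printf("lbsize %u, pbsize %u, capacity %llu\n",
			    mie.dki_lbsize, mie.dki_pbsize,
			    (unsigned long long)mie.dki_capacity);
		}
		if (fd >= 0)
			(void) close(fd);
		return (0);
	}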
23635 23668
23636 23669 /*
23637 23670 * Function: sd_watch_request_submit
23638 23671 *
23639 23672 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
23640 23673 * depending on which is supported by device.
23641 23674 */
23642 23675 static opaque_t
23643 23676 sd_watch_request_submit(struct sd_lun *un)
23644 23677 {
23645 23678 dev_t dev;
23646 23679
23647 23680 	/* All submissions are unified to use the same device number */
23648 23681 dev = sd_make_device(SD_DEVINFO(un));
23649 23682
23650 23683 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23651 23684 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un),
23652 23685 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23653 23686 (caddr_t)dev));
23654 23687 } else {
23655 23688 return (scsi_watch_request_submit(SD_SCSI_DEVP(un),
23656 23689 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23657 23690 (caddr_t)dev));
23658 23691 }
23659 23692 }
23660 23693
23661 23694
23662 23695 /*
23663 23696 * Function: sd_check_media
23664 23697 *
23665 23698 * Description: This utility routine implements the functionality for the
23666 23699 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
23667 23700 * driver state changes from that specified by the user
23668 23701 * (inserted or ejected). For example, if the user specifies
23669 23702 * DKIO_EJECTED and the current media state is inserted this
23670 23703 * routine will immediately return DKIO_INSERTED. However, if the
23671 23704 * current media state is not inserted the user thread will be
23672 23705 * blocked until the drive state changes. If DKIO_NONE is specified
23673 23706 * the user thread will block until a drive state change occurs.
23674 23707 *
23675 23708 * Arguments: dev - the device number
23676 23709 * state - user pointer to a dkio_state, updated with the current
23677 23710 * drive state at return.
23678 23711 *
23679 23712 * Return Code: ENXIO
23680 23713 * EIO
23681 23714 * EAGAIN
23682 23715 * EINTR
23683 23716 */
23684 23717
23685 23718 static int
23686 23719 sd_check_media(dev_t dev, enum dkio_state state)
23687 23720 {
23688 23721 struct sd_lun *un = NULL;
23689 23722 enum dkio_state prev_state;
23690 23723 opaque_t token = NULL;
23691 23724 int rval = 0;
23692 23725 sd_ssc_t *ssc;
23693 23726
23694 23727 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23695 23728 return (ENXIO);
23696 23729 }
23697 23730
23698 23731 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");
23699 23732
23700 23733 ssc = sd_ssc_init(un);
23701 23734
23702 23735 mutex_enter(SD_MUTEX(un));
23703 23736
23704 23737 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
23705 23738 "state=%x, mediastate=%x\n", state, un->un_mediastate);
23706 23739
23707 23740 prev_state = un->un_mediastate;
23708 23741
23709 23742 /* is there anything to do? */
23710 23743 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
23711 23744 /*
23712 23745 * submit the request to the scsi_watch service;
23713 23746 * scsi_media_watch_cb() does the real work
23714 23747 */
23715 23748 mutex_exit(SD_MUTEX(un));
23716 23749
23717 23750 /*
23718 23751 * This change handles the case where a scsi watch request is
23719 23752 * added to a device that is powered down. To accomplish this
23720 23753 * we power up the device before adding the scsi watch request,
23721 23754 		 * since the scsi watch sends a TUR directly to the device,
23722 23755 		 * which the device cannot handle if it is powered down.
23723 23756 */
23724 23757 if (sd_pm_entry(un) != DDI_SUCCESS) {
23725 23758 mutex_enter(SD_MUTEX(un));
23726 23759 goto done;
23727 23760 }
23728 23761
23729 23762 token = sd_watch_request_submit(un);
23730 23763
23731 23764 sd_pm_exit(un);
23732 23765
23733 23766 mutex_enter(SD_MUTEX(un));
23734 23767 if (token == NULL) {
23735 23768 rval = EAGAIN;
23736 23769 goto done;
23737 23770 }
23738 23771
23739 23772 /*
23740 23773 * This is a special case IOCTL that doesn't return
23741 23774 * until the media state changes. Routine sdpower
23742 23775 * knows about and handles this so don't count it
23743 23776 * as an active cmd in the driver, which would
23744 23777 * keep the device busy to the pm framework.
23745 23778 * If the count isn't decremented the device can't
23746 23779 * be powered down.
23747 23780 */
23748 23781 un->un_ncmds_in_driver--;
23749 23782 ASSERT(un->un_ncmds_in_driver >= 0);
23783 + if (un->un_f_detach_waiting)
23784 + cv_signal(&un->un_detach_cv);
23750 23785
23751 23786 /*
23752 23787 * if a prior request had been made, this will be the same
23753 23788 * token, as scsi_watch was designed that way.
23754 23789 */
23755 23790 un->un_swr_token = token;
23756 23791 un->un_specified_mediastate = state;
23757 23792
23758 23793 /*
23759 23794 * now wait for media change
23760 23795 * we will not be signalled unless mediastate == state but it is
23761 23796 * still better to test for this condition, since there is a
23762 23797 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23763 23798 */
23764 23799 SD_TRACE(SD_LOG_COMMON, un,
23765 23800 "sd_check_media: waiting for media state change\n");
23766 23801 while (un->un_mediastate == state) {
23767 23802 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
23768 23803 SD_TRACE(SD_LOG_COMMON, un,
23769 23804 "sd_check_media: waiting for media state "
23770 23805 "was interrupted\n");
23771 23806 un->un_ncmds_in_driver++;
23772 23807 rval = EINTR;
23773 23808 goto done;
23774 23809 }
23775 23810 SD_TRACE(SD_LOG_COMMON, un,
23776 23811 "sd_check_media: received signal, state=%x\n",
23777 23812 un->un_mediastate);
23778 23813 }
23779 23814 /*
23780 23815 * Inc the counter to indicate the device once again
23781 23816 * has an active outstanding cmd.
23782 23817 */
23783 23818 un->un_ncmds_in_driver++;
23784 23819 }
23785 23820
23786 23821 /* invalidate geometry */
23787 23822 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
23788 23823 sr_ejected(un);
23789 23824 }
23790 23825
23791 23826 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
23792 23827 uint64_t capacity;
23793 23828 uint_t lbasize;
23794 23829
23795 23830 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
23796 23831 mutex_exit(SD_MUTEX(un));
23797 23832 /*
23798 23833 * Since the following routines use SD_PATH_DIRECT, we must
23799 23834 * call PM directly before the upcoming disk accesses. This
23800 23835 * may cause the disk to be power/spin up.
23801 23836 		 * may cause the disk to be powered up or spun up.
23802 23837
23803 23838 if (sd_pm_entry(un) == DDI_SUCCESS) {
23804 23839 rval = sd_send_scsi_READ_CAPACITY(ssc,
23805 23840 &capacity, &lbasize, SD_PATH_DIRECT);
23806 23841 if (rval != 0) {
23807 23842 sd_pm_exit(un);
23808 23843 if (rval == EIO)
23809 23844 sd_ssc_assessment(ssc,
23810 23845 SD_FMT_STATUS_CHECK);
23811 23846 else
23812 23847 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23813 23848 mutex_enter(SD_MUTEX(un));
23814 23849 goto done;
23815 23850 }
23816 23851 } else {
23817 23852 rval = EIO;
23818 23853 mutex_enter(SD_MUTEX(un));
23819 23854 goto done;
23820 23855 }
23821 23856 mutex_enter(SD_MUTEX(un));
23822 23857
23823 23858 sd_update_block_info(un, lbasize, capacity);
23824 23859
23825 23860 /*
23826 23861 * Check if the media in the device is writable or not
23827 23862 */
23828 23863 if (ISCD(un)) {
23829 23864 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
23830 23865 }
23831 23866
23832 23867 mutex_exit(SD_MUTEX(un));
23833 23868 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
23834 23869 if ((cmlb_validate(un->un_cmlbhandle, 0,
23835 23870 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
23836 23871 sd_set_pstats(un);
23837 23872 SD_TRACE(SD_LOG_IO_PARTITION, un,
23838 23873 "sd_check_media: un:0x%p pstats created and "
23839 23874 "set\n", un);
23840 23875 }
23841 23876
23842 23877 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
23843 23878 SD_PATH_DIRECT);
23844 23879
23845 23880 sd_pm_exit(un);
23846 23881
23847 23882 if (rval != 0) {
23848 23883 if (rval == EIO)
23849 23884 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23850 23885 else
23851 23886 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23852 23887 }
23853 23888
23854 23889 mutex_enter(SD_MUTEX(un));
23855 23890 }
23856 23891 done:
23857 23892 sd_ssc_fini(ssc);
23858 23893 un->un_f_watcht_stopped = FALSE;
23859 23894 if (token != NULL && un->un_swr_token != NULL) {
23860 23895 /*
23861 23896 * Use of this local token and the mutex ensures that we avoid
23862 23897 * some race conditions associated with terminating the
23863 23898 * scsi watch.
23864 23899 */
23865 23900 token = un->un_swr_token;
23866 23901 mutex_exit(SD_MUTEX(un));
23867 23902 (void) scsi_watch_request_terminate(token,
23868 23903 SCSI_WATCH_TERMINATE_WAIT);
23869 23904 if (scsi_watch_get_ref_count(token) == 0) {
23870 23905 mutex_enter(SD_MUTEX(un));
23871 23906 un->un_swr_token = (opaque_t)NULL;
23872 23907 } else {
23873 23908 mutex_enter(SD_MUTEX(un));
23874 23909 }
23875 23910 }
23876 23911
23877 23912 /*
23878 23913 	 * Update the capacity kstat value, if there was no media
23879 23914 	 * previously (capacity kstat is 0) and media has been inserted
23880 23915 * (un_f_blockcount_is_valid == TRUE)
23881 23916 */
23882 23917 if (un->un_errstats) {
23883 23918 struct sd_errstats *stp = NULL;
23884 23919
23885 23920 stp = (struct sd_errstats *)un->un_errstats->ks_data;
23886 23921 if ((stp->sd_capacity.value.ui64 == 0) &&
23887 23922 (un->un_f_blockcount_is_valid == TRUE)) {
23888 23923 stp->sd_capacity.value.ui64 =
23889 23924 (uint64_t)((uint64_t)un->un_blockcount *
23890 23925 un->un_sys_blocksize);
23891 23926 }
23892 23927 }
23893 23928 mutex_exit(SD_MUTEX(un));
23894 23929 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
23895 23930 return (rval);
23896 23931 }
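
From userland, the DKIOCSTATE semantics described above look like a blocking
poll loop, in the style of what the volume-management daemons do. A minimal
sketch, using a hypothetical device path:

	#include <sys/dkio.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stropts.h>
	#include <unistd.h>

	int
	main(void)
	{
		enum dkio_state state = DKIO_NONE;
		int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NONBLOCK);

		if (fd < 0)
			return (1);
		for (;;) {
			/* Blocks until the drive state differs from 'state'. */
			if (ioctl(fd, DKIOCSTATE, &state) != 0)
				break;
			(void) printf("media %s\n",
			    state == DKIO_INSERTED ? "inserted" :
			    state == DKIO_EJECTED ? "ejected" : "unknown");
		}
		(void) close(fd);
		return (0);
	}

Passing back the previously returned state is what arms the blocking
behavior; passing DKIO_NONE blocks until any drive state change occurs.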
23897 23932
23898 23933
23899 23934 /*
23900 23935 * Function: sd_delayed_cv_broadcast
23901 23936 *
23902 23937 * Description: Delayed cv_broadcast to allow for target to recover from media
23903 23938 * insertion.
23904 23939 *
23905 23940 * Arguments: arg - driver soft state (unit) structure
23906 23941 */
23907 23942
23908 23943 static void
23909 23944 sd_delayed_cv_broadcast(void *arg)
23910 23945 {
23911 23946 struct sd_lun *un = arg;
23912 23947
23913 23948 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");
23914 23949
23915 23950 mutex_enter(SD_MUTEX(un));
23916 23951 un->un_dcvb_timeid = NULL;
23917 23952 cv_broadcast(&un->un_state_cv);
23918 23953 mutex_exit(SD_MUTEX(un));
23919 23954 }
23920 23955
23921 23956
23922 23957 /*
23923 23958 * Function: sd_media_watch_cb
23924 23959 *
23925 23960 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23926 23961 * routine processes the TUR sense data and updates the driver
23927 23962 * state if a transition has occurred. The user thread
23928 23963 * (sd_check_media) is then signalled.
23929 23964 *
23930 23965 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23931 23966 * among multiple watches that share this callback function
23932 23967 * resultp - scsi watch facility result packet containing scsi
23933 23968 * packet, status byte and sense data
23934 23969 *
23935 23970 * Return Code: 0 for success, -1 for failure
23936 23971 */
23937 23972
23938 23973 static int
23939 23974 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23940 23975 {
23941 23976 struct sd_lun *un;
23942 23977 struct scsi_status *statusp = resultp->statusp;
23943 23978 uint8_t *sensep = (uint8_t *)resultp->sensep;
23944 23979 enum dkio_state state = DKIO_NONE;
23945 23980 dev_t dev = (dev_t)arg;
23946 23981 uchar_t actual_sense_length;
23947 23982 uint8_t skey, asc, ascq;
23948 23983
23949 23984 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23950 23985 return (-1);
23951 23986 }
23952 23987 actual_sense_length = resultp->actual_sense_length;
23953 23988
23954 23989 mutex_enter(SD_MUTEX(un));
23955 23990 SD_TRACE(SD_LOG_COMMON, un,
23956 23991 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23957 23992 *((char *)statusp), (void *)sensep, actual_sense_length);
23958 23993
23959 23994 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23960 23995 un->un_mediastate = DKIO_DEV_GONE;
23961 23996 cv_broadcast(&un->un_state_cv);
23962 23997 mutex_exit(SD_MUTEX(un));
23963 23998
23964 23999 return (0);
23965 24000 }
23966 24001
23967 24002 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23968 24003 if (sd_gesn_media_data_valid(resultp->mmc_data)) {
23969 24004 if ((resultp->mmc_data[5] &
23970 24005 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
23971 24006 state = DKIO_INSERTED;
23972 24007 } else {
23973 24008 state = DKIO_EJECTED;
23974 24009 }
23975 24010 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
23976 24011 SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
23977 24012 sd_log_eject_request_event(un, KM_NOSLEEP);
23978 24013 }
23979 24014 }
23980 24015 } else if (sensep != NULL) {
23981 24016 /*
23982 24017 * If there was a check condition then sensep points to valid
23983 24018 * sense data. If status was not a check condition but a
23984 24019 * reservation or busy status then the new state is DKIO_NONE.
23985 24020 */
23986 24021 skey = scsi_sense_key(sensep);
23987 24022 asc = scsi_sense_asc(sensep);
23988 24023 ascq = scsi_sense_ascq(sensep);
23989 24024
23990 24025 SD_INFO(SD_LOG_COMMON, un,
23991 24026 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
23992 24027 skey, asc, ascq);
23993 24028 /* This routine only uses up to 13 bytes of sense data. */
23994 24029 if (actual_sense_length >= 13) {
23995 24030 if (skey == KEY_UNIT_ATTENTION) {
23996 24031 if (asc == 0x28) {
23997 24032 state = DKIO_INSERTED;
23998 24033 }
23999 24034 } else if (skey == KEY_NOT_READY) {
24000 24035 /*
24001 24036 * Sense data of 02/06/00 means that the
24002 24037 * drive could not read the media (No
24003 24038 * reference position found). In this case
24004 24039 * to prevent a hang on the DKIOCSTATE IOCTL
24005 24040 * we set the media state to DKIO_INSERTED.
24006 24041 */
24007 24042 if (asc == 0x06 && ascq == 0x00)
24008 24043 state = DKIO_INSERTED;
24009 24044
24010 24045 /*
24011 24046 			 * Sense data of 02/04/02 means that the host
24012 24047 			 * should send a start command. Explicitly
24013 24048 			 * leave the media state as is (inserted),
24014 24049 			 * since the media is inserted and the host
24015 24050 			 * has stopped the device for PM reasons.
24016 24051 			 * The next true read/write to this media
24017 24052 			 * will bring the device to the right state
24018 24053 			 * for media access.
24019 24054 			 */
24020 24055 */
24021 24056 if (asc == 0x3a) {
24022 24057 state = DKIO_EJECTED;
24023 24058 } else {
24024 24059 /*
24025 24060 * If the drive is busy with an
24026 24061 * operation or long write, keep the
24027 24062 * media in an inserted state.
24028 24063 */
24029 24064
24030 24065 if ((asc == 0x04) &&
24031 24066 ((ascq == 0x02) ||
24032 24067 (ascq == 0x07) ||
24033 24068 (ascq == 0x08))) {
24034 24069 state = DKIO_INSERTED;
24035 24070 }
24036 24071 }
24037 24072 } else if (skey == KEY_NO_SENSE) {
24038 24073 if ((asc == 0x00) && (ascq == 0x00)) {
24039 24074 /*
24040 24075 * Sense Data 00/00/00 does not provide
24041 24076 * any information about the state of
24042 24077 * the media. Ignore it.
24043 24078 */
24044 24079 mutex_exit(SD_MUTEX(un));
24045 24080 return (0);
24046 24081 }
24047 24082 }
24048 24083 }
24049 24084 } else if ((*((char *)statusp) == STATUS_GOOD) &&
24050 24085 (resultp->pkt->pkt_reason == CMD_CMPLT)) {
24051 24086 state = DKIO_INSERTED;
24052 24087 }
24053 24088
24054 24089 SD_TRACE(SD_LOG_COMMON, un,
24055 24090 "sd_media_watch_cb: state=%x, specified=%x\n",
24056 24091 state, un->un_specified_mediastate);
24057 24092
24058 24093 /*
24059 24094 * now signal the waiting thread if this is *not* the specified state;
24060 24095 * delay the signal if the state is DKIO_INSERTED to allow the target
24061 24096 * to recover
24062 24097 */
24063 24098 if (state != un->un_specified_mediastate) {
24064 24099 un->un_mediastate = state;
24065 24100 if (state == DKIO_INSERTED) {
24066 24101 /*
24067 24102 * delay the signal to give the drive a chance
24068 24103 * to do what it apparently needs to do
24069 24104 */
24070 24105 SD_TRACE(SD_LOG_COMMON, un,
24071 24106 "sd_media_watch_cb: delayed cv_broadcast\n");
24072 24107 if (un->un_dcvb_timeid == NULL) {
24073 24108 un->un_dcvb_timeid =
24074 24109 timeout(sd_delayed_cv_broadcast, un,
24075 24110 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
24076 24111 }
24077 24112 } else {
24078 24113 SD_TRACE(SD_LOG_COMMON, un,
24079 24114 "sd_media_watch_cb: immediate cv_broadcast\n");
24080 24115 cv_broadcast(&un->un_state_cv);
24081 24116 }
24082 24117 }
24083 24118 mutex_exit(SD_MUTEX(un));
24084 24119 return (0);
24085 24120 }
24086 24121
24087 24122
24088 24123 /*
24089 24124 * Function: sd_dkio_get_temp
24090 24125 *
24091 24126 * Description: This routine is the driver entry point for handling ioctl
24092 24127 * requests to get the disk temperature.
24093 24128 *
24094 24129 * Arguments: dev - the device number
24095 24130 * arg - pointer to user provided dk_temperature structure.
24096 24131 * flag - this argument is a pass through to ddi_copyxxx()
24097 24132 * directly from the mode argument of ioctl().
24098 24133 *
24099 24134 * Return Code: 0
24100 24135 * EFAULT
24101 24136 * ENXIO
24102 24137 * EAGAIN
24103 24138 */
24104 24139
24105 24140 static int
24106 24141 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
24107 24142 {
24108 24143 struct sd_lun *un = NULL;
24109 24144 struct dk_temperature *dktemp = NULL;
24110 24145 uchar_t *temperature_page;
24111 24146 int rval = 0;
24112 24147 int path_flag = SD_PATH_STANDARD;
24113 24148 sd_ssc_t *ssc;
24114 24149
24115 24150 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24116 24151 return (ENXIO);
24117 24152 }
24118 24153
24119 24154 ssc = sd_ssc_init(un);
24120 24155 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
24121 24156
24122 24157 /* copyin the disk temp argument to get the user flags */
24123 24158 if (ddi_copyin((void *)arg, dktemp,
24124 24159 sizeof (struct dk_temperature), flag) != 0) {
24125 24160 rval = EFAULT;
24126 24161 goto done;
24127 24162 }
24128 24163
24129 24164 /* Initialize the temperature to invalid. */
24130 24165 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24131 24166 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24132 24167
24133 24168 /*
24134 24169 * Note: Investigate removing the "bypass pm" semantic.
24135 24170 * Can we just bypass PM always?
24136 24171 */
24137 24172 if (dktemp->dkt_flags & DKT_BYPASS_PM) {
24138 24173 path_flag = SD_PATH_DIRECT;
24139 24174 ASSERT(!mutex_owned(&un->un_pm_mutex));
24140 24175 mutex_enter(&un->un_pm_mutex);
24141 24176 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24142 24177 /*
24143 24178 * If DKT_BYPASS_PM is set, and the drive happens to be
24144 24179 			 * in low power mode, we cannot wake it up; we need
24145 24180 			 * to return EAGAIN.
24146 24181 */
24147 24182 mutex_exit(&un->un_pm_mutex);
24148 24183 rval = EAGAIN;
24149 24184 goto done;
24150 24185 } else {
24151 24186 /*
24152 24187 * Indicate to PM the device is busy. This is required
24153 24188 * to avoid a race - i.e. the ioctl is issuing a
24154 24189 * command and the pm framework brings down the device
24155 24190 * to low power mode (possible power cut-off on some
24156 24191 * platforms).
24157 24192 */
24158 24193 mutex_exit(&un->un_pm_mutex);
24159 24194 if (sd_pm_entry(un) != DDI_SUCCESS) {
24160 24195 rval = EAGAIN;
24161 24196 goto done;
24162 24197 }
24163 24198 }
24164 24199 }
24165 24200
24166 24201 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24167 24202
24168 24203 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
24169 24204 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
24170 24205 if (rval != 0)
24171 24206 goto done2;
24172 24207
24173 24208 /*
24174 24209 * For the current temperature verify that the parameter length is 0x02
24175 24210 * and the parameter code is 0x00
24176 24211 */
24177 24212 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24178 24213 (temperature_page[5] == 0x00)) {
24179 24214 if (temperature_page[9] == 0xFF) {
24180 24215 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24181 24216 } else {
24182 24217 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24183 24218 }
24184 24219 }
24185 24220
24186 24221 /*
24187 24222 * For the reference temperature verify that the parameter
24188 24223 * length is 0x02 and the parameter code is 0x01
24189 24224 */
24190 24225 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24191 24226 (temperature_page[11] == 0x01)) {
24192 24227 if (temperature_page[15] == 0xFF) {
24193 24228 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24194 24229 } else {
24195 24230 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24196 24231 }
24197 24232 }
24198 24233
24199 24234 	/* Do the copyout regardless of the temperature command's status. */
24200 24235 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24201 24236 flag) != 0) {
24202 24237 rval = EFAULT;
24203 24238 goto done1;
24204 24239 }
24205 24240
24206 24241 done2:
24207 24242 if (rval != 0) {
24208 24243 if (rval == EIO)
24209 24244 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24210 24245 else
24211 24246 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24212 24247 }
24213 24248 done1:
24214 24249 if (path_flag == SD_PATH_DIRECT) {
24215 24250 sd_pm_exit(un);
24216 24251 }
24217 24252
24218 24253 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24219 24254 done:
24220 24255 sd_ssc_fini(ssc);
24221 24256 if (dktemp != NULL) {
24222 24257 kmem_free(dktemp, sizeof (struct dk_temperature));
24223 24258 }
24224 24259
24225 24260 return (rval);
24226 24261 }
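
The DKT_BYPASS_PM handling above matters to callers that poll temperature
without wanting to spin up an idle disk. A minimal userland sketch, using a
hypothetical device path:

	#include <sys/dkio.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <stropts.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct dk_temperature dkt;
		int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);

		if (fd < 0)
			return (1);
		(void) memset(&dkt, 0, sizeof (dkt));
		dkt.dkt_flags = DKT_BYPASS_PM;	/* EAGAIN if powered down */
		if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
		    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
			(void) printf("current: %d C\n", dkt.dkt_cur_temp);
		(void) close(fd);
		return (0);
	}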
24227 24262
24228 24263
24229 24264 /*
24230 24265 * Function: sd_log_page_supported
24231 24266 *
24232 24267 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24233 24268 * supported log pages.
24234 24269 *
24235 24270 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
24236 24271 * structure for this target.
24237 24272  * log_page - the log page code to search for.
24238 24273 *
24239 24274 * Return Code: -1 - on error (log sense is optional and may not be supported).
24240 24275 * 0 - log page not found.
24241 24276 * 1 - log page found.
24242 24277 */
24243 -
24278 +#ifdef notyet
24244 24279 static int
24245 24280 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24246 24281 {
24247 24282 uchar_t *log_page_data;
24248 24283 int i;
24249 24284 int match = 0;
24250 24285 int log_size;
24251 24286 int status = 0;
24252 24287 struct sd_lun *un;
24253 24288
24254 24289 ASSERT(ssc != NULL);
24255 24290 un = ssc->ssc_un;
24256 24291 ASSERT(un != NULL);
24257 24292
24258 24293 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24259 24294
24260 24295 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
24261 24296 SD_PATH_DIRECT);
24262 24297
24263 24298 if (status != 0) {
24264 24299 if (status == EIO) {
24265 24300 /*
24266 24301 			 * Some disks do not support log sense; we
24267 24302 			 * should ignore this kind of error (sense key
24268 24303 			 * 0x5 - illegal request).
24269 24304 */
24270 24305 uint8_t *sensep;
24271 24306 int senlen;
24272 24307
24273 24308 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
24274 24309 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
24275 24310 ssc->ssc_uscsi_cmd->uscsi_rqresid);
24276 24311
24277 24312 if (senlen > 0 &&
24278 24313 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
24279 24314 sd_ssc_assessment(ssc,
24280 24315 SD_FMT_IGNORE_COMPROMISE);
24281 24316 } else {
24282 24317 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24283 24318 }
24284 24319 } else {
24285 24320 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24286 24321 }
24287 24322
24288 24323 SD_ERROR(SD_LOG_COMMON, un,
24289 24324 "sd_log_page_supported: failed log page retrieval\n");
24290 24325 kmem_free(log_page_data, 0xFF);
24291 24326 return (-1);
24292 24327 }
24293 24328
24294 24329 log_size = log_page_data[3];
24295 24330
24296 24331 /*
24297 24332 * The list of supported log pages start from the fourth byte. Check
24298 24333 * until we run out of log pages or a match is found.
24299 24334 */
24300 24335 for (i = 4; (i < (log_size + 4)) && !match; i++) {
24301 24336 if (log_page_data[i] == log_page) {
24302 24337 match++;
24303 24338 }
24304 24339 }
24305 24340 kmem_free(log_page_data, 0xFF);
24306 24341 return (match);
24307 24342 }
24343 +#endif
24308 24344
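The page-0 walk above is worth seeing in isolation: byte 3 of the supported-pages reply carries the list length, and the page codes start at byte 4. A sketch under those assumptions (function name invented):

	#include <stdint.h>

	static int
	log_page_supported(const uint8_t *page0, uint8_t page)
	{
		int i, len = page0[3];

		for (i = 4; i < len + 4; i++) {
			if (page0[i] == page)
				return (1);
		}
		return (0);
	}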
24309 -
24310 24345 /*
24311 24346 * Function: sd_mhdioc_failfast
24312 24347 *
24313 24348 * Description: This routine is the driver entry point for handling ioctl
24314 24349 * requests to enable/disable the multihost failfast option.
24315 24350 * (MHIOCENFAILFAST)
24316 24351 *
24317 24352 * Arguments: dev - the device number
24318 24353 * arg - user specified probing interval.
24319 24354 * flag - this argument is a pass through to ddi_copyxxx()
24320 24355 * directly from the mode argument of ioctl().
24321 24356 *
24322 24357 * Return Code: 0
24323 24358 * EFAULT
24324 24359 * ENXIO
24325 24360 */
24326 24361
24327 24362 static int
24328 24363 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
24329 24364 {
24330 24365 struct sd_lun *un = NULL;
24331 24366 int mh_time;
24332 24367 int rval = 0;
24333 24368
24334 24369 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24335 24370 return (ENXIO);
24336 24371 }
24337 24372
24338 24373 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
24339 24374 return (EFAULT);
24340 24375
24341 24376 if (mh_time) {
24342 24377 mutex_enter(SD_MUTEX(un));
24343 24378 un->un_resvd_status |= SD_FAILFAST;
24344 24379 mutex_exit(SD_MUTEX(un));
24345 24380 /*
24346 24381 * If mh_time is INT_MAX, then this ioctl is being used for
24347 24382 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
24348 24383 */
24349 24384 if (mh_time != INT_MAX) {
24350 24385 rval = sd_check_mhd(dev, mh_time);
24351 24386 }
24352 24387 } else {
24353 24388 (void) sd_check_mhd(dev, 0);
24354 24389 mutex_enter(SD_MUTEX(un));
24355 24390 un->un_resvd_status &= ~SD_FAILFAST;
24356 24391 mutex_exit(SD_MUTEX(un));
24357 24392 }
24358 24393 return (rval);
24359 24394 }
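The INT_MAX convention above matters to SCSI-3 PGR users: it arms the failfast flag without spawning the TEST UNIT READY watch thread. A hedged userland sketch (the helper name, fd handling, and the <stropts.h> ioctl() declaration are assumptions):

	#include <limits.h>
	#include <stropts.h>
	#include <sys/mhd.h>

	/* Arm failfast for PGR use: panic on conflict, but no probe thread. */
	static int
	enable_pgr_failfast(int fd)
	{
		int mh_time = INT_MAX;

		return (ioctl(fd, MHIOCENFAILFAST, &mh_time));
	}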
24360 24395
24361 24396
24362 24397 /*
24363 24398 * Function: sd_mhdioc_takeown
24364 24399 *
24365 24400 * Description: This routine is the driver entry point for handling ioctl
24366 24401 * requests to forcefully acquire exclusive access rights to the
24367 24402 * multihost disk (MHIOCTKOWN).
24368 24403 *
24369 24404 * Arguments: dev - the device number
24370 24405 * arg - user provided structure specifying the delay
24371 24406 * parameters in milliseconds
24372 24407 * flag - this argument is a pass through to ddi_copyxxx()
24373 24408 * directly from the mode argument of ioctl().
24374 24409 *
24375 24410 * Return Code: 0
24376 24411 * EFAULT
24377 24412 * ENXIO
24378 24413 */
24379 24414
24380 24415 static int
24381 24416 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
24382 24417 {
24383 24418 struct sd_lun *un = NULL;
24384 24419 struct mhioctkown *tkown = NULL;
24385 24420 int rval = 0;
24386 24421
24387 24422 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24388 24423 return (ENXIO);
24389 24424 }
24390 24425
24391 24426 if (arg != NULL) {
24392 24427 tkown = (struct mhioctkown *)
24393 24428 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
24394 24429 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
24395 24430 if (rval != 0) {
24396 24431 rval = EFAULT;
24397 24432 goto error;
24398 24433 }
24399 24434 }
24400 24435
24401 24436 rval = sd_take_ownership(dev, tkown);
24402 24437 mutex_enter(SD_MUTEX(un));
24403 24438 if (rval == 0) {
24404 24439 un->un_resvd_status |= SD_RESERVE;
24405 24440 if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
24406 24441 sd_reinstate_resv_delay =
24407 24442 tkown->reinstate_resv_delay * 1000;
24408 24443 } else {
24409 24444 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
24410 24445 }
24411 24446 /*
24412 24447 * Give the scsi_watch routine interval set by
24413 24448 * the MHIOCENFAILFAST ioctl precedence here.
24414 24449 */
24415 24450 if ((un->un_resvd_status & SD_FAILFAST) == 0) {
24416 24451 mutex_exit(SD_MUTEX(un));
24417 24452 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
24418 24453 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24419 24454 "sd_mhdioc_takeown : %d\n",
24420 24455 sd_reinstate_resv_delay);
24421 24456 } else {
24422 24457 mutex_exit(SD_MUTEX(un));
24423 24458 }
24424 24459 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
24425 24460 sd_mhd_reset_notify_cb, (caddr_t)un);
24426 24461 } else {
24427 24462 un->un_resvd_status &= ~SD_RESERVE;
24428 24463 mutex_exit(SD_MUTEX(un));
24429 24464 }
24430 24465
24431 24466 error:
24432 24467 if (tkown != NULL) {
24433 24468 kmem_free(tkown, sizeof (struct mhioctkown));
24434 24469 }
24435 24470 return (rval);
24436 24471 }
24437 24472
24438 24473
24439 24474 /*
24440 24475 * Function: sd_mhdioc_release
24441 24476 *
24442 24477 * Description: This routine is the driver entry point for handling ioctl
24443 24478 * requests to release exclusive access rights to the multihost
24444 24479 * disk (MHIOCRELEASE).
24445 24480 *
24446 24481 * Arguments: dev - the device number
24447 24482 *
24448 24483 * Return Code: 0
24449 24484 * ENXIO
24450 24485 */
24451 24486
24452 24487 static int
24453 24488 sd_mhdioc_release(dev_t dev)
24454 24489 {
24455 24490 struct sd_lun *un = NULL;
24456 24491 timeout_id_t resvd_timeid_save;
24457 24492 int resvd_status_save;
24458 24493 int rval = 0;
24459 24494
24460 24495 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24461 24496 return (ENXIO);
24462 24497 }
24463 24498
24464 24499 mutex_enter(SD_MUTEX(un));
24465 24500 resvd_status_save = un->un_resvd_status;
24466 24501 un->un_resvd_status &=
24467 24502 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
24468 24503 if (un->un_resvd_timeid) {
24469 24504 resvd_timeid_save = un->un_resvd_timeid;
24470 24505 un->un_resvd_timeid = NULL;
24471 24506 mutex_exit(SD_MUTEX(un));
24472 24507 (void) untimeout(resvd_timeid_save);
24473 24508 } else {
24474 24509 mutex_exit(SD_MUTEX(un));
24475 24510 }
24476 24511
24477 24512 /*
24478 24513 * destroy any pending timeout thread that may be attempting to
24479 24514 * reinstate reservation on this device.
24480 24515 */
24481 24516 sd_rmv_resv_reclaim_req(dev);
24482 24517
24483 24518 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
24484 24519 mutex_enter(SD_MUTEX(un));
24485 24520 if ((un->un_mhd_token) &&
24486 24521 ((un->un_resvd_status & SD_FAILFAST) == 0)) {
24487 24522 mutex_exit(SD_MUTEX(un));
24488 24523 (void) sd_check_mhd(dev, 0);
24489 24524 } else {
24490 24525 mutex_exit(SD_MUTEX(un));
24491 24526 }
24492 24527 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
24493 24528 sd_mhd_reset_notify_cb, (caddr_t)un);
24494 24529 } else {
24495 24530 /*
24496 24531 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24497 24532 */
24498 24533 mutex_enter(SD_MUTEX(un));
24499 24534 un->un_resvd_status = resvd_status_save;
24500 24535 mutex_exit(SD_MUTEX(un));
24501 24536 }
24502 24537 return (rval);
24503 24538 }
24504 24539
24505 24540
24506 24541 /*
24507 24542 * Function: sd_mhdioc_register_devid
24508 24543 *
24509 24544 * Description: This routine is the driver entry point for handling ioctl
24510 24545 * requests to register the device id (MHIOCREREGISTERDEVID).
24511 24546 *
24512 24547 * Note: The implementation for this ioctl has been updated to
24513 24548 * be consistent with the original PSARC case (1999/357)
24514 24549 * (4375899, 4241671, 4220005)
24515 24550 *
24516 24551 * Arguments: dev - the device number
24517 24552 *
24518 24553 * Return Code: 0
24519 24554 * ENXIO
24520 24555 */
24521 24556
24522 24557 static int
24523 24558 sd_mhdioc_register_devid(dev_t dev)
24524 24559 {
24525 24560 struct sd_lun *un = NULL;
24526 24561 int rval = 0;
24527 24562 sd_ssc_t *ssc;
24528 24563
24529 24564 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24530 24565 return (ENXIO);
24531 24566 }
24532 24567
24533 24568 ASSERT(!mutex_owned(SD_MUTEX(un)));
24534 24569
24535 24570 mutex_enter(SD_MUTEX(un));
24536 24571
24537 24572 /* If a devid already exists, de-register it */
24538 24573 if (un->un_devid != NULL) {
24539 24574 ddi_devid_unregister(SD_DEVINFO(un));
24540 24575 /*
24541 24576 		 * After unregistering the devid, free the devid memory.
24542 24577 */
24543 24578 ddi_devid_free(un->un_devid);
24544 24579 un->un_devid = NULL;
24545 24580 }
24546 24581
24547 24582 /* Check for reservation conflict */
24548 24583 mutex_exit(SD_MUTEX(un));
24549 24584 ssc = sd_ssc_init(un);
24550 24585 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
24551 24586 mutex_enter(SD_MUTEX(un));
24552 24587
24553 24588 switch (rval) {
24554 24589 case 0:
24555 24590 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24556 24591 break;
24557 24592 case EACCES:
24558 24593 break;
24559 24594 default:
24560 24595 rval = EIO;
24561 24596 }
24562 24597
24563 24598 mutex_exit(SD_MUTEX(un));
24564 24599 if (rval != 0) {
24565 24600 if (rval == EIO)
24566 24601 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24567 24602 else
24568 24603 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24569 24604 }
24570 24605 sd_ssc_fini(ssc);
24571 24606 return (rval);
24572 24607 }
24573 24608
24574 24609
24575 24610 /*
24576 24611 * Function: sd_mhdioc_inkeys
24577 24612 *
24578 24613 * Description: This routine is the driver entry point for handling ioctl
24579 24614 * requests to issue the SCSI-3 Persistent In Read Keys command
24580 24615 * to the device (MHIOCGRP_INKEYS).
24581 24616 *
24582 24617 * Arguments: dev - the device number
24583 24618 * arg - user provided in_keys structure
24584 24619 * flag - this argument is a pass through to ddi_copyxxx()
24585 24620 * directly from the mode argument of ioctl().
24586 24621 *
24587 24622 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24588 24623 * ENXIO
24589 24624 * EFAULT
24590 24625 */
24591 24626
24592 24627 static int
24593 24628 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24594 24629 {
24595 24630 struct sd_lun *un;
24596 24631 mhioc_inkeys_t inkeys;
24597 24632 int rval = 0;
24598 24633
24599 24634 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24600 24635 return (ENXIO);
24601 24636 }
24602 24637
24603 24638 #ifdef _MULTI_DATAMODEL
24604 24639 switch (ddi_model_convert_from(flag & FMODELS)) {
24605 24640 case DDI_MODEL_ILP32: {
24606 24641 struct mhioc_inkeys32 inkeys32;
24607 24642
24608 24643 if (ddi_copyin(arg, &inkeys32,
24609 24644 sizeof (struct mhioc_inkeys32), flag) != 0) {
24610 24645 return (EFAULT);
24611 24646 }
24612 24647 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24613 24648 if ((rval = sd_persistent_reservation_in_read_keys(un,
24614 24649 &inkeys, flag)) != 0) {
24615 24650 return (rval);
24616 24651 }
24617 24652 inkeys32.generation = inkeys.generation;
24618 24653 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24619 24654 flag) != 0) {
24620 24655 return (EFAULT);
24621 24656 }
24622 24657 break;
24623 24658 }
24624 24659 case DDI_MODEL_NONE:
24625 24660 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24626 24661 flag) != 0) {
24627 24662 return (EFAULT);
24628 24663 }
24629 24664 if ((rval = sd_persistent_reservation_in_read_keys(un,
24630 24665 &inkeys, flag)) != 0) {
24631 24666 return (rval);
24632 24667 }
24633 24668 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24634 24669 flag) != 0) {
24635 24670 return (EFAULT);
24636 24671 }
24637 24672 break;
24638 24673 }
24639 24674
24640 24675 #else /* ! _MULTI_DATAMODEL */
24641 24676
24642 24677 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24643 24678 return (EFAULT);
24644 24679 }
24645 24680 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24646 24681 if (rval != 0) {
24647 24682 return (rval);
24648 24683 }
24649 24684 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24650 24685 return (EFAULT);
24651 24686 }
24652 24687
24653 24688 #endif /* _MULTI_DATAMODEL */
24654 24689
24655 24690 return (rval);
24656 24691 }
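The ILP32/LP64 plumbing above wraps a simple two-pass protocol; a userland sketch of it, assuming the mhioc_key_list_t fields (listsize, listlen, list) as declared in sys/mhd.h -- treat the field names and error handling as illustrative:

	#include <stdlib.h>
	#include <stropts.h>
	#include <sys/mhd.h>

	static int
	fetch_registered_keys(int fd)
	{
		mhioc_key_list_t kl;
		mhioc_inkeys_t ik;

		kl.listsize = 0;	/* pass 1: ask only for the key count */
		kl.list = NULL;
		ik.li = &kl;
		if (ioctl(fd, MHIOCGRP_INKEYS, &ik) != 0)
			return (-1);

		kl.list = calloc(kl.listlen, sizeof (mhioc_resv_key_t));
		if (kl.list == NULL)
			return (-1);
		kl.listsize = kl.listlen;	/* pass 2: fetch the keys */
		if (ioctl(fd, MHIOCGRP_INKEYS, &ik) != 0) {
			free(kl.list);
			return (-1);
		}
		/* ik.generation and kl.list[0 .. kl.listlen - 1] now valid */
		free(kl.list);
		return (0);
	}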
24657 24692
24658 24693
24659 24694 /*
24660 24695 * Function: sd_mhdioc_inresv
24661 24696 *
24662 24697 * Description: This routine is the driver entry point for handling ioctl
24663 24698 * requests to issue the SCSI-3 Persistent In Read Reservations
24664 24699  *		command to the device (MHIOCGRP_INRESV).
24665 24700 *
24666 24701 * Arguments: dev - the device number
24667 24702 * arg - user provided in_resv structure
24668 24703 * flag - this argument is a pass through to ddi_copyxxx()
24669 24704 * directly from the mode argument of ioctl().
24670 24705 *
24671 24706 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24672 24707 * ENXIO
24673 24708 * EFAULT
24674 24709 */
24675 24710
24676 24711 static int
24677 24712 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
24678 24713 {
24679 24714 struct sd_lun *un;
24680 24715 mhioc_inresvs_t inresvs;
24681 24716 int rval = 0;
24682 24717
24683 24718 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24684 24719 return (ENXIO);
24685 24720 }
24686 24721
24687 24722 #ifdef _MULTI_DATAMODEL
24688 24723
24689 24724 switch (ddi_model_convert_from(flag & FMODELS)) {
24690 24725 case DDI_MODEL_ILP32: {
24691 24726 struct mhioc_inresvs32 inresvs32;
24692 24727
24693 24728 if (ddi_copyin(arg, &inresvs32,
24694 24729 sizeof (struct mhioc_inresvs32), flag) != 0) {
24695 24730 return (EFAULT);
24696 24731 }
24697 24732 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
24698 24733 if ((rval = sd_persistent_reservation_in_read_resv(un,
24699 24734 &inresvs, flag)) != 0) {
24700 24735 return (rval);
24701 24736 }
24702 24737 inresvs32.generation = inresvs.generation;
24703 24738 if (ddi_copyout(&inresvs32, arg,
24704 24739 sizeof (struct mhioc_inresvs32), flag) != 0) {
24705 24740 return (EFAULT);
24706 24741 }
24707 24742 break;
24708 24743 }
24709 24744 case DDI_MODEL_NONE:
24710 24745 if (ddi_copyin(arg, &inresvs,
24711 24746 sizeof (mhioc_inresvs_t), flag) != 0) {
24712 24747 return (EFAULT);
24713 24748 }
24714 24749 if ((rval = sd_persistent_reservation_in_read_resv(un,
24715 24750 &inresvs, flag)) != 0) {
24716 24751 return (rval);
24717 24752 }
24718 24753 if (ddi_copyout(&inresvs, arg,
24719 24754 sizeof (mhioc_inresvs_t), flag) != 0) {
24720 24755 return (EFAULT);
24721 24756 }
24722 24757 break;
24723 24758 }
24724 24759
24725 24760 #else /* ! _MULTI_DATAMODEL */
24726 24761
24727 24762 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
24728 24763 return (EFAULT);
24729 24764 }
24730 24765 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
24731 24766 if (rval != 0) {
24732 24767 return (rval);
24733 24768 }
24734 24769 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
24735 24770 return (EFAULT);
24736 24771 }
24737 24772
24738 24773 #endif /* ! _MULTI_DATAMODEL */
24739 24774
24740 24775 return (rval);
24741 24776 }
24742 24777
24743 24778
24744 24779 /*
24745 24780 * The following routines support the clustering functionality described below
24746 24781 * and implement lost reservation reclaim functionality.
24747 24782 *
24748 24783 * Clustering
24749 24784 * ----------
24750 24785 * The clustering code uses two different, independent forms of SCSI
24751 24786 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
24752 24787 * Persistent Group Reservations. For any particular disk, it will use either
24753 24788 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24754 24789 *
24755 24790 * SCSI-2
24756 24791 * The cluster software takes ownership of a multi-hosted disk by issuing the
24757 24792 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
24758 24793 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
24759 24794  * cluster node, just after taking ownership of the disk with the MHIOCTKOWN
24760 24795  * ioctl, then issues the MHIOCENFAILFAST ioctl.  This "enables failfast" in the
24761 24796 * driver. The meaning of failfast is that if the driver (on this host) ever
24762 24797 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
24763 24798 * it should immediately panic the host. The motivation for this ioctl is that
24764 24799 * if this host does encounter reservation conflict, the underlying cause is
24765 24800 * that some other host of the cluster has decided that this host is no longer
24766 24801 * in the cluster and has seized control of the disks for itself. Since this
24767 24802 * host is no longer in the cluster, it ought to panic itself. The
24768 24803 * MHIOCENFAILFAST ioctl does two things:
24769 24804 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24770 24805 * error to panic the host
24771 24806 * (b) it sets up a periodic timer to test whether this host still has
24772 24807 * "access" (in that no other host has reserved the device): if the
24773 24808 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24774 24809 * purpose of that periodic timer is to handle scenarios where the host is
24775 24810 * otherwise temporarily quiescent, temporarily doing no real i/o.
24776 24811 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24777 24812 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24778 24813 * the device itself.
24779 24814 *
24780 24815 * SCSI-3 PGR
24781 24816 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24782 24817 * facility is supported through the shared multihost disk ioctls
24783 24818 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24784 24819 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR)
24785 24820 *
24786 24821 * Reservation Reclaim:
24787 24822 * --------------------
24788 24823 * To support the lost reservation reclaim operations this driver creates a
24789 24824 * single thread to handle reinstating reservations on all devices that have
24790 24825  * lost reservations. sd_resv_reclaim_requests are logged for all devices
24791 24826  * that have LOST RESERVATIONS when the scsi watch facility calls back
24792 24827  * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
24793 24828  * requests to regain the lost reservations.
24794 24829 */
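Putting the SCSI-2 half of this together, a hedged userland sketch of the take-ownership / failfast / release sequence described above -- the device path handling, delay values, and error policy are purely illustrative, and the struct mhioctkown delays are in milliseconds per the handlers above:

	#include <fcntl.h>
	#include <string.h>
	#include <stropts.h>
	#include <sys/mhd.h>
	#include <unistd.h>

	static int
	own_disk(const char *rdsk_path)
	{
		struct mhioctkown tk;
		int probe_ms = 2000;	/* failfast probe interval */
		int fd = open(rdsk_path, O_RDWR);

		if (fd < 0)
			return (-1);
		(void) memset(&tk, 0, sizeof (tk));
		tk.min_ownership_delay = 6000;	/* ms without resets required */
		tk.max_ownership_delay = 30000;	/* ms before giving up */
		if (ioctl(fd, MHIOCTKOWN, &tk) != 0 ||
		    ioctl(fd, MHIOCENFAILFAST, &probe_ms) != 0) {
			(void) close(fd);
			return (-1);
		}
		return (fd);	/* later: ioctl(fd, MHIOCRELEASE), close(fd) */
	}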
24795 24830
24796 24831 /*
24797 24832 * Function: sd_check_mhd()
24798 24833 *
24799 24834 * Description: This function sets up and submits a scsi watch request or
24800 24835 * terminates an existing watch request. This routine is used in
24801 24836 * support of reservation reclaim.
24802 24837 *
24803 24838 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24804 24839 * among multiple watches that share the callback function
24805 24840  *		interval - the number of milliseconds specifying the watch
24806 24841 * interval for issuing TEST UNIT READY commands. If
24807 24842 * set to 0 the watch should be terminated. If the
24808 24843 * interval is set to 0 and if the device is required
24809 24844 * to hold reservation while disabling failfast, the
24810 24845 * watch is restarted with an interval of
24811 24846 * reinstate_resv_delay.
24812 24847 *
24813 24848 * Return Code: 0 - Successful submit/terminate of scsi watch request
24814 24849 * ENXIO - Indicates an invalid device was specified
24815 24850 * EAGAIN - Unable to submit the scsi watch request
24816 24851 */
24817 24852
24818 24853 static int
24819 24854 sd_check_mhd(dev_t dev, int interval)
24820 24855 {
24821 24856 struct sd_lun *un;
24822 24857 opaque_t token;
24823 24858
24824 24859 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24825 24860 return (ENXIO);
24826 24861 }
24827 24862
24828 24863 /* is this a watch termination request? */
24829 24864 if (interval == 0) {
24830 24865 mutex_enter(SD_MUTEX(un));
24831 24866 /* if there is an existing watch task then terminate it */
24832 24867 if (un->un_mhd_token) {
24833 24868 token = un->un_mhd_token;
24834 24869 un->un_mhd_token = NULL;
24835 24870 mutex_exit(SD_MUTEX(un));
24836 24871 (void) scsi_watch_request_terminate(token,
24837 24872 SCSI_WATCH_TERMINATE_ALL_WAIT);
24838 24873 mutex_enter(SD_MUTEX(un));
24839 24874 } else {
24840 24875 mutex_exit(SD_MUTEX(un));
24841 24876 /*
24842 24877 * Note: If we return here we don't check for the
24843 24878 * failfast case. This is the original legacy
24844 24879 * implementation but perhaps we should be checking
24845 24880 * the failfast case.
24846 24881 */
24847 24882 return (0);
24848 24883 }
24849 24884 /*
24850 24885 * If the device is required to hold reservation while
24851 24886 * disabling failfast, we need to restart the scsi_watch
24852 24887 * routine with an interval of reinstate_resv_delay.
24853 24888 */
24854 24889 if (un->un_resvd_status & SD_RESERVE) {
24855 24890 interval = sd_reinstate_resv_delay/1000;
24856 24891 } else {
24857 24892 /* no failfast so bail */
24858 24893 mutex_exit(SD_MUTEX(un));
24859 24894 return (0);
24860 24895 }
24861 24896 mutex_exit(SD_MUTEX(un));
24862 24897 }
24863 24898
24864 24899 /*
24865 24900 * adjust minimum time interval to 1 second,
24866 24901 * and convert from msecs to usecs
24867 24902 */
24868 24903 if (interval > 0 && interval < 1000) {
24869 24904 interval = 1000;
24870 24905 }
24871 24906 interval *= 1000;
24872 24907
24873 24908 /*
24874 24909 * submit the request to the scsi_watch service
24875 24910 */
24876 24911 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
24877 24912 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
24878 24913 if (token == NULL) {
24879 24914 return (EAGAIN);
24880 24915 }
24881 24916
24882 24917 /*
24883 24918 * save token for termination later on
24884 24919 */
24885 24920 mutex_enter(SD_MUTEX(un));
24886 24921 un->un_mhd_token = token;
24887 24922 mutex_exit(SD_MUTEX(un));
24888 24923 return (0);
24889 24924 }
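The interval handling above reduces to a small normalization -- nonzero values below one second are clamped to 1000 ms, then scaled to microseconds for scsi_watch. A sketch of that arithmetic (helper name invented):

	/* Normalize a watch interval: at least 1 second, in microseconds. */
	static int
	mhd_watch_interval_usec(int interval_ms)
	{
		if (interval_ms > 0 && interval_ms < 1000)
			interval_ms = 1000;
		return (interval_ms * 1000);
	}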
24890 24925
24891 24926
24892 24927 /*
24893 24928 * Function: sd_mhd_watch_cb()
24894 24929 *
24895 24930 * Description: This function is the call back function used by the scsi watch
24896 24931 * facility. The scsi watch facility sends the "Test Unit Ready"
24897 24932 * and processes the status. If applicable (i.e. a "Unit Attention"
24898 24933 * status and automatic "Request Sense" not used) the scsi watch
24899 24934 * facility will send a "Request Sense" and retrieve the sense data
24900 24935  *		to be passed to this callback function. In either case, whether
24901 24936  *		the "Request Sense" was automatic or submitted by the facility,
24902 24937  *		this callback is passed the status and sense data.
24903 24938 *
24904 24939 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24905 24940 * among multiple watches that share this callback function
24906 24941 * resultp - scsi watch facility result packet containing scsi
24907 24942 * packet, status byte and sense data
24908 24943 *
24909 24944 * Return Code: 0 - continue the watch task
24910 24945 * non-zero - terminate the watch task
24911 24946 */
24912 24947
24913 24948 static int
24914 24949 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
24915 24950 {
24916 24951 struct sd_lun *un;
24917 24952 struct scsi_status *statusp;
24918 24953 uint8_t *sensep;
24919 24954 struct scsi_pkt *pkt;
24920 24955 uchar_t actual_sense_length;
24921 24956 dev_t dev = (dev_t)arg;
24922 24957
24923 24958 ASSERT(resultp != NULL);
24924 24959 statusp = resultp->statusp;
24925 24960 sensep = (uint8_t *)resultp->sensep;
24926 24961 pkt = resultp->pkt;
24927 24962 actual_sense_length = resultp->actual_sense_length;
24928 24963
24929 24964 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24930 24965 return (ENXIO);
24931 24966 }
24932 24967
24933 24968 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24934 24969 "sd_mhd_watch_cb: reason '%s', status '%s'\n",
24935 24970 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));
24936 24971
24937 24972 /* Begin processing of the status and/or sense data */
24938 24973 if (pkt->pkt_reason != CMD_CMPLT) {
24939 24974 /* Handle the incomplete packet */
24940 24975 sd_mhd_watch_incomplete(un, pkt);
24941 24976 return (0);
24942 24977 } else if (*((unsigned char *)statusp) != STATUS_GOOD) {
24943 24978 if (*((unsigned char *)statusp)
24944 24979 == STATUS_RESERVATION_CONFLICT) {
24945 24980 /*
24946 24981 * Handle a reservation conflict by panicking if
24947 24982 * configured for failfast or by logging the conflict
24948 24983 * and updating the reservation status
24949 24984 */
24950 24985 mutex_enter(SD_MUTEX(un));
24951 24986 if ((un->un_resvd_status & SD_FAILFAST) &&
24952 24987 (sd_failfast_enable)) {
24953 24988 sd_panic_for_res_conflict(un);
24954 24989 /*NOTREACHED*/
24955 24990 }
24956 24991 SD_INFO(SD_LOG_IOCTL_MHD, un,
24957 24992 "sd_mhd_watch_cb: Reservation Conflict\n");
24958 24993 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
24959 24994 mutex_exit(SD_MUTEX(un));
24960 24995 }
24961 24996 }
24962 24997
24963 24998 if (sensep != NULL) {
24964 24999 if (actual_sense_length >= (SENSE_LENGTH - 2)) {
24965 25000 mutex_enter(SD_MUTEX(un));
24966 25001 if ((scsi_sense_asc(sensep) ==
24967 25002 SD_SCSI_RESET_SENSE_CODE) &&
24968 25003 (un->un_resvd_status & SD_RESERVE)) {
24969 25004 /*
24970 25005 * The additional sense code indicates a power
24971 25006 * on or bus device reset has occurred; update
24972 25007 * the reservation status.
24973 25008 */
24974 25009 un->un_resvd_status |=
24975 25010 (SD_LOST_RESERVE | SD_WANT_RESERVE);
24976 25011 SD_INFO(SD_LOG_IOCTL_MHD, un,
24977 25012 "sd_mhd_watch_cb: Lost Reservation\n");
24978 25013 }
24979 25014 } else {
24980 25015 return (0);
24981 25016 }
24982 25017 } else {
24983 25018 mutex_enter(SD_MUTEX(un));
24984 25019 }
24985 25020
24986 25021 if ((un->un_resvd_status & SD_RESERVE) &&
24987 25022 (un->un_resvd_status & SD_LOST_RESERVE)) {
24988 25023 if (un->un_resvd_status & SD_WANT_RESERVE) {
24989 25024 /*
24990 25025 * A reset occurred in between the last probe and this
24991 25026 			 * one, so if a timeout is pending, cancel it.
24992 25027 */
24993 25028 if (un->un_resvd_timeid) {
24994 25029 timeout_id_t temp_id = un->un_resvd_timeid;
24995 25030 un->un_resvd_timeid = NULL;
24996 25031 mutex_exit(SD_MUTEX(un));
24997 25032 (void) untimeout(temp_id);
24998 25033 mutex_enter(SD_MUTEX(un));
24999 25034 }
25000 25035 un->un_resvd_status &= ~SD_WANT_RESERVE;
25001 25036 }
25002 25037 if (un->un_resvd_timeid == 0) {
25003 25038 /* Schedule a timeout to handle the lost reservation */
25004 25039 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
25005 25040 (void *)dev,
25006 25041 drv_usectohz(sd_reinstate_resv_delay));
25007 25042 }
25008 25043 }
25009 25044 mutex_exit(SD_MUTEX(un));
25010 25045 return (0);
25011 25046 }
25012 25047
25013 25048
25014 25049 /*
25015 25050 * Function: sd_mhd_watch_incomplete()
25016 25051 *
25017 25052 * Description: This function is used to find out why a scsi pkt sent by the
25018 25053  *		scsi watch facility was not completed. In some scenarios this
25019 25054  *		routine simply returns; otherwise it sends a bus reset to see
25020 25055  *		whether the drive is still online.
25021 25056 *
25022 25057 * Arguments: un - driver soft state (unit) structure
25023 25058 * pkt - incomplete scsi pkt
25024 25059 */
25025 25060
25026 25061 static void
25027 25062 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
25028 25063 {
25029 25064 int be_chatty;
25030 25065 int perr;
25031 25066
25032 25067 ASSERT(pkt != NULL);
25033 25068 ASSERT(un != NULL);
25034 25069 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
25035 25070 perr = (pkt->pkt_statistics & STAT_PERR);
25036 25071
25037 25072 mutex_enter(SD_MUTEX(un));
25038 25073 if (un->un_state == SD_STATE_DUMPING) {
25039 25074 mutex_exit(SD_MUTEX(un));
25040 25075 return;
25041 25076 }
25042 25077
25043 25078 switch (pkt->pkt_reason) {
25044 25079 case CMD_UNX_BUS_FREE:
25045 25080 /*
25046 25081 * If we had a parity error that caused the target to drop BSY*,
25047 25082 * don't be chatty about it.
25048 25083 */
25049 25084 if (perr && be_chatty) {
25050 25085 be_chatty = 0;
25051 25086 }
25052 25087 break;
25053 25088 case CMD_TAG_REJECT:
25054 25089 /*
25055 25090 * The SCSI-2 spec states that a tag reject will be sent by the
25056 25091 * target if tagged queuing is not supported. A tag reject may
25057 25092 * also be sent during certain initialization periods or to
25058 25093 * control internal resources. For the latter case the target
25059 25094 * may also return Queue Full.
25060 25095 *
25061 25096 * If this driver receives a tag reject from a target that is
25062 25097 * going through an init period or controlling internal
25063 25098 * resources tagged queuing will be disabled. This is a less
25064 25099 * than optimal behavior but the driver is unable to determine
25065 25100 		 * the target state and assumes tagged queuing is not supported.
25066 25101 */
25067 25102 pkt->pkt_flags = 0;
25068 25103 un->un_tagflags = 0;
25069 25104
25070 25105 if (un->un_f_opt_queueing == TRUE) {
25071 25106 un->un_throttle = min(un->un_throttle, 3);
25072 25107 } else {
25073 25108 un->un_throttle = 1;
25074 25109 }
25075 25110 mutex_exit(SD_MUTEX(un));
25076 25111 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
25077 25112 mutex_enter(SD_MUTEX(un));
25078 25113 break;
25079 25114 case CMD_INCOMPLETE:
25080 25115 /*
25081 25116 * The transport stopped with an abnormal state, fallthrough and
25082 25117 * reset the target and/or bus unless selection did not complete
25083 25118 * (indicated by STATE_GOT_BUS) in which case we don't want to
25084 25119 * go through a target/bus reset
25085 25120 */
25086 25121 if (pkt->pkt_state == STATE_GOT_BUS) {
25087 25122 break;
25088 25123 }
25089 25124 /*FALLTHROUGH*/
25090 25125
25091 25126 case CMD_TIMEOUT:
25092 25127 default:
25093 25128 /*
25094 25129 * The lun may still be running the command, so a lun reset
25095 25130 * should be attempted. If the lun reset fails or cannot be
25096 25131 		 * issued, then try a target reset. Lastly try a bus reset.
25097 25132 */
25098 25133 if ((pkt->pkt_statistics &
25099 25134 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
25100 25135 int reset_retval = 0;
25101 25136 mutex_exit(SD_MUTEX(un));
25102 25137 if (un->un_f_allow_bus_device_reset == TRUE) {
25103 25138 if (un->un_f_lun_reset_enabled == TRUE) {
25104 25139 reset_retval =
25105 25140 scsi_reset(SD_ADDRESS(un),
25106 25141 RESET_LUN);
25107 25142 }
25108 25143 if (reset_retval == 0) {
25109 25144 reset_retval =
25110 25145 scsi_reset(SD_ADDRESS(un),
25111 25146 RESET_TARGET);
25112 25147 }
25113 25148 }
25114 25149 if (reset_retval == 0) {
25115 25150 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
25116 25151 }
25117 25152 mutex_enter(SD_MUTEX(un));
25118 25153 }
25119 25154 break;
25120 25155 }
25121 25156
25122 25157 /* A device/bus reset has occurred; update the reservation status. */
25123 25158 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
25124 25159 (STAT_BUS_RESET | STAT_DEV_RESET))) {
25125 25160 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25126 25161 un->un_resvd_status |=
25127 25162 (SD_LOST_RESERVE | SD_WANT_RESERVE);
25128 25163 SD_INFO(SD_LOG_IOCTL_MHD, un,
25129 25164 "sd_mhd_watch_incomplete: Lost Reservation\n");
25130 25165 }
25131 25166 }
25132 25167
25133 25168 /*
25134 25169 * The disk has been turned off; Update the device state.
25135 25170 *
25136 25171 * Note: Should we be offlining the disk here?
25137 25172 */
25138 25173 if (pkt->pkt_state == STATE_GOT_BUS) {
25139 25174 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
25140 25175 "Disk not responding to selection\n");
25141 25176 if (un->un_state != SD_STATE_OFFLINE) {
25142 25177 New_state(un, SD_STATE_OFFLINE);
25143 25178 }
25144 25179 } else if (be_chatty) {
25145 25180 /*
25146 25181 * suppress messages if they are all the same pkt reason;
25147 25182 * with TQ, many (up to 256) are returned with the same
25148 25183 * pkt_reason
25149 25184 */
25150 25185 if (pkt->pkt_reason != un->un_last_pkt_reason) {
25151 25186 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25152 25187 "sd_mhd_watch_incomplete: "
25153 25188 "SCSI transport failed: reason '%s'\n",
25154 25189 scsi_rname(pkt->pkt_reason));
25155 25190 }
25156 25191 }
25157 25192 un->un_last_pkt_reason = pkt->pkt_reason;
25158 25193 mutex_exit(SD_MUTEX(un));
25159 25194 }
25160 25195
25161 25196
25162 25197 /*
25163 25198 * Function: sd_sname()
25164 25199 *
25165 25200 * Description: This is a simple little routine to return a string containing
25166 25201 * a printable description of command status byte for use in
25167 25202 * logging.
25168 25203 *
25169 25204 * Arguments: status - pointer to a status byte
25170 25205 *
25171 25206 * Return Code: char * - string containing status description.
25172 25207 */
25173 25208
25174 25209 static char *
25175 25210 sd_sname(uchar_t status)
25176 25211 {
25177 25212 switch (status & STATUS_MASK) {
25178 25213 case STATUS_GOOD:
25179 25214 return ("good status");
25180 25215 case STATUS_CHECK:
25181 25216 return ("check condition");
25182 25217 case STATUS_MET:
25183 25218 return ("condition met");
25184 25219 case STATUS_BUSY:
25185 25220 return ("busy");
25186 25221 case STATUS_INTERMEDIATE:
25187 25222 return ("intermediate");
25188 25223 case STATUS_INTERMEDIATE_MET:
25189 25224 return ("intermediate - condition met");
25190 25225 case STATUS_RESERVATION_CONFLICT:
25191 25226 return ("reservation_conflict");
25192 25227 case STATUS_TERMINATED:
25193 25228 return ("command terminated");
25194 25229 case STATUS_QFULL:
25195 25230 return ("queue full");
25196 25231 default:
25197 25232 return ("<unknown status>");
25198 25233 }
25199 25234 }
25200 25235
25201 25236
25202 25237 /*
25203 25238 * Function: sd_mhd_resvd_recover()
25204 25239 *
25205 25240 * Description: This function adds a reservation entry to the
25206 25241 * sd_resv_reclaim_request list and signals the reservation
25207 25242 * reclaim thread that there is work pending. If the reservation
25208 25243 * reclaim thread has not been previously created this function
25209 25244 * will kick it off.
25210 25245 *
25211 25246 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25212 25247 * among multiple watches that share this callback function
25213 25248 *
25214 25249 * Context: This routine is called by timeout() and is run in interrupt
25215 25250 * context. It must not sleep or call other functions which may
25216 25251 * sleep.
25217 25252 */
25218 25253
25219 25254 static void
25220 25255 sd_mhd_resvd_recover(void *arg)
25221 25256 {
25222 25257 dev_t dev = (dev_t)arg;
25223 25258 struct sd_lun *un;
25224 25259 struct sd_thr_request *sd_treq = NULL;
25225 25260 struct sd_thr_request *sd_cur = NULL;
25226 25261 struct sd_thr_request *sd_prev = NULL;
25227 25262 int already_there = 0;
25228 25263
25229 25264 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25230 25265 return;
25231 25266 }
25232 25267
25233 25268 mutex_enter(SD_MUTEX(un));
25234 25269 un->un_resvd_timeid = NULL;
25235 25270 if (un->un_resvd_status & SD_WANT_RESERVE) {
25236 25271 /*
25237 25272 * There was a reset so don't issue the reserve, allow the
25238 25273 * sd_mhd_watch_cb callback function to notice this and
25239 25274 * reschedule the timeout for reservation.
25240 25275 */
25241 25276 mutex_exit(SD_MUTEX(un));
25242 25277 return;
25243 25278 }
25244 25279 mutex_exit(SD_MUTEX(un));
25245 25280
25246 25281 /*
25247 25282 * Add this device to the sd_resv_reclaim_request list and the
25248 25283 * sd_resv_reclaim_thread should take care of the rest.
25249 25284 *
25250 25285 * Note: We can't sleep in this context so if the memory allocation
25251 25286 * fails allow the sd_mhd_watch_cb callback function to notice this and
25252 25287 * reschedule the timeout for reservation. (4378460)
25253 25288 */
25254 25289 sd_treq = (struct sd_thr_request *)
25255 25290 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
25256 25291 if (sd_treq == NULL) {
25257 25292 return;
25258 25293 }
25259 25294
25260 25295 sd_treq->sd_thr_req_next = NULL;
25261 25296 sd_treq->dev = dev;
25262 25297 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25263 25298 if (sd_tr.srq_thr_req_head == NULL) {
25264 25299 sd_tr.srq_thr_req_head = sd_treq;
25265 25300 } else {
25266 25301 sd_cur = sd_prev = sd_tr.srq_thr_req_head;
25267 25302 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
25268 25303 if (sd_cur->dev == dev) {
25269 25304 /*
25270 25305 * already in Queue so don't log
25271 25306 * another request for the device
25272 25307 */
25273 25308 already_there = 1;
25274 25309 break;
25275 25310 }
25276 25311 sd_prev = sd_cur;
25277 25312 }
25278 25313 if (!already_there) {
25279 25314 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
25280 25315 "logging request for %lx\n", dev);
25281 25316 sd_prev->sd_thr_req_next = sd_treq;
25282 25317 } else {
25283 25318 kmem_free(sd_treq, sizeof (struct sd_thr_request));
25284 25319 }
25285 25320 }
25286 25321
25287 25322 /*
25288 25323 * Create a kernel thread to do the reservation reclaim and free up this
25289 25324 * thread. We cannot block this thread while we go away to do the
25290 25325 	 * reservation reclaim.
25291 25326 */
25292 25327 if (sd_tr.srq_resv_reclaim_thread == NULL)
25293 25328 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
25294 25329 sd_resv_reclaim_thread, NULL,
25295 25330 0, &p0, TS_RUN, v.v_maxsyspri - 2);
25296 25331
25297 25332 /* Tell the reservation reclaim thread that it has work to do */
25298 25333 cv_signal(&sd_tr.srq_resv_reclaim_cv);
25299 25334 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25300 25335 }
25301 25336
25302 25337 /*
25303 25338 * Function: sd_resv_reclaim_thread()
25304 25339 *
25305 25340 * Description: This function implements the reservation reclaim operations
25306 25341 *
25307 25342  * Arguments: none; the thread pulls pending requests from the global
25308 25343  *		sd_resv_reclaim_request list until it is empty.
25309 25344 */
25310 25345
25311 25346 static void
25312 25347 sd_resv_reclaim_thread()
25313 25348 {
25314 25349 struct sd_lun *un;
25315 25350 struct sd_thr_request *sd_mhreq;
25316 25351
25317 25352 /* Wait for work */
25318 25353 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25319 25354 if (sd_tr.srq_thr_req_head == NULL) {
25320 25355 cv_wait(&sd_tr.srq_resv_reclaim_cv,
25321 25356 &sd_tr.srq_resv_reclaim_mutex);
25322 25357 }
25323 25358
25324 25359 /* Loop while we have work */
25325 25360 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
25326 25361 un = ddi_get_soft_state(sd_state,
25327 25362 SDUNIT(sd_tr.srq_thr_cur_req->dev));
25328 25363 if (un == NULL) {
25329 25364 /*
25330 25365 * softstate structure is NULL so just
25331 25366 * dequeue the request and continue
25332 25367 */
25333 25368 sd_tr.srq_thr_req_head =
25334 25369 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25335 25370 kmem_free(sd_tr.srq_thr_cur_req,
25336 25371 sizeof (struct sd_thr_request));
25337 25372 continue;
25338 25373 }
25339 25374
25340 25375 /* dequeue the request */
25341 25376 sd_mhreq = sd_tr.srq_thr_cur_req;
25342 25377 sd_tr.srq_thr_req_head =
25343 25378 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25344 25379 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25345 25380
25346 25381 /*
25347 25382 * Reclaim reservation only if SD_RESERVE is still set. There
25348 25383 * may have been a call to MHIOCRELEASE before we got here.
25349 25384 */
25350 25385 mutex_enter(SD_MUTEX(un));
25351 25386 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25352 25387 /*
25353 25388 * Note: The SD_LOST_RESERVE flag is cleared before
25354 25389 * reclaiming the reservation. If this is done after the
25355 25390 * call to sd_reserve_release a reservation loss in the
25356 25391 * window between pkt completion of reserve cmd and
25357 25392 * mutex_enter below may not be recognized
25358 25393 */
25359 25394 un->un_resvd_status &= ~SD_LOST_RESERVE;
25360 25395 mutex_exit(SD_MUTEX(un));
25361 25396
25362 25397 if (sd_reserve_release(sd_mhreq->dev,
25363 25398 SD_RESERVE) == 0) {
25364 25399 mutex_enter(SD_MUTEX(un));
25365 25400 un->un_resvd_status |= SD_RESERVE;
25366 25401 mutex_exit(SD_MUTEX(un));
25367 25402 SD_INFO(SD_LOG_IOCTL_MHD, un,
25368 25403 "sd_resv_reclaim_thread: "
25369 25404 "Reservation Recovered\n");
25370 25405 } else {
25371 25406 mutex_enter(SD_MUTEX(un));
25372 25407 un->un_resvd_status |= SD_LOST_RESERVE;
25373 25408 mutex_exit(SD_MUTEX(un));
25374 25409 SD_INFO(SD_LOG_IOCTL_MHD, un,
25375 25410 "sd_resv_reclaim_thread: Failed "
25376 25411 "Reservation Recovery\n");
25377 25412 }
25378 25413 } else {
25379 25414 mutex_exit(SD_MUTEX(un));
25380 25415 }
25381 25416 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25382 25417 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
25383 25418 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25384 25419 sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
25385 25420 /*
25386 25421 * wakeup the destroy thread if anyone is waiting on
25387 25422 * us to complete.
25388 25423 */
25389 25424 cv_signal(&sd_tr.srq_inprocess_cv);
25390 25425 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25391 25426 "sd_resv_reclaim_thread: cv_signalling current request \n");
25392 25427 }
25393 25428
25394 25429 /*
25395 25430 * cleanup the sd_tr structure now that this thread will not exist
25396 25431 */
25397 25432 ASSERT(sd_tr.srq_thr_req_head == NULL);
25398 25433 ASSERT(sd_tr.srq_thr_cur_req == NULL);
25399 25434 sd_tr.srq_resv_reclaim_thread = NULL;
25400 25435 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25401 25436 thread_exit();
25402 25437 }
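Stripped of the sd specifics, the loop above is a single-consumer work queue: producers append under a mutex and cv_signal(), and the consumer drains the list and exits once it is empty. A userland pthreads sketch of the same protocol (types and names invented for illustration):

	#include <pthread.h>
	#include <stdlib.h>

	struct req {
		struct req *next;
	};

	static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t qcv = PTHREAD_COND_INITIALIZER;
	static struct req *qhead;

	static void *
	reclaim_thread(void *arg)
	{
		struct req *r;

		(void) arg;
		(void) pthread_mutex_lock(&qlock);
		while (qhead == NULL)
			(void) pthread_cond_wait(&qcv, &qlock);
		while ((r = qhead) != NULL) {
			qhead = r->next;	/* dequeue under the lock */
			(void) pthread_mutex_unlock(&qlock);
			/* ... reclaim the reservation for r ... */
			free(r);
			(void) pthread_mutex_lock(&qlock);
		}
		(void) pthread_mutex_unlock(&qlock);
		return (NULL);
	}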
25403 25438
25404 25439
25405 25440 /*
25406 25441 * Function: sd_rmv_resv_reclaim_req()
25407 25442 *
25408 25443 * Description: This function removes any pending reservation reclaim requests
25409 25444 * for the specified device.
25410 25445 *
25411 25446 * Arguments: dev - the device 'dev_t'
25412 25447 */
25413 25448
25414 25449 static void
25415 25450 sd_rmv_resv_reclaim_req(dev_t dev)
25416 25451 {
25417 25452 struct sd_thr_request *sd_mhreq;
25418 25453 struct sd_thr_request *sd_prev;
25419 25454
25420 25455 /* Remove a reservation reclaim request from the list */
25421 25456 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25422 25457 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
25423 25458 /*
25424 25459 * We are attempting to reinstate reservation for
25425 25460 * this device. We wait for sd_reserve_release()
25426 25461 * to return before we return.
25427 25462 */
25428 25463 cv_wait(&sd_tr.srq_inprocess_cv,
25429 25464 &sd_tr.srq_resv_reclaim_mutex);
25430 25465 } else {
25431 25466 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
25432 25467 if (sd_mhreq && sd_mhreq->dev == dev) {
25433 25468 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
25434 25469 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25435 25470 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25436 25471 return;
25437 25472 }
25438 25473 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
25439 25474 if (sd_mhreq && sd_mhreq->dev == dev) {
25440 25475 break;
25441 25476 }
25442 25477 sd_prev = sd_mhreq;
25443 25478 }
25444 25479 if (sd_mhreq != NULL) {
25445 25480 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
25446 25481 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25447 25482 }
25448 25483 }
25449 25484 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25450 25485 }
25451 25486
25452 25487
25453 25488 /*
25454 25489 * Function: sd_mhd_reset_notify_cb()
25455 25490 *
25456 25491 * Description: This is a call back function for scsi_reset_notify. This
25457 25492 * function updates the softstate reserved status and logs the
25458 25493 * reset. The driver scsi watch facility callback function
25459 25494 * (sd_mhd_watch_cb) and reservation reclaim thread functionality
25460 25495 * will reclaim the reservation.
25461 25496 *
25462 25497 * Arguments: arg - driver soft state (unit) structure
25463 25498 */
25464 25499
25465 25500 static void
25466 25501 sd_mhd_reset_notify_cb(caddr_t arg)
25467 25502 {
25468 25503 struct sd_lun *un = (struct sd_lun *)arg;
25469 25504
25470 25505 mutex_enter(SD_MUTEX(un));
25471 25506 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25472 25507 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25473 25508 SD_INFO(SD_LOG_IOCTL_MHD, un,
25474 25509 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25475 25510 }
25476 25511 mutex_exit(SD_MUTEX(un));
25477 25512 }
25478 25513
25479 25514
25480 25515 /*
25481 25516 * Function: sd_take_ownership()
25482 25517 *
25483 25518 * Description: This routine implements an algorithm to achieve a stable
25484 25519 * reservation on disks which don't implement priority reserve,
25485 25520  *		and makes sure that other hosts lose re-reservation attempts.
25486 25521  *		This algorithm consists of a loop that keeps issuing the RESERVE
25487 25522  *		for some period of time (min_ownership_delay, default 6 seconds).
25488 25523 * During that loop, it looks to see if there has been a bus device
25489 25524 * reset or bus reset (both of which cause an existing reservation
25490 25525 * to be lost). If the reservation is lost issue RESERVE until a
25491 25526 * period of min_ownership_delay with no resets has gone by, or
25492 25527 * until max_ownership_delay has expired. This loop ensures that
25493 25528 * the host really did manage to reserve the device, in spite of
25494 25529 * resets. The looping for min_ownership_delay (default six
25495 25530 * seconds) is important to early generation clustering products,
25496 25531 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25497 25532 * MHIOCENFAILFAST periodic timer of two seconds. By having
25498 25533 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25499 25534 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25500 25535 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25501 25536 * have already noticed, via the MHIOCENFAILFAST polling, that it
25502 25537 * no longer "owns" the disk and will have panicked itself. Thus,
25503 25538 * the host issuing the MHIOCTKOWN is assured (with timing
25504 25539 * dependencies) that by the time it actually starts to use the
25505 25540 * disk for real work, the old owner is no longer accessing it.
25506 25541 *
25507 25542 * min_ownership_delay is the minimum amount of time for which the
25508 25543 * disk must be reserved continuously devoid of resets before the
25509 25544 * MHIOCTKOWN ioctl will return success.
25510 25545 *
25511 25546 * max_ownership_delay indicates the amount of time by which the
25512 25547 * take ownership should succeed or timeout with an error.
25513 25548 *
25514 25549 * Arguments: dev - the device 'dev_t'
25515 25550 * *p - struct containing timing info.
25516 25551 *
25517 25552 * Return Code: 0 for success or error code
25518 25553 */
25519 25554
25520 25555 static int
25521 25556 sd_take_ownership(dev_t dev, struct mhioctkown *p)
25522 25557 {
25523 25558 struct sd_lun *un;
25524 25559 int rval;
25525 25560 int err;
25526 25561 int reservation_count = 0;
25527 25562 int min_ownership_delay = 6000000; /* in usec */
25528 25563 int max_ownership_delay = 30000000; /* in usec */
25529 25564 clock_t start_time; /* starting time of this algorithm */
25530 25565 clock_t end_time; /* time limit for giving up */
25531 25566 clock_t ownership_time; /* time limit for stable ownership */
25532 25567 clock_t current_time;
25533 25568 clock_t previous_current_time;
25534 25569
25535 25570 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25536 25571 return (ENXIO);
25537 25572 }
25538 25573
25539 25574 /*
25540 25575 * Attempt a device reservation. A priority reservation is requested.
25541 25576 */
25542 25577 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
25543 25578 != SD_SUCCESS) {
25544 25579 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25545 25580 "sd_take_ownership: return(1)=%d\n", rval);
25546 25581 return (rval);
25547 25582 }
25548 25583
25549 25584 /* Update the softstate reserved status to indicate the reservation */
25550 25585 mutex_enter(SD_MUTEX(un));
25551 25586 un->un_resvd_status |= SD_RESERVE;
25552 25587 un->un_resvd_status &=
25553 25588 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
25554 25589 mutex_exit(SD_MUTEX(un));
25555 25590
25556 25591 if (p != NULL) {
25557 25592 if (p->min_ownership_delay != 0) {
25558 25593 min_ownership_delay = p->min_ownership_delay * 1000;
25559 25594 }
25560 25595 if (p->max_ownership_delay != 0) {
25561 25596 max_ownership_delay = p->max_ownership_delay * 1000;
25562 25597 }
25563 25598 }
25564 25599 SD_INFO(SD_LOG_IOCTL_MHD, un,
25565 25600 "sd_take_ownership: min, max delays: %d, %d\n",
25566 25601 min_ownership_delay, max_ownership_delay);
25567 25602
25568 25603 start_time = ddi_get_lbolt();
25569 25604 current_time = start_time;
25570 25605 ownership_time = current_time + drv_usectohz(min_ownership_delay);
25571 25606 end_time = start_time + drv_usectohz(max_ownership_delay);
25572 25607
25573 25608 while (current_time - end_time < 0) {
25574 25609 delay(drv_usectohz(500000));
25575 25610
25576 25611 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
25577 25612 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
25578 25613 mutex_enter(SD_MUTEX(un));
25579 25614 rval = (un->un_resvd_status &
25580 25615 SD_RESERVATION_CONFLICT) ? EACCES : EIO;
25581 25616 mutex_exit(SD_MUTEX(un));
25582 25617 break;
25583 25618 }
25584 25619 }
25585 25620 previous_current_time = current_time;
25586 25621 current_time = ddi_get_lbolt();
25587 25622 mutex_enter(SD_MUTEX(un));
25588 25623 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
25589 25624 ownership_time = ddi_get_lbolt() +
25590 25625 drv_usectohz(min_ownership_delay);
25591 25626 reservation_count = 0;
25592 25627 } else {
25593 25628 reservation_count++;
25594 25629 }
25595 25630 un->un_resvd_status |= SD_RESERVE;
25596 25631 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
25597 25632 mutex_exit(SD_MUTEX(un));
25598 25633
25599 25634 SD_INFO(SD_LOG_IOCTL_MHD, un,
25600 25635 "sd_take_ownership: ticks for loop iteration=%ld, "
25601 25636 "reservation=%s\n", (current_time - previous_current_time),
25602 25637 reservation_count ? "ok" : "reclaimed");
25603 25638
25604 25639 if (current_time - ownership_time >= 0 &&
25605 25640 reservation_count >= 4) {
25606 25641 rval = 0; /* Achieved a stable ownership */
25607 25642 break;
25608 25643 }
25609 25644 if (current_time - end_time >= 0) {
25610 25645 rval = EACCES; /* No ownership in max possible time */
25611 25646 break;
25612 25647 }
25613 25648 }
25614 25649 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25615 25650 "sd_take_ownership: return(2)=%d\n", rval);
25616 25651 return (rval);
25617 25652 }
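One detail worth calling out in the loop above: deadlines are tested as signed tick differences (current_time - end_time >= 0) rather than direct comparisons, the usual idiom that stays correct if the tick counter wraps. A sketch:

	#include <sys/types.h>

	/* True once 'now' has reached 'deadline', even across counter wrap. */
	static int
	deadline_reached(clock_t now, clock_t deadline)
	{
		return (now - deadline >= 0);
	}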
25618 25653
25619 25654
25620 25655 /*
25621 25656 * Function: sd_reserve_release()
25622 25657 *
25623 25658 * Description: This function builds and sends scsi RESERVE, RELEASE, and
25624 25659 * PRIORITY RESERVE commands based on a user specified command type
25625 25660 *
25626 25661 * Arguments: dev - the device 'dev_t'
25627 25662 * cmd - user specified command type; one of SD_PRIORITY_RESERVE,
25628 25663 * SD_RESERVE, SD_RELEASE
25629 25664 *
25630 25665 * Return Code: 0 or Error Code
25631 25666 */
25632 25667
25633 25668 static int
25634 25669 sd_reserve_release(dev_t dev, int cmd)
25635 25670 {
25636 25671 struct uscsi_cmd *com = NULL;
25637 25672 struct sd_lun *un = NULL;
25638 25673 char cdb[CDB_GROUP0];
25639 25674 int rval;
25640 25675
25641 25676 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
25642 25677 (cmd == SD_PRIORITY_RESERVE));
25643 25678
25644 25679 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25645 25680 return (ENXIO);
25646 25681 }
25647 25682
25648 25683 /* instantiate and initialize the command and cdb */
25649 25684 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25650 25685 bzero(cdb, CDB_GROUP0);
25651 25686 com->uscsi_flags = USCSI_SILENT;
25652 25687 com->uscsi_timeout = un->un_reserve_release_time;
25653 25688 com->uscsi_cdblen = CDB_GROUP0;
25654 25689 com->uscsi_cdb = cdb;
25655 25690 if (cmd == SD_RELEASE) {
25656 25691 cdb[0] = SCMD_RELEASE;
25657 25692 } else {
25658 25693 cdb[0] = SCMD_RESERVE;
25659 25694 }
25660 25695
25661 25696 /* Send the command. */
25662 25697 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25663 25698 SD_PATH_STANDARD);
25664 25699
25665 25700 /*
25666 25701 * "break" a reservation that is held by another host, by issuing a
25667 25702 * reset if priority reserve is desired, and we could not get the
25668 25703 * device.
25669 25704 */
25670 25705 if ((cmd == SD_PRIORITY_RESERVE) &&
25671 25706 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25672 25707 /*
25673 25708 * First try to reset the LUN. If we cannot, then try a target
25674 25709 * reset, followed by a bus reset if the target reset fails.
25675 25710 */
25676 25711 int reset_retval = 0;
25677 25712 if (un->un_f_lun_reset_enabled == TRUE) {
25678 25713 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
25679 25714 }
25680 25715 if (reset_retval == 0) {
25681 25716 /* The LUN reset either failed or was not issued */
25682 25717 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
25683 25718 }
25684 25719 if ((reset_retval == 0) &&
25685 25720 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
25686 25721 rval = EIO;
25687 25722 kmem_free(com, sizeof (*com));
25688 25723 return (rval);
25689 25724 }
25690 25725
25691 25726 bzero(com, sizeof (struct uscsi_cmd));
25692 25727 com->uscsi_flags = USCSI_SILENT;
25693 25728 com->uscsi_cdb = cdb;
25694 25729 com->uscsi_cdblen = CDB_GROUP0;
25695 25730 com->uscsi_timeout = 5;
25696 25731
25697 25732 /*
25698 25733 * Reissue the last reserve command, this time without request
25699 25734 * sense. Assume that it is just a regular reserve command.
25700 25735 */
25701 25736 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25702 25737 SD_PATH_STANDARD);
25703 25738 }
25704 25739
25705 25740 /* Return an error if still getting a reservation conflict. */
25706 25741 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25707 25742 rval = EACCES;
25708 25743 }
25709 25744
25710 25745 kmem_free(com, sizeof (*com));
25711 25746 return (rval);
25712 25747 }
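For reference, the CDBs built above are the 6-byte RESERVE(6)/RELEASE(6) forms from SPC: the opcode (SCMD_RESERVE is 0x16, SCMD_RELEASE is 0x17) followed by five zero bytes. A sketch (builder name invented):

	#include <stdint.h>
	#include <string.h>

	static void
	build_reserve_cdb(uint8_t cdb[6], int release)
	{
		(void) memset(cdb, 0, 6);
		cdb[0] = release ? 0x17 : 0x16;	/* RELEASE(6) : RESERVE(6) */
	}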
25713 25748
25714 25749
25715 25750 #define SD_NDUMP_RETRIES 12
25716 25751 /*
25717 25752 * System Crash Dump routine
25718 25753 */
25719 25754
25720 25755 static int
25721 25756 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25722 25757 {
25723 25758 int instance;
25724 25759 int partition;
25725 25760 int i;
25726 25761 int err;
25727 25762 struct sd_lun *un;
25728 25763 struct scsi_pkt *wr_pktp;
25729 25764 struct buf *wr_bp;
25730 25765 struct buf wr_buf;
25731 25766 daddr_t tgt_byte_offset; /* rmw - byte offset for target */
25732 25767 daddr_t tgt_blkno; /* rmw - blkno for target */
25733 25768 size_t tgt_byte_count; /* rmw - # of bytes to xfer */
25734 25769 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */
25735 25770 size_t io_start_offset;
25736 25771 int doing_rmw = FALSE;
25737 25772 int rval;
25738 25773 ssize_t dma_resid;
25739 25774 daddr_t oblkno;
25740 25775 diskaddr_t nblks = 0;
25741 25776 diskaddr_t start_block;
25742 25777
25743 25778 instance = SDUNIT(dev);
25744 25779 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25780 + (un->un_state == SD_STATE_ATTACHING) ||
25781 + (un->un_state == SD_STATE_ATTACH_FAILED) ||
25745 25782 !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25746 25783 return (ENXIO);
25747 25784 }
25748 25785
25749 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25750 -
25751 25786 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25752 25787
25753 25788 partition = SDPART(dev);
25754 25789 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25755 25790
25756 25791 if (!(NOT_DEVBSIZE(un))) {
25757 25792 int secmask = 0;
25758 25793 int blknomask = 0;
25759 25794
25760 25795 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
25761 25796 secmask = un->un_tgt_blocksize - 1;
25762 25797
25763 25798 if (blkno & blknomask) {
25764 25799 SD_TRACE(SD_LOG_DUMP, un,
25765 25800 "sddump: dump start block not modulo %d\n",
25766 25801 un->un_tgt_blocksize);
25767 25802 return (EINVAL);
25768 25803 }
25769 25804
25770 25805 if ((nblk * DEV_BSIZE) & secmask) {
25771 25806 SD_TRACE(SD_LOG_DUMP, un,
25772 25807 "sddump: dump length not modulo %d\n",
25773 25808 un->un_tgt_blocksize);
25774 25809 return (EINVAL);
25775 25810 }
25776 25811
25777 25812 }
25778 25813
25779 25814 	/* Validate the blocks to dump against the partition size. */
25780 25815
25781 25816 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
25782 25817 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
25783 25818
25784 25819 if (NOT_DEVBSIZE(un)) {
25785 25820 if ((blkno + nblk) > nblks) {
25786 25821 SD_TRACE(SD_LOG_DUMP, un,
25787 25822 "sddump: dump range larger than partition: "
25788 25823 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25789 25824 blkno, nblk, nblks);
25790 25825 return (EINVAL);
25791 25826 }
25792 25827 } else {
25793 25828 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
25794 25829 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
25795 25830 SD_TRACE(SD_LOG_DUMP, un,
25796 25831 "sddump: dump range larger than partition: "
25797 25832 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25798 25833 blkno, nblk, nblks);
25799 25834 return (EINVAL);
25800 25835 }
25801 25836 }
25802 25837
25803 25838 mutex_enter(&un->un_pm_mutex);
25804 25839 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25805 25840 struct scsi_pkt *start_pktp;
25806 25841
25807 25842 mutex_exit(&un->un_pm_mutex);
25808 25843
25809 25844 /*
25810 25845 		 * Use the pm framework to power on the HBA first.
25811 25846 */
25812 25847 (void) pm_raise_power(SD_DEVINFO(un), 0,
25813 25848 SD_PM_STATE_ACTIVE(un));
25814 25849
25815 25850 /*
25816 25851 		 * Dump no longer uses sdpower to power on a device; it is
25817 25852 		 * inlined here so it can be done in polled mode.
25818 25853 */
25819 25854
25820 25855 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25821 25856
25822 25857 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25823 25858 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25824 25859
25825 25860 if (start_pktp == NULL) {
25826 25861 /* We were not given a SCSI packet, fail. */
25827 25862 return (EIO);
25828 25863 }
25829 25864 bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25830 25865 start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25831 25866 start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25832 25867 start_pktp->pkt_flags = FLAG_NOINTR;
25833 25868
25834 25869 mutex_enter(SD_MUTEX(un));
25835 25870 SD_FILL_SCSI1_LUN(un, start_pktp);
25836 25871 mutex_exit(SD_MUTEX(un));
25837 25872 /*
25838 25873 * Scsi_poll returns 0 (success) if the command completes and
25839 25874 * the status block is STATUS_GOOD.
25840 25875 */
25841 25876 if (sd_scsi_poll(un, start_pktp) != 0) {
25842 25877 scsi_destroy_pkt(start_pktp);
25843 25878 return (EIO);
25844 25879 }
25845 25880 scsi_destroy_pkt(start_pktp);
25846 25881 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
25847 25882 SD_PM_STATE_CHANGE);
25848 25883 } else {
25849 25884 mutex_exit(&un->un_pm_mutex);
25850 25885 }
25851 25886
25852 25887 mutex_enter(SD_MUTEX(un));
25853 25888 un->un_throttle = 0;
25854 25889
25855 25890 /*
25856 25891 * The first time through, reset the specific target device.
25857 25892 	 * However, when cpr calls sddump we know that sd is in a
25858 25893 	 * good state, so no bus reset is required.
25859 25894 * Clear sense data via Request Sense cmd.
25860 25895 * In sddump we don't care about allow_bus_device_reset anymore
25861 25896 */
25862 25897
25863 25898 if ((un->un_state != SD_STATE_SUSPENDED) &&
25864 25899 (un->un_state != SD_STATE_DUMPING)) {
25865 25900
25866 25901 New_state(un, SD_STATE_DUMPING);
25867 25902
25868 25903 if (un->un_f_is_fibre == FALSE) {
25869 25904 mutex_exit(SD_MUTEX(un));
25870 25905 /*
25871 25906 * Attempt a bus reset for parallel scsi.
25872 25907 *
25873 25908 * Note: A bus reset is required because on some host
25874 25909 * systems (i.e. E420R) a bus device reset is
25875 25910 * insufficient to reset the state of the target.
25876 25911 *
25877 25912 * Note: Don't issue the reset for fibre-channel,
25878 25913 * because this tends to hang the bus (loop) for
25879 25914 * too long while everyone is logging out and in
25880 25915 * and the deadman timer for dumping will fire
25881 25916 * before the dump is complete.
25882 25917 */
25883 25918 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25884 25919 mutex_enter(SD_MUTEX(un));
25885 25920 Restore_state(un);
25886 25921 mutex_exit(SD_MUTEX(un));
25887 25922 return (EIO);
25888 25923 }
25889 25924
25890 25925 /* Delay to give the device some recovery time. */
25891 25926 drv_usecwait(10000);
25892 25927
25893 25928 if (sd_send_polled_RQS(un) == SD_FAILURE) {
25894 25929 SD_INFO(SD_LOG_DUMP, un,
25895 25930 "sddump: sd_send_polled_RQS failed\n");
25896 25931 }
25897 25932 mutex_enter(SD_MUTEX(un));
25898 25933 }
25899 25934 }
25900 25935
25901 25936 /*
25902 25937 * Convert the partition-relative block number to a
25903 25938 * disk physical block number.
25904 25939 */
25905 25940 if (NOT_DEVBSIZE(un)) {
25906 25941 blkno += start_block;
25907 25942 } else {
25908 25943 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
25909 25944 blkno += start_block;
25910 25945 }
25911 25946
25912 25947 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25913 25948
25914 25949
25915 25950 /*
25916 25951 * Check if the device has a non-512 block size.
25917 25952 */
25918 25953 wr_bp = NULL;
25919 25954 if (NOT_DEVBSIZE(un)) {
25920 25955 tgt_byte_offset = blkno * un->un_sys_blocksize;
25921 25956 tgt_byte_count = nblk * un->un_sys_blocksize;
25922 25957 if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25923 25958 (tgt_byte_count % un->un_tgt_blocksize)) {
25924 25959 doing_rmw = TRUE;
25925 25960 /*
25926 25961 			 * Calculate the block number and the number of blocks
25927 25962 			 * in terms of the media block size.
25928 25963 */
25929 25964 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25930 25965 tgt_nblk =
25931 25966 ((tgt_byte_offset + tgt_byte_count +
25932 25967 (un->un_tgt_blocksize - 1)) /
25933 25968 un->un_tgt_blocksize) - tgt_blkno;
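			/*
			 * Worked example with hypothetical numbers: for
			 * 512-byte system blocks on a 4K target, blkno 3 and
			 * nblk 2 give tgt_byte_offset 1536 and tgt_byte_count
			 * 1024, so tgt_blkno is 0 and tgt_nblk is
			 * ((1536 + 1024 + 4095) / 4096) - 0 = 1: one media
			 * block covers the whole misaligned span.
			 */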
25934 25969
25935 25970 /*
25936 25971 			 * Invoke the routine that does the read part of the
25937 25972 			 * read-modify-write.
25938 25973 * Note that this routine returns a pointer to
25939 25974 * a valid bp in wr_bp.
25940 25975 */
25941 25976 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
25942 25977 &wr_bp);
25943 25978 if (err) {
25944 25979 mutex_exit(SD_MUTEX(un));
25945 25980 return (err);
25946 25981 }
25947 25982 /*
25948 25983 			 * The offset is calculated as
25949 25984 			 * (original block # * system block size) -
25950 25985 			 * (new block # * target block size).
25951 25986 */
25952 25987 io_start_offset =
25953 25988 ((uint64_t)(blkno * un->un_sys_blocksize)) -
25954 25989 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
25955 25990
25956 25991 ASSERT(io_start_offset < un->un_tgt_blocksize);
25957 25992 /*
25958 25993 * Do the modify portion of read modify write.
25959 25994 */
25960 25995 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
25961 25996 (size_t)nblk * un->un_sys_blocksize);
25962 25997 } else {
25963 25998 doing_rmw = FALSE;
25964 25999 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25965 26000 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
25966 26001 }
25967 26002
25968 26003 /* Convert blkno and nblk to target blocks */
25969 26004 blkno = tgt_blkno;
25970 26005 nblk = tgt_nblk;
25971 26006 } else {
25972 26007 wr_bp = &wr_buf;
25973 26008 bzero(wr_bp, sizeof (struct buf));
25974 26009 wr_bp->b_flags = B_BUSY;
25975 26010 wr_bp->b_un.b_addr = addr;
25976 26011 wr_bp->b_bcount = nblk << DEV_BSHIFT;
25977 26012 wr_bp->b_resid = 0;
25978 26013 }
25979 26014
25980 26015 mutex_exit(SD_MUTEX(un));
25981 26016
25982 26017 /*
25983 26018 * Obtain a SCSI packet for the write command.
25984 26019 * It should be safe to call the allocator here without
25985 26020 * worrying about being locked for DVMA mapping because
25986 26021 * the address we're passed is already a DVMA mapping
25987 26022 *
25988 26023 * We are also not going to worry about semaphore ownership
25989 26024 * in the dump buffer. Dumping is single threaded at present.
25990 26025 */
25991 26026
25992 26027 wr_pktp = NULL;
25993 26028
25994 26029 dma_resid = wr_bp->b_bcount;
25995 26030 oblkno = blkno;
25996 26031
25997 26032 if (!(NOT_DEVBSIZE(un))) {
25998 26033 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE);
25999 26034 }
26000 26035
26001 26036 while (dma_resid != 0) {
26002 26037
26003 26038 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26004 26039 wr_bp->b_flags &= ~B_ERROR;
26005 26040
26006 26041 if (un->un_partial_dma_supported == 1) {
26007 26042 blkno = oblkno +
26008 26043 ((wr_bp->b_bcount - dma_resid) /
26009 26044 un->un_tgt_blocksize);
26010 26045 nblk = dma_resid / un->un_tgt_blocksize;
26011 26046
26012 26047 if (wr_pktp) {
26013 26048 /*
26014 26049 * Partial DMA transfers after initial transfer
26015 26050 */
26016 26051 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp,
26017 26052 blkno, nblk);
26018 26053 } else {
26019 26054 /* Initial transfer */
26020 26055 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26021 26056 un->un_pkt_flags, NULL_FUNC, NULL,
26022 26057 blkno, nblk);
26023 26058 }
26024 26059 } else {
26025 26060 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26026 26061 0, NULL_FUNC, NULL, blkno, nblk);
26027 26062 }
26028 26063
26029 26064 if (rval == 0) {
26030 26065 /* We were given a SCSI packet, continue. */
26031 26066 break;
26032 26067 }
26033 26068
26034 26069 if (i == 0) {
26035 26070 if (wr_bp->b_flags & B_ERROR) {
26036 26071 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26037 26072 "no resources for dumping; "
26038 26073 "error code: 0x%x, retrying",
26039 26074 geterror(wr_bp));
26040 26075 } else {
26041 26076 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26042 26077 "no resources for dumping; retrying");
26043 26078 }
26044 26079 } else if (i != (SD_NDUMP_RETRIES - 1)) {
26045 26080 if (wr_bp->b_flags & B_ERROR) {
26046 26081 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26047 26082 "no resources for dumping; error code: "
26048 26083 "0x%x, retrying\n", geterror(wr_bp));
26049 26084 }
26050 26085 } else {
26051 26086 if (wr_bp->b_flags & B_ERROR) {
26052 26087 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26053 26088 "no resources for dumping; "
26054 26089 "error code: 0x%x, retries failed, "
26055 26090 "giving up.\n", geterror(wr_bp));
26056 26091 } else {
26057 26092 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26058 26093 "no resources for dumping; "
26059 26094 "retries failed, giving up.\n");
26060 26095 }
26061 26096 mutex_enter(SD_MUTEX(un));
26062 26097 Restore_state(un);
26063 26098 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
26064 26099 mutex_exit(SD_MUTEX(un));
26065 26100 scsi_free_consistent_buf(wr_bp);
26066 26101 } else {
26067 26102 mutex_exit(SD_MUTEX(un));
26068 26103 }
26069 26104 return (EIO);
26070 26105 }
26071 26106 drv_usecwait(10000);
26072 26107 }
26073 26108
26074 26109 if (un->un_partial_dma_supported == 1) {
26075 26110 /*
26076 26111 * save the resid from PARTIAL_DMA
26077 26112 */
26078 26113 dma_resid = wr_pktp->pkt_resid;
26079 26114 if (dma_resid != 0)
26080 26115 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
26081 26116 wr_pktp->pkt_resid = 0;
26082 26117 } else {
26083 26118 dma_resid = 0;
26084 26119 }
26085 26120
26086 26121 /* SunBug 1222170 */
26087 26122 wr_pktp->pkt_flags = FLAG_NOINTR;
26088 26123
26089 26124 err = EIO;
26090 26125 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26091 26126
26092 26127 /*
26093 26128 * Scsi_poll returns 0 (success) if the command completes and
26094 26129 * the status block is STATUS_GOOD. We should only check
26095 26130 * errors if this condition is not true. Even then we should
26096 26131 * send our own request sense packet only if we have a check
26097 26132 * condition and auto request sense has not been performed by
26098 26133 * the hba.
26099 26134 */
26100 26135 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");
26101 26136
26102 26137 if ((sd_scsi_poll(un, wr_pktp) == 0) &&
26103 26138 (wr_pktp->pkt_resid == 0)) {
26104 26139 err = SD_SUCCESS;
26105 26140 break;
26106 26141 }
26107 26142
26108 26143 /*
26109 26144 		 * Check CMD_DEV_GONE first; give up if the device is gone.
26110 26145 */
26111 26146 if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
26112 26147 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26113 26148 "Error while dumping state...Device is gone\n");
26114 26149 break;
26115 26150 }
26116 26151
26117 26152 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
26118 26153 SD_INFO(SD_LOG_DUMP, un,
26119 26154 "sddump: write failed with CHECK, try # %d\n", i);
26120 26155 			if ((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0) {
26121 26156 (void) sd_send_polled_RQS(un);
26122 26157 }
26123 26158
26124 26159 continue;
26125 26160 }
26126 26161
26127 26162 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
26128 26163 int reset_retval = 0;
26129 26164
26130 26165 SD_INFO(SD_LOG_DUMP, un,
26131 26166 "sddump: write failed with BUSY, try # %d\n", i);
26132 26167
26133 26168 if (un->un_f_lun_reset_enabled == TRUE) {
26134 26169 reset_retval = scsi_reset(SD_ADDRESS(un),
26135 26170 RESET_LUN);
26136 26171 }
26137 26172 if (reset_retval == 0) {
26138 26173 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
26139 26174 }
26140 26175 (void) sd_send_polled_RQS(un);
26141 26176
26142 26177 } else {
26143 26178 SD_INFO(SD_LOG_DUMP, un,
26144 26179 "sddump: write failed with 0x%x, try # %d\n",
26145 26180 SD_GET_PKT_STATUS(wr_pktp), i);
26146 26181 mutex_enter(SD_MUTEX(un));
26147 26182 sd_reset_target(un, wr_pktp);
26148 26183 mutex_exit(SD_MUTEX(un));
26149 26184 }
26150 26185
26151 26186 /*
26152 26187 * If we are not getting anywhere with lun/target resets,
26153 26188 * let's reset the bus.
26154 26189 */
26155 26190 			if (i == SD_NDUMP_RETRIES / 2) {
26156 26191 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
26157 26192 (void) sd_send_polled_RQS(un);
26158 26193 }
26159 26194 }
26160 26195 }
26161 26196
26162 26197 scsi_destroy_pkt(wr_pktp);
26163 26198 mutex_enter(SD_MUTEX(un));
26164 26199 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
26165 26200 mutex_exit(SD_MUTEX(un));
26166 26201 scsi_free_consistent_buf(wr_bp);
26167 26202 } else {
26168 26203 mutex_exit(SD_MUTEX(un));
26169 26204 }
26170 26205 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
26171 26206 return (err);
26172 26207 }
26173 26208
26174 26209 /*
26175 26210 * Function: sd_scsi_poll()
26176 26211 *
26177 26212 * Description: This is a wrapper for the scsi_poll call.
26178 26213 *
26179 26214 * Arguments: sd_lun - The unit structure
26180 26215 * scsi_pkt - The scsi packet being sent to the device.
26181 26216 *
26182 26217 * Return Code: 0 - Command completed successfully with good status
26183 26218 * -1 - Command failed. This could indicate a check condition
26184 26219 * or other status value requiring recovery action.
26185 26220 *
26186 26221 * NOTE: This code is only called off sddump().
26187 26222 */
26188 26223
26189 26224 static int
26190 26225 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
26191 26226 {
26192 26227 int status;
26193 26228
26194 26229 ASSERT(un != NULL);
26195 26230 ASSERT(!mutex_owned(SD_MUTEX(un)));
26196 26231 ASSERT(pktp != NULL);
26197 26232
26198 26233 status = SD_SUCCESS;
26199 26234
26200 26235 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
26201 26236 pktp->pkt_flags |= un->un_tagflags;
26202 26237 pktp->pkt_flags &= ~FLAG_NODISCON;
26203 26238 }
26204 26239
26205 26240 status = sd_ddi_scsi_poll(pktp);
26206 26241 /*
26207 26242 * Scsi_poll returns 0 (success) if the command completes and the
26208 26243 * status block is STATUS_GOOD. We should only check errors if this
26209 26244 * condition is not true. Even then we should send our own request
26210 26245 * sense packet only if we have a check condition and auto
26211 26246 * request sense has not been performed by the hba.
26212 26247 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
26213 26248 */
26214 26249 if ((status != SD_SUCCESS) &&
26215 26250 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
26216 26251 (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
26217 26252 (pktp->pkt_reason != CMD_DEV_GONE))
26218 26253 (void) sd_send_polled_RQS(un);
26219 26254
26220 26255 return (status);
26221 26256 }
26222 26257
26223 26258 /*
26224 26259 * Function: sd_send_polled_RQS()
26225 26260 *
26226 26261 * Description: This sends the request sense command to a device.
26227 26262 *
26228 26263 * Arguments: sd_lun - The unit structure
26229 26264 *
26230 26265 * Return Code: 0 - Command completed successfully with good status
26231 26266 * -1 - Command failed.
26232 26267 *
26233 26268 */
26234 26269
26235 26270 static int
26236 26271 sd_send_polled_RQS(struct sd_lun *un)
26237 26272 {
26238 26273 int ret_val;
26239 26274 struct scsi_pkt *rqs_pktp;
26240 26275 struct buf *rqs_bp;
26241 26276
26242 26277 ASSERT(un != NULL);
26243 26278 ASSERT(!mutex_owned(SD_MUTEX(un)));
26244 26279
26245 26280 ret_val = SD_SUCCESS;
26246 26281
26247 26282 rqs_pktp = un->un_rqs_pktp;
26248 26283 rqs_bp = un->un_rqs_bp;
26249 26284
26250 26285 mutex_enter(SD_MUTEX(un));
26251 26286
26252 26287 if (un->un_sense_isbusy) {
26253 26288 ret_val = SD_FAILURE;
26254 26289 mutex_exit(SD_MUTEX(un));
26255 26290 return (ret_val);
26256 26291 }
26257 26292
26258 26293 /*
26259 26294 * If the request sense buffer (and packet) is not in use,
26260 26295 	 * let's set un_sense_isbusy and send our packet.
26261 26296 */
26262 26297 un->un_sense_isbusy = 1;
26263 26298 rqs_pktp->pkt_resid = 0;
26264 26299 rqs_pktp->pkt_reason = 0;
26265 26300 rqs_pktp->pkt_flags |= FLAG_NOINTR;
26266 26301 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);
26267 26302
26268 26303 mutex_exit(SD_MUTEX(un));
26269 26304
26270 26305 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
26271 26306 " 0x%p\n", rqs_bp->b_un.b_addr);
26272 26307
26273 26308 /*
26274 26309 * Can't send this to sd_scsi_poll, we wrap ourselves around the
26275 26310 * axle - it has a call into us!
26276 26311 */
26277 26312 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
26278 26313 SD_INFO(SD_LOG_COMMON, un,
26279 26314 "sd_send_polled_RQS: RQS failed\n");
26280 26315 }
26281 26316
26282 26317 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
26283 26318 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);
26284 26319
26285 26320 mutex_enter(SD_MUTEX(un));
26286 26321 un->un_sense_isbusy = 0;
26287 26322 mutex_exit(SD_MUTEX(un));
26288 26323
26289 26324 return (ret_val);
26290 26325 }
26291 26326
26292 26327 /*
26293 26328 * Defines needed for localized version of the scsi_poll routine.
26294 26329 */
26295 26330 #define CSEC 10000 /* usecs */
26296 26331 #define SEC_TO_CSEC (1000000/CSEC)
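/*
 * Example: the 60-second SCSI_POLL_TIMEOUT default translates into
 * 60 * SEC_TO_CSEC = 6000 polling slots of CSEC (10 msec) each in
 * sd_ddi_scsi_poll() below.
 */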
26297 26332
26298 26333 /*
26299 26334 * Function: sd_ddi_scsi_poll()
26300 26335 *
26301 26336 * Description: Localized version of the scsi_poll routine. The purpose is to
26302 26337 * send a scsi_pkt to a device as a polled command. This version
26303 26338 * is to ensure more robust handling of transport errors.
26304 26339  *		Specifically, this routine cures the not-ready-to-ready
26305 26340  *		transition seen on power up and reset of Sonoma arrays; this
26306 26341  *		can take up to 45 seconds for power-on and 20 seconds for
26307 26342  *		reset of a Sonoma LUN.
26308 26343 *
26309 26344 * Arguments: scsi_pkt - The scsi_pkt being sent to a device
26310 26345 *
26311 26346 * Return Code: 0 - Command completed successfully with good status
26312 26347 * -1 - Command failed.
26313 26348 *
26314 26349 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
26315 26350 * be fixed (removing this code), we need to determine how to handle the
26316 26351 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
26317 26352 *
26318 26353 * NOTE: This code is only called off sddump().
26319 26354 */
26320 26355 static int
26321 26356 sd_ddi_scsi_poll(struct scsi_pkt *pkt)
26322 26357 {
26323 26358 int rval = -1;
26324 26359 int savef;
26325 26360 long savet;
26326 26361 void (*savec)();
26327 26362 int timeout;
26328 26363 int busy_count;
26329 26364 int poll_delay;
26330 26365 int rc;
26331 26366 uint8_t *sensep;
26332 26367 struct scsi_arq_status *arqstat;
26333 26368 extern int do_polled_io;
26334 26369
26335 26370 ASSERT(pkt->pkt_scbp);
26336 26371
26337 26372 /*
26338 26373 	 * save old flags.
26339 26374 */
26340 26375 savef = pkt->pkt_flags;
26341 26376 savec = pkt->pkt_comp;
26342 26377 savet = pkt->pkt_time;
26343 26378
26344 26379 pkt->pkt_flags |= FLAG_NOINTR;
26345 26380
26346 26381 /*
26347 26382 * XXX there is nothing in the SCSA spec that states that we should not
26348 26383 * do a callback for polled cmds; however, removing this will break sd
26349 26384 * and probably other target drivers
26350 26385 */
26351 26386 pkt->pkt_comp = NULL;
26352 26387
26353 26388 /*
26354 26389 * we don't like a polled command without timeout.
26355 26390 * 60 seconds seems long enough.
26356 26391 */
26357 26392 if (pkt->pkt_time == 0)
26358 26393 pkt->pkt_time = SCSI_POLL_TIMEOUT;
26359 26394
26360 26395 /*
26361 26396 * Send polled cmd.
26362 26397 *
26363 26398 	 * We attempt recovery for several error cases. Tran_busy,
26364 26399 	 * queue full, and non-dispatched commands are retried every 10 msec,
26365 26400 	 * as they are typically transient failures. Busy status and Not
26366 26401 * Ready are retried every second as this status takes a while to
26367 26402 * change.
26368 26403 */
26369 26404 timeout = pkt->pkt_time * SEC_TO_CSEC;
26370 26405
26371 26406 for (busy_count = 0; busy_count < timeout; busy_count++) {
26372 26407 /*
26373 26408 * Initialize pkt status variables.
26374 26409 */
26375 26410 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;
26376 26411
26377 26412 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
26378 26413 if (rc != TRAN_BUSY) {
26379 26414 /* Transport failed - give up. */
26380 26415 break;
26381 26416 } else {
26382 26417 /* Transport busy - try again. */
26383 26418 poll_delay = 1 * CSEC; /* 10 msec. */
26384 26419 }
26385 26420 } else {
26386 26421 /*
26387 26422 * Transport accepted - check pkt status.
26388 26423 */
26389 26424 rc = (*pkt->pkt_scbp) & STATUS_MASK;
26390 26425 if ((pkt->pkt_reason == CMD_CMPLT) &&
26391 26426 (rc == STATUS_CHECK) &&
26392 26427 (pkt->pkt_state & STATE_ARQ_DONE)) {
26393 26428 arqstat =
26394 26429 (struct scsi_arq_status *)(pkt->pkt_scbp);
26395 26430 sensep = (uint8_t *)&arqstat->sts_sensedata;
26396 26431 } else {
26397 26432 sensep = NULL;
26398 26433 }
26399 26434
26400 26435 if ((pkt->pkt_reason == CMD_CMPLT) &&
26401 26436 (rc == STATUS_GOOD)) {
26402 26437 /* No error - we're done */
26403 26438 rval = 0;
26404 26439 break;
26405 26440
26406 26441 } else if (pkt->pkt_reason == CMD_DEV_GONE) {
26407 26442 /* Lost connection - give up */
26408 26443 break;
26409 26444
26410 26445 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
26411 26446 (pkt->pkt_state == 0)) {
26412 26447 /* Pkt not dispatched - try again. */
26413 26448 poll_delay = 1 * CSEC; /* 10 msec. */
26414 26449
26415 26450 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26416 26451 (rc == STATUS_QFULL)) {
26417 26452 /* Queue full - try again. */
26418 26453 poll_delay = 1 * CSEC; /* 10 msec. */
26419 26454
26420 26455 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26421 26456 (rc == STATUS_BUSY)) {
26422 26457 /* Busy - try again. */
26423 26458 poll_delay = 100 * CSEC; /* 1 sec. */
26424 26459 busy_count += (SEC_TO_CSEC - 1);
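				/*
				 * With the loop's own increment this charges
				 * one full second (SEC_TO_CSEC ticks) against
				 * the timeout budget.
				 */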
26425 26460
26426 26461 } else if ((sensep != NULL) &&
26427 26462 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
26428 26463 /*
26429 26464 * Unit Attention - try again.
26430 26465 * Pretend it took 1 sec.
26431 26466 * NOTE: 'continue' avoids poll_delay
26432 26467 */
26433 26468 busy_count += (SEC_TO_CSEC - 1);
26434 26469 continue;
26435 26470
26436 26471 } else if ((sensep != NULL) &&
26437 26472 (scsi_sense_key(sensep) == KEY_NOT_READY) &&
26438 26473 (scsi_sense_asc(sensep) == 0x04) &&
26439 26474 (scsi_sense_ascq(sensep) == 0x01)) {
26440 26475 /*
26441 26476 * Not ready -> ready - try again.
26442 26477 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
26443 26478 * ...same as STATUS_BUSY
26444 26479 */
26445 26480 poll_delay = 100 * CSEC; /* 1 sec. */
26446 26481 busy_count += (SEC_TO_CSEC - 1);
26447 26482
26448 26483 } else {
26449 26484 /* BAD status - give up. */
26450 26485 break;
26451 26486 }
26452 26487 }
26453 26488
26454 26489 if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
26455 26490 !do_polled_io) {
26456 26491 delay(drv_usectohz(poll_delay));
26457 26492 } else {
26458 26493 /* we busy wait during cpr_dump or interrupt threads */
26459 26494 drv_usecwait(poll_delay);
26460 26495 }
26461 26496 }
26462 26497
26463 26498 pkt->pkt_flags = savef;
26464 26499 pkt->pkt_comp = savec;
26465 26500 pkt->pkt_time = savet;
26466 26501
26467 26502 /* return on error */
26468 26503 if (rval)
26469 26504 return (rval);
26470 26505
26471 26506 /*
26472 26507 * This is not a performance critical code path.
26473 26508 *
26474 26509 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
26475 26510 * issues associated with looking at DMA memory prior to
26476 26511 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
26477 26512 */
26478 26513 scsi_sync_pkt(pkt);
26479 26514 return (0);
26480 26515 }
26481 26516
26482 26517
26483 26518
26484 26519 /*
26485 26520 * Function: sd_persistent_reservation_in_read_keys
26486 26521 *
26487 26522 * Description: This routine is the driver entry point for handling CD-ROM
26488 26523 * multi-host persistent reservation requests (MHIOCGRP_INKEYS)
26489 26524 * by sending the SCSI-3 PRIN commands to the device.
26490 26525 * Processes the read keys command response by copying the
26491 26526 * reservation key information into the user provided buffer.
26492 26527 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
26493 26528 *
26494 26529 * Arguments: un - Pointer to soft state struct for the target.
26495 26530 * usrp - user provided pointer to multihost Persistent In Read
26496 26531 * Keys structure (mhioc_inkeys_t)
26497 26532 * flag - this argument is a pass through to ddi_copyxxx()
26498 26533 * directly from the mode argument of ioctl().
26499 26534 *
26500 26535 * Return Code: 0 - Success
26501 26536 * EACCES
26502 26537 * ENOTSUP
26503 26538 * errno return code from sd_send_scsi_cmd()
26504 26539 *
26505 26540 * Context: Can sleep. Does not return until command is completed.
26506 26541 */
26507 26542
26508 26543 static int
26509 26544 sd_persistent_reservation_in_read_keys(struct sd_lun *un,
26510 26545 mhioc_inkeys_t *usrp, int flag)
26511 26546 {
26512 26547 #ifdef _MULTI_DATAMODEL
26513 26548 struct mhioc_key_list32 li32;
26514 26549 #endif
26515 26550 sd_prin_readkeys_t *in;
26516 26551 mhioc_inkeys_t *ptr;
26517 26552 mhioc_key_list_t li;
26518 26553 uchar_t *data_bufp = NULL;
26519 26554 int data_len = 0;
26520 26555 int rval = 0;
26521 26556 size_t copysz = 0;
26522 26557 sd_ssc_t *ssc;
26523 26558
26524 26559 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
26525 26560 return (EINVAL);
26526 26561 }
26527 26562 bzero(&li, sizeof (mhioc_key_list_t));
26528 26563
26529 26564 ssc = sd_ssc_init(un);
26530 26565
26531 26566 /*
26532 26567 * Get the listsize from user
26533 26568 */
26534 26569 #ifdef _MULTI_DATAMODEL
26535 26570 switch (ddi_model_convert_from(flag & FMODELS)) {
26536 26571 case DDI_MODEL_ILP32:
26537 26572 copysz = sizeof (struct mhioc_key_list32);
26538 26573 if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
26539 26574 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26540 26575 "sd_persistent_reservation_in_read_keys: "
26541 26576 "failed ddi_copyin: mhioc_key_list32_t\n");
26542 26577 rval = EFAULT;
26543 26578 goto done;
26544 26579 }
26545 26580 li.listsize = li32.listsize;
26546 26581 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
26547 26582 break;
26548 26583
26549 26584 case DDI_MODEL_NONE:
26550 26585 copysz = sizeof (mhioc_key_list_t);
26551 26586 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26552 26587 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26553 26588 "sd_persistent_reservation_in_read_keys: "
26554 26589 "failed ddi_copyin: mhioc_key_list_t\n");
26555 26590 rval = EFAULT;
26556 26591 goto done;
26557 26592 }
26558 26593 break;
26559 26594 }
26560 26595
26561 26596 #else /* ! _MULTI_DATAMODEL */
26562 26597 copysz = sizeof (mhioc_key_list_t);
26563 26598 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26564 26599 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26565 26600 "sd_persistent_reservation_in_read_keys: "
26566 26601 "failed ddi_copyin: mhioc_key_list_t\n");
26567 26602 rval = EFAULT;
26568 26603 goto done;
26569 26604 }
26570 26605 #endif
26571 26606
26572 26607 data_len = li.listsize * MHIOC_RESV_KEY_SIZE;
26573 26608 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
26574 26609 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26575 26610
26576 26611 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
26577 26612 data_len, data_bufp);
26578 26613 if (rval != 0) {
26579 26614 if (rval == EIO)
26580 26615 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26581 26616 else
26582 26617 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26583 26618 goto done;
26584 26619 }
26585 26620 in = (sd_prin_readkeys_t *)data_bufp;
26586 26621 ptr->generation = BE_32(in->generation);
26587 26622 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;
26588 26623
26589 26624 /*
26590 26625 * Return the min(listsize, listlen) keys
26591 26626 */
26592 26627 #ifdef _MULTI_DATAMODEL
26593 26628
26594 26629 switch (ddi_model_convert_from(flag & FMODELS)) {
26595 26630 case DDI_MODEL_ILP32:
26596 26631 li32.listlen = li.listlen;
26597 26632 if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
26598 26633 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26599 26634 "sd_persistent_reservation_in_read_keys: "
26600 26635 "failed ddi_copyout: mhioc_key_list32_t\n");
26601 26636 rval = EFAULT;
26602 26637 goto done;
26603 26638 }
26604 26639 break;
26605 26640
26606 26641 case DDI_MODEL_NONE:
26607 26642 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26608 26643 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26609 26644 "sd_persistent_reservation_in_read_keys: "
26610 26645 "failed ddi_copyout: mhioc_key_list_t\n");
26611 26646 rval = EFAULT;
26612 26647 goto done;
26613 26648 }
26614 26649 break;
26615 26650 }
26616 26651
26617 26652 #else /* ! _MULTI_DATAMODEL */
26618 26653
26619 26654 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26620 26655 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26621 26656 "sd_persistent_reservation_in_read_keys: "
26622 26657 "failed ddi_copyout: mhioc_key_list_t\n");
26623 26658 rval = EFAULT;
26624 26659 goto done;
26625 26660 }
26626 26661
26627 26662 #endif /* _MULTI_DATAMODEL */
26628 26663
26629 26664 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
26630 26665 li.listsize * MHIOC_RESV_KEY_SIZE);
26631 26666 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
26632 26667 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26633 26668 "sd_persistent_reservation_in_read_keys: "
26634 26669 "failed ddi_copyout: keylist\n");
26635 26670 rval = EFAULT;
26636 26671 }
26637 26672 done:
26638 26673 sd_ssc_fini(ssc);
26639 26674 kmem_free(data_bufp, data_len);
26640 26675 return (rval);
26641 26676 }
26642 26677
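/*
 * A minimal user-space sketch (not part of this driver) of the two-pass
 * MHIOCGRP_INKEYS protocol the routine above implements: call once with
 * listsize 0 to learn listlen, then size the key buffer and call again.
 * The device path is hypothetical, and the zero-size first pass relies on
 * the min(listsize, listlen) copyout behavior shown above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mhd.h>

int
main(void)
{
	mhioc_inkeys_t k;
	mhioc_key_list_t kl;
	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);	/* hypothetical */

	if (fd < 0)
		return (1);
	kl.listsize = 0;
	kl.list = NULL;
	k.li = &kl;
	if (ioctl(fd, MHIOCGRP_INKEYS, &k) != 0)	/* pass 1: count */
		return (1);
	kl.listsize = kl.listlen;
	kl.list = calloc(kl.listlen, sizeof (mhioc_resv_key_t));
	if (kl.list == NULL || ioctl(fd, MHIOCGRP_INKEYS, &k) != 0)
		return (1);				/* pass 2: keys */
	(void) printf("generation %u, %u keys\n",
	    (unsigned int)k.generation, (unsigned int)kl.listlen);
	(void) close(fd);
	return (0);
}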
26643 26678
26644 26679 /*
26645 26680 * Function: sd_persistent_reservation_in_read_resv
26646 26681 *
26647 26682 * Description: This routine is the driver entry point for handling CD-ROM
26648 26683 * multi-host persistent reservation requests (MHIOCGRP_INRESV)
26649 26684 * by sending the SCSI-3 PRIN commands to the device.
26650 26685 * Process the read persistent reservations command response by
26651 26686 * copying the reservation information into the user provided
26652 26687 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
26653 26688 *
26654 26689 * Arguments: un - Pointer to soft state struct for the target.
26655 26690  *		usrp - user provided pointer to multihost Persistent In Read
26656 26691  *		Reservations structure (mhioc_inresvs_t)
26657 26692 * flag - this argument is a pass through to ddi_copyxxx()
26658 26693 * directly from the mode argument of ioctl().
26659 26694 *
26660 26695 * Return Code: 0 - Success
26661 26696 * EACCES
26662 26697 * ENOTSUP
26663 26698 * errno return code from sd_send_scsi_cmd()
26664 26699 *
26665 26700 * Context: Can sleep. Does not return until command is completed.
26666 26701 */
26667 26702
26668 26703 static int
26669 26704 sd_persistent_reservation_in_read_resv(struct sd_lun *un,
26670 26705 mhioc_inresvs_t *usrp, int flag)
26671 26706 {
26672 26707 #ifdef _MULTI_DATAMODEL
26673 26708 struct mhioc_resv_desc_list32 resvlist32;
26674 26709 #endif
26675 26710 sd_prin_readresv_t *in;
26676 26711 mhioc_inresvs_t *ptr;
26677 26712 sd_readresv_desc_t *readresv_ptr;
26678 26713 mhioc_resv_desc_list_t resvlist;
26679 26714 mhioc_resv_desc_t resvdesc;
26680 26715 uchar_t *data_bufp = NULL;
26681 26716 int data_len;
26682 26717 int rval = 0;
26683 26718 int i;
26684 26719 size_t copysz = 0;
26685 26720 mhioc_resv_desc_t *bufp;
26686 26721 sd_ssc_t *ssc;
26687 26722
26688 26723 if ((ptr = usrp) == NULL) {
26689 26724 return (EINVAL);
26690 26725 }
26691 26726
26692 26727 ssc = sd_ssc_init(un);
26693 26728
26694 26729 /*
26695 26730 * Get the listsize from user
26696 26731 */
26697 26732 #ifdef _MULTI_DATAMODEL
26698 26733 switch (ddi_model_convert_from(flag & FMODELS)) {
26699 26734 case DDI_MODEL_ILP32:
26700 26735 copysz = sizeof (struct mhioc_resv_desc_list32);
26701 26736 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
26702 26737 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26703 26738 "sd_persistent_reservation_in_read_resv: "
26704 26739 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26705 26740 rval = EFAULT;
26706 26741 goto done;
26707 26742 }
26708 26743 resvlist.listsize = resvlist32.listsize;
26709 26744 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
26710 26745 break;
26711 26746
26712 26747 case DDI_MODEL_NONE:
26713 26748 copysz = sizeof (mhioc_resv_desc_list_t);
26714 26749 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26715 26750 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26716 26751 "sd_persistent_reservation_in_read_resv: "
26717 26752 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26718 26753 rval = EFAULT;
26719 26754 goto done;
26720 26755 }
26721 26756 break;
26722 26757 }
26723 26758 #else /* ! _MULTI_DATAMODEL */
26724 26759 copysz = sizeof (mhioc_resv_desc_list_t);
26725 26760 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26726 26761 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26727 26762 "sd_persistent_reservation_in_read_resv: "
26728 26763 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26729 26764 rval = EFAULT;
26730 26765 goto done;
26731 26766 }
26732 26767 #endif /* ! _MULTI_DATAMODEL */
26733 26768
26734 26769 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
26735 26770 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
26736 26771 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26737 26772
26738 26773 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
26739 26774 data_len, data_bufp);
26740 26775 if (rval != 0) {
26741 26776 if (rval == EIO)
26742 26777 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26743 26778 else
26744 26779 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26745 26780 goto done;
26746 26781 }
26747 26782 in = (sd_prin_readresv_t *)data_bufp;
26748 26783 ptr->generation = BE_32(in->generation);
26749 26784 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;
26750 26785
26751 26786 /*
26752 26787 	 * Return the min(listsize, listlen) keys
26753 26788 */
26754 26789 #ifdef _MULTI_DATAMODEL
26755 26790
26756 26791 switch (ddi_model_convert_from(flag & FMODELS)) {
26757 26792 case DDI_MODEL_ILP32:
26758 26793 resvlist32.listlen = resvlist.listlen;
26759 26794 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
26760 26795 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26761 26796 "sd_persistent_reservation_in_read_resv: "
26762 26797 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26763 26798 rval = EFAULT;
26764 26799 goto done;
26765 26800 }
26766 26801 break;
26767 26802
26768 26803 case DDI_MODEL_NONE:
26769 26804 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26770 26805 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26771 26806 "sd_persistent_reservation_in_read_resv: "
26772 26807 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26773 26808 rval = EFAULT;
26774 26809 goto done;
26775 26810 }
26776 26811 break;
26777 26812 }
26778 26813
26779 26814 #else /* ! _MULTI_DATAMODEL */
26780 26815
26781 26816 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26782 26817 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26783 26818 "sd_persistent_reservation_in_read_resv: "
26784 26819 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26785 26820 rval = EFAULT;
26786 26821 goto done;
26787 26822 }
26788 26823
26789 26824 #endif /* ! _MULTI_DATAMODEL */
26790 26825
26791 26826 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
26792 26827 bufp = resvlist.list;
26793 26828 copysz = sizeof (mhioc_resv_desc_t);
26794 26829 for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
26795 26830 i++, readresv_ptr++, bufp++) {
26796 26831
26797 26832 bcopy(&readresv_ptr->resvkey, &resvdesc.key,
26798 26833 MHIOC_RESV_KEY_SIZE);
26799 26834 resvdesc.type = readresv_ptr->type;
26800 26835 resvdesc.scope = readresv_ptr->scope;
26801 26836 resvdesc.scope_specific_addr =
26802 26837 BE_32(readresv_ptr->scope_specific_addr);
26803 26838
26804 26839 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
26805 26840 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26806 26841 "sd_persistent_reservation_in_read_resv: "
26807 26842 "failed ddi_copyout: resvlist\n");
26808 26843 rval = EFAULT;
26809 26844 goto done;
26810 26845 }
26811 26846 }
26812 26847 done:
26813 26848 sd_ssc_fini(ssc);
26814 26849 	/* free data_bufp only if it was allocated */
26815 26850 if (data_bufp) {
26816 26851 kmem_free(data_bufp, data_len);
26817 26852 }
26818 26853 return (rval);
26819 26854 }
26820 26855
26821 26856
26822 26857 /*
26823 26858 * Function: sr_change_blkmode()
26824 26859 *
26825 26860 * Description: This routine is the driver entry point for handling CD-ROM
26826 26861 * block mode ioctl requests. Support for returning and changing
26827 26862 * the current block size in use by the device is implemented. The
26828 26863 * LBA size is changed via a MODE SELECT Block Descriptor.
26829 26864 *
26830 26865 * This routine issues a mode sense with an allocation length of
26831 26866 * 12 bytes for the mode page header and a single block descriptor.
26832 26867 *
26833 26868 * Arguments: dev - the device 'dev_t'
26834 26869 * cmd - the request type; one of CDROMGBLKMODE (get) or
26835 26870 * CDROMSBLKMODE (set)
26836 26871 * data - current block size or requested block size
26837 26872 * flag - this argument is a pass through to ddi_copyxxx() directly
26838 26873 * from the mode argument of ioctl().
26839 26874 *
26840 26875 * Return Code: the code returned by sd_send_scsi_cmd()
26841 26876 * EINVAL if invalid arguments are provided
26842 26877 * EFAULT if ddi_copyxxx() fails
26843 26878  *		ENXIO if ddi_get_soft_state fails
26844 26879 * EIO if invalid mode sense block descriptor length
26845 26880 *
26846 26881 */
26847 26882
26848 26883 static int
26849 26884 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
26850 26885 {
26851 26886 struct sd_lun *un = NULL;
26852 26887 struct mode_header *sense_mhp, *select_mhp;
26853 26888 struct block_descriptor *sense_desc, *select_desc;
26854 26889 int current_bsize;
26855 26890 int rval = EINVAL;
26856 26891 uchar_t *sense = NULL;
26857 26892 uchar_t *select = NULL;
26858 26893 sd_ssc_t *ssc;
26859 26894
26860 26895 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));
26861 26896
26862 26897 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
26863 26898 return (ENXIO);
26864 26899 }
26865 26900
26866 26901 /*
26867 26902 	 * The block length is changed via the Mode Select block descriptor;
26868 26903 	 * the "Read/Write Error Recovery" mode page (0x1) contents are not
26869 26904 	 * actually required by this routine. Therefore the mode sense allocation
26870 26905 * length is specified to be the length of a mode page header and a
26871 26906 * block descriptor.
26872 26907 */
26873 26908 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26874 26909
26875 26910 ssc = sd_ssc_init(un);
26876 26911 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
26877 26912 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
26878 26913 sd_ssc_fini(ssc);
26879 26914 if (rval != 0) {
26880 26915 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26881 26916 "sr_change_blkmode: Mode Sense Failed\n");
26882 26917 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26883 26918 return (rval);
26884 26919 }
26885 26920
26886 26921 /* Check the block descriptor len to handle only 1 block descriptor */
26887 26922 sense_mhp = (struct mode_header *)sense;
26888 26923 if ((sense_mhp->bdesc_length == 0) ||
26889 26924 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
26890 26925 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26891 26926 "sr_change_blkmode: Mode Sense returned invalid block"
26892 26927 " descriptor length\n");
26893 26928 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26894 26929 return (EIO);
26895 26930 }
26896 26931 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
26897 26932 current_bsize = ((sense_desc->blksize_hi << 16) |
26898 26933 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
26899 26934
26900 26935 /* Process command */
26901 26936 switch (cmd) {
26902 26937 case CDROMGBLKMODE:
26903 26938 /* Return the block size obtained during the mode sense */
26904 26939 		if (ddi_copyout(&current_bsize, (void *)data,
26905 26940 sizeof (int), flag) != 0)
26906 26941 rval = EFAULT;
26907 26942 break;
26908 26943 case CDROMSBLKMODE:
26909 26944 /* Validate the requested block size */
26910 26945 switch (data) {
26911 26946 case CDROM_BLK_512:
26912 26947 case CDROM_BLK_1024:
26913 26948 case CDROM_BLK_2048:
26914 26949 case CDROM_BLK_2056:
26915 26950 case CDROM_BLK_2336:
26916 26951 case CDROM_BLK_2340:
26917 26952 case CDROM_BLK_2352:
26918 26953 case CDROM_BLK_2368:
26919 26954 case CDROM_BLK_2448:
26920 26955 case CDROM_BLK_2646:
26921 26956 case CDROM_BLK_2647:
26922 26957 break;
26923 26958 default:
26924 26959 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26925 26960 "sr_change_blkmode: "
26926 26961 "Block Size '%ld' Not Supported\n", data);
26927 26962 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26928 26963 return (EINVAL);
26929 26964 }
26930 26965
26931 26966 /*
26932 26967 * The current block size matches the requested block size so
26933 26968 * there is no need to send the mode select to change the size
26934 26969 */
26935 26970 if (current_bsize == data) {
26936 26971 break;
26937 26972 }
26938 26973
26939 26974 /* Build the select data for the requested block size */
26940 26975 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26941 26976 select_mhp = (struct mode_header *)select;
26942 26977 select_desc =
26943 26978 (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
26944 26979 /*
26945 26980 * The LBA size is changed via the block descriptor, so the
26946 26981 * descriptor is built according to the user data
26947 26982 */
26948 26983 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
26949 26984 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
26950 26985 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
26951 26986 select_desc->blksize_lo = (char)((data) & 0x000000ff);
26952 26987
26953 26988 /* Send the mode select for the requested block size */
26954 26989 ssc = sd_ssc_init(un);
26955 26990 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26956 26991 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26957 26992 SD_PATH_STANDARD);
26958 26993 sd_ssc_fini(ssc);
26959 26994 if (rval != 0) {
26960 26995 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26961 26996 "sr_change_blkmode: Mode Select Failed\n");
26962 26997 /*
26963 26998 * The mode select failed for the requested block size,
26964 26999 * so reset the data for the original block size and
26965 27000 * send it to the target. The error is indicated by the
26966 27001 * return value for the failed mode select.
26967 27002 */
26968 27003 select_desc->blksize_hi = sense_desc->blksize_hi;
26969 27004 select_desc->blksize_mid = sense_desc->blksize_mid;
26970 27005 select_desc->blksize_lo = sense_desc->blksize_lo;
26971 27006 ssc = sd_ssc_init(un);
26972 27007 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26973 27008 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26974 27009 SD_PATH_STANDARD);
26975 27010 sd_ssc_fini(ssc);
26976 27011 } else {
26977 27012 ASSERT(!mutex_owned(SD_MUTEX(un)));
26978 27013 mutex_enter(SD_MUTEX(un));
26979 27014 sd_update_block_info(un, (uint32_t)data, 0);
26980 27015 mutex_exit(SD_MUTEX(un));
26981 27016 }
26982 27017 break;
26983 27018 default:
26984 27019 /* should not reach here, but check anyway */
26985 27020 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26986 27021 "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
26987 27022 rval = EINVAL;
26988 27023 break;
26989 27024 }
26990 27025
26991 27026 if (select) {
26992 27027 kmem_free(select, BUFLEN_CHG_BLK_MODE);
26993 27028 }
26994 27029 if (sense) {
26995 27030 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26996 27031 }
26997 27032 return (rval);
26998 27033 }
26999 27034
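/*
 * A minimal user-space sketch (not part of this driver) of driving
 * sr_change_blkmode() through cdio(7I).  As the code above shows,
 * CDROMGBLKMODE copies the current size out through a pointer, while
 * CDROMSBLKMODE takes the requested size by value.  The device path is
 * hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/cdio.h>

int
main(void)
{
	int bsize;
	int fd = open("/dev/rdsk/c0t1d0s2", O_RDONLY);	/* hypothetical */

	if (fd < 0)
		return (1);
	if (ioctl(fd, CDROMGBLKMODE, &bsize) == 0)
		(void) printf("current LBA size: %d\n", bsize);
	if (ioctl(fd, CDROMSBLKMODE, CDROM_BLK_2048) != 0)
		perror("CDROMSBLKMODE");
	(void) close(fd);
	return (0);
}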
27000 27035
27001 27036 /*
27002 27037 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
27003 27038 * implement driver support for getting and setting the CD speed. The command
27004 27039 * set used will be based on the device type. If the device has not been
27005 27040 * identified as MMC the Toshiba vendor specific mode page will be used. If
27006 27041 * the device is MMC but does not support the Real Time Streaming feature
27007 27042 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
27008 27043 * be used to read the speed.
27009 27044 */
27010 27045
27011 27046 /*
27012 27047 * Function: sr_change_speed()
27013 27048 *
27014 27049 * Description: This routine is the driver entry point for handling CD-ROM
27015 27050 * drive speed ioctl requests for devices supporting the Toshiba
27016 27051 * vendor specific drive speed mode page. Support for returning
27017 27052 * and changing the current drive speed in use by the device is
27018 27053 * implemented.
27019 27054 *
27020 27055 * Arguments: dev - the device 'dev_t'
27021 27056 * cmd - the request type; one of CDROMGDRVSPEED (get) or
27022 27057 * CDROMSDRVSPEED (set)
27023 27058 * data - current drive speed or requested drive speed
27024 27059 * flag - this argument is a pass through to ddi_copyxxx() directly
27025 27060 * from the mode argument of ioctl().
27026 27061 *
27027 27062 * Return Code: the code returned by sd_send_scsi_cmd()
27028 27063 * EINVAL if invalid arguments are provided
27029 27064 * EFAULT if ddi_copyxxx() fails
27030 27065  *		ENXIO if ddi_get_soft_state fails
27031 27066 * EIO if invalid mode sense block descriptor length
27032 27067 */
27033 27068
27034 27069 static int
27035 27070 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27036 27071 {
27037 27072 struct sd_lun *un = NULL;
27038 27073 struct mode_header *sense_mhp, *select_mhp;
27039 27074 struct mode_speed *sense_page, *select_page;
27040 27075 int current_speed;
27041 27076 int rval = EINVAL;
27042 27077 int bd_len;
27043 27078 uchar_t *sense = NULL;
27044 27079 uchar_t *select = NULL;
27045 27080 sd_ssc_t *ssc;
27046 27081
27047 27082 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27048 27083 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27049 27084 return (ENXIO);
27050 27085 }
27051 27086
27052 27087 /*
27053 27088 * Note: The drive speed is being modified here according to a Toshiba
27054 27089 * vendor specific mode page (0x31).
27055 27090 */
27056 27091 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27057 27092
27058 27093 ssc = sd_ssc_init(un);
27059 27094 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27060 27095 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27061 27096 SD_PATH_STANDARD);
27062 27097 sd_ssc_fini(ssc);
27063 27098 if (rval != 0) {
27064 27099 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27065 27100 "sr_change_speed: Mode Sense Failed\n");
27066 27101 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27067 27102 return (rval);
27068 27103 }
27069 27104 sense_mhp = (struct mode_header *)sense;
27070 27105
27071 27106 /* Check the block descriptor len to handle only 1 block descriptor */
27072 27107 bd_len = sense_mhp->bdesc_length;
27073 27108 if (bd_len > MODE_BLK_DESC_LENGTH) {
27074 27109 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27075 27110 "sr_change_speed: Mode Sense returned invalid block "
27076 27111 "descriptor length\n");
27077 27112 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27078 27113 return (EIO);
27079 27114 }
27080 27115
27081 27116 sense_page = (struct mode_speed *)
27082 27117 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27083 27118 current_speed = sense_page->speed;
27084 27119
27085 27120 /* Process command */
27086 27121 switch (cmd) {
27087 27122 case CDROMGDRVSPEED:
27088 27123 /* Return the drive speed obtained during the mode sense */
27089 27124 if (current_speed == 0x2) {
27090 27125 current_speed = CDROM_TWELVE_SPEED;
27091 27126 }
27092 27127 		if (ddi_copyout(&current_speed, (void *)data,
27093 27128 sizeof (int), flag) != 0) {
27094 27129 rval = EFAULT;
27095 27130 }
27096 27131 break;
27097 27132 case CDROMSDRVSPEED:
27098 27133 /* Validate the requested drive speed */
27099 27134 switch ((uchar_t)data) {
27100 27135 case CDROM_TWELVE_SPEED:
27101 27136 data = 0x2;
27102 27137 /*FALLTHROUGH*/
27103 27138 case CDROM_NORMAL_SPEED:
27104 27139 case CDROM_DOUBLE_SPEED:
27105 27140 case CDROM_QUAD_SPEED:
27106 27141 case CDROM_MAXIMUM_SPEED:
27107 27142 break;
27108 27143 default:
27109 27144 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27110 27145 "sr_change_speed: "
27111 27146 "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27112 27147 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27113 27148 return (EINVAL);
27114 27149 }
27115 27150
27116 27151 /*
27117 27152 * The current drive speed matches the requested drive speed so
27118 27153 * there is no need to send the mode select to change the speed
27119 27154 */
27120 27155 if (current_speed == data) {
27121 27156 break;
27122 27157 }
27123 27158
27124 27159 /* Build the select data for the requested drive speed */
27125 27160 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27126 27161 select_mhp = (struct mode_header *)select;
27127 27162 select_mhp->bdesc_length = 0;
27128 27163 		select_page =
27129 27164 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27132 27167 select_page->mode_page.code = CDROM_MODE_SPEED;
27133 27168 select_page->mode_page.length = 2;
27134 27169 select_page->speed = (uchar_t)data;
27135 27170
27136 27171 /* Send the mode select for the requested block size */
27137 27172 ssc = sd_ssc_init(un);
27138 27173 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27139 27174 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27140 27175 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27141 27176 sd_ssc_fini(ssc);
27142 27177 if (rval != 0) {
27143 27178 /*
27144 27179 * The mode select failed for the requested drive speed,
27145 27180 * so reset the data for the original drive speed and
27146 27181 * send it to the target. The error is indicated by the
27147 27182 * return value for the failed mode select.
27148 27183 */
27149 27184 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27150 27185 "sr_drive_speed: Mode Select Failed\n");
27151 27186 select_page->speed = sense_page->speed;
27152 27187 ssc = sd_ssc_init(un);
27153 27188 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27154 27189 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27155 27190 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27156 27191 sd_ssc_fini(ssc);
27157 27192 }
27158 27193 break;
27159 27194 default:
27160 27195 /* should not reach here, but check anyway */
27161 27196 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27162 27197 "sr_change_speed: Command '%x' Not Supported\n", cmd);
27163 27198 rval = EINVAL;
27164 27199 break;
27165 27200 }
27166 27201
27167 27202 if (select) {
27168 27203 kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
27169 27204 }
27170 27205 if (sense) {
27171 27206 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27172 27207 }
27173 27208
27174 27209 return (rval);
27175 27210 }
27176 27211
27177 27212
27178 27213 /*
27179 27214 * Function: sr_atapi_change_speed()
27180 27215 *
27181 27216 * Description: This routine is the driver entry point for handling CD-ROM
27182 27217 * drive speed ioctl requests for MMC devices that do not support
27183 27218 * the Real Time Streaming feature (0x107).
27184 27219 *
27185 27220 * Note: This routine will use the SET SPEED command which may not
27186 27221 * be supported by all devices.
27187 27222 *
27188 27223  * Arguments: dev - the device 'dev_t'
27189 27224  *		cmd - the request type; one of CDROMGDRVSPEED (get) or
27190 27225  *		CDROMSDRVSPEED (set)
27191 27226  *		data - current drive speed or requested drive speed
27192 27227  *		flag - this argument is a pass through to ddi_copyxxx() directly
27193 27228 * from the mode argument of ioctl().
27194 27229 *
27195 27230 * Return Code: the code returned by sd_send_scsi_cmd()
27196 27231 * EINVAL if invalid arguments are provided
27197 27232 * EFAULT if ddi_copyxxx() fails
27198 27233  *		ENXIO if ddi_get_soft_state fails
27199 27234 * EIO if invalid mode sense block descriptor length
27200 27235 */
27201 27236
27202 27237 static int
27203 27238 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27204 27239 {
27205 27240 struct sd_lun *un;
27206 27241 struct uscsi_cmd *com = NULL;
27207 27242 struct mode_header_grp2 *sense_mhp;
27208 27243 uchar_t *sense_page;
27209 27244 uchar_t *sense = NULL;
27210 27245 char cdb[CDB_GROUP5];
27211 27246 int bd_len;
27212 27247 int current_speed = 0;
27213 27248 int max_speed = 0;
27214 27249 int rval;
27215 27250 sd_ssc_t *ssc;
27216 27251
27217 27252 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27218 27253
27219 27254 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27220 27255 return (ENXIO);
27221 27256 }
27222 27257
27223 27258 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
27224 27259
27225 27260 ssc = sd_ssc_init(un);
27226 27261 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
27227 27262 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
27228 27263 SD_PATH_STANDARD);
27229 27264 sd_ssc_fini(ssc);
27230 27265 if (rval != 0) {
27231 27266 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27232 27267 "sr_atapi_change_speed: Mode Sense Failed\n");
27233 27268 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27234 27269 return (rval);
27235 27270 }
27236 27271
27237 27272 /* Check the block descriptor len to handle only 1 block descriptor */
27238 27273 sense_mhp = (struct mode_header_grp2 *)sense;
27239 27274 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
27240 27275 if (bd_len > MODE_BLK_DESC_LENGTH) {
27241 27276 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27242 27277 "sr_atapi_change_speed: Mode Sense returned invalid "
27243 27278 "block descriptor length\n");
27244 27279 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27245 27280 return (EIO);
27246 27281 }
27247 27282
27248 27283 /* Calculate the current and maximum drive speeds */
27249 27284 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
27250 27285 current_speed = (sense_page[14] << 8) | sense_page[15];
27251 27286 max_speed = (sense_page[8] << 8) | sense_page[9];
27252 27287
27253 27288 /* Process the command */
27254 27289 switch (cmd) {
27255 27290 case CDROMGDRVSPEED:
27256 27291 current_speed /= SD_SPEED_1X;
27257 27292		if (ddi_copyout(&current_speed, (void *)data,
27258 27293 sizeof (int), flag) != 0)
27259 27294 rval = EFAULT;
27260 27295 break;
27261 27296 case CDROMSDRVSPEED:
27262 27297 /* Convert the speed code to KB/sec */
27263 27298 switch ((uchar_t)data) {
27264 27299 case CDROM_NORMAL_SPEED:
27265 27300 current_speed = SD_SPEED_1X;
27266 27301 break;
27267 27302 case CDROM_DOUBLE_SPEED:
27268 27303 current_speed = 2 * SD_SPEED_1X;
27269 27304 break;
27270 27305 case CDROM_QUAD_SPEED:
27271 27306 current_speed = 4 * SD_SPEED_1X;
27272 27307 break;
27273 27308 case CDROM_TWELVE_SPEED:
27274 27309 current_speed = 12 * SD_SPEED_1X;
27275 27310 break;
27276 27311 case CDROM_MAXIMUM_SPEED:
27277 27312 current_speed = 0xffff;
27278 27313 break;
27279 27314 default:
27280 27315 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27281 27316 "sr_atapi_change_speed: invalid drive speed %d\n",
27282 27317 (uchar_t)data);
27283 27318 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27284 27319 return (EINVAL);
27285 27320 }
27286 27321
27287 27322 /* Check the request against the drive's max speed. */
27288 27323 if (current_speed != 0xffff) {
27289 27324 if (current_speed > max_speed) {
27290 27325 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27291 27326 return (EINVAL);
27292 27327 }
27293 27328 }
27294 27329
27295 27330 /*
27296 27331 * Build and send the SET SPEED command
27297 27332 *
27298 27333 * Note: The SET SPEED (0xBB) command used in this routine is
27299 27334 * obsolete per the SCSI MMC spec but still supported in the
27300 27335		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI,
27301 27336		 * so the command is still implemented in this routine.
27302 27337 */
27303 27338 bzero(cdb, sizeof (cdb));
27304 27339 cdb[0] = (char)SCMD_SET_CDROM_SPEED;
27305 27340 cdb[2] = (uchar_t)(current_speed >> 8);
27306 27341 cdb[3] = (uchar_t)current_speed;
27307 27342 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27308 27343 com->uscsi_cdb = (caddr_t)cdb;
27309 27344 com->uscsi_cdblen = CDB_GROUP5;
27310 27345 com->uscsi_bufaddr = NULL;
27311 27346 com->uscsi_buflen = 0;
27312 27347 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27313 27348 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
27314 27349 break;
27315 27350 default:
27316 27351 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27317 27352 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
27318 27353 rval = EINVAL;
27319 27354 }
27320 27355
27321 27356 if (sense) {
27322 27357 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27323 27358 }
27324 27359 if (com) {
27325 27360 kmem_free(com, sizeof (*com));
27326 27361 }
27327 27362 return (rval);
27328 27363 }
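/*
 * Illustrative sketch (not part of the driver): CDROMGDRVSPEED above
 * reports the drive speed as an "nX" multiple by dividing the KB/sec
 * value from the CD capabilities mode page by SD_SPEED_1X. Assuming
 * SD_SPEED_1X is the Red Book 1X audio rate of ~176 KB/sec (75 sectors
 * of 2352 bytes per second), the conversion looks like this:
 */
#if 0	/* example only, never compiled into the driver */
#define	EX_SPEED_1X	176	/* assumed value of SD_SPEED_1X, KB/sec */

static int
ex_kbps_to_x_rating(int kbps)
{
	/* A drive reporting 8448 KB/sec yields 8448 / 176 = 48, i.e. 48X */
	return (kbps / EX_SPEED_1X);
}
#endif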
27329 27364
27330 27365
27331 27366 /*
27332 27367 * Function: sr_pause_resume()
27333 27368 *
27334 27369 * Description: This routine is the driver entry point for handling CD-ROM
27335 27370 * pause/resume ioctl requests. This only affects the audio play
27336 27371 * operation.
27337 27372 *
27338 27373 * Arguments: dev - the device 'dev_t'
27339 27374 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27340 27375 * for setting the resume bit of the cdb.
27341 27376 *
27342 27377 * Return Code: the code returned by sd_send_scsi_cmd()
27343 27378 * EINVAL if invalid mode specified
27344 27379 *
27345 27380 */
27346 27381
27347 27382 static int
27348 27383 sr_pause_resume(dev_t dev, int cmd)
27349 27384 {
27350 27385 struct sd_lun *un;
27351 27386 struct uscsi_cmd *com;
27352 27387 char cdb[CDB_GROUP1];
27353 27388 int rval;
27354 27389
27355 27390 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27356 27391 return (ENXIO);
27357 27392 }
27358 27393
27359 27394 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27360 27395 bzero(cdb, CDB_GROUP1);
27361 27396 cdb[0] = SCMD_PAUSE_RESUME;
27362 27397 switch (cmd) {
27363 27398 case CDROMRESUME:
27364 27399 cdb[8] = 1;
27365 27400 break;
27366 27401 case CDROMPAUSE:
27367 27402 cdb[8] = 0;
27368 27403 break;
27369 27404 default:
27370 27405 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
27371 27406 " Command '%x' Not Supported\n", cmd);
27372 27407 rval = EINVAL;
27373 27408 goto done;
27374 27409 }
27375 27410
27376 27411 com->uscsi_cdb = cdb;
27377 27412 com->uscsi_cdblen = CDB_GROUP1;
27378 27413 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27379 27414
27380 27415 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27381 27416 SD_PATH_STANDARD);
27382 27417
27383 27418 done:
27384 27419 kmem_free(com, sizeof (*com));
27385 27420 return (rval);
27386 27421 }
27387 27422
27388 27423
27389 27424 /*
27390 27425 * Function: sr_play_msf()
27391 27426 *
27392 27427 * Description: This routine is the driver entry point for handling CD-ROM
27393 27428 * ioctl requests to output the audio signals at the specified
27394 27429 * starting address and continue the audio play until the specified
27395 27430  *		ending address (CDROMPLAYMSF). The address is in Minute Second
27396 27431 * Frame (MSF) format.
27397 27432 *
27398 27433 * Arguments: dev - the device 'dev_t'
27399 27434 * data - pointer to user provided audio msf structure,
27400 27435 * specifying start/end addresses.
27401 27436 * flag - this argument is a pass through to ddi_copyxxx()
27402 27437 * directly from the mode argument of ioctl().
27403 27438 *
27404 27439 * Return Code: the code returned by sd_send_scsi_cmd()
27405 27440 * EFAULT if ddi_copyxxx() fails
27406 27441  *		ENXIO if ddi_get_soft_state() fails
27407 27442 * EINVAL if data pointer is NULL
27408 27443 */
27409 27444
27410 27445 static int
27411 27446 sr_play_msf(dev_t dev, caddr_t data, int flag)
27412 27447 {
27413 27448 struct sd_lun *un;
27414 27449 struct uscsi_cmd *com;
27415 27450 struct cdrom_msf msf_struct;
27416 27451 struct cdrom_msf *msf = &msf_struct;
27417 27452 char cdb[CDB_GROUP1];
27418 27453 int rval;
27419 27454
27420 27455 if (data == NULL) {
27421 27456 return (EINVAL);
27422 27457 }
27423 27458
27424 27459 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27425 27460 return (ENXIO);
27426 27461 }
27427 27462
27428 27463 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
27429 27464 return (EFAULT);
27430 27465 }
27431 27466
27432 27467 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27433 27468 bzero(cdb, CDB_GROUP1);
27434 27469 cdb[0] = SCMD_PLAYAUDIO_MSF;
27435 27470 if (un->un_f_cfg_playmsf_bcd == TRUE) {
27436 27471 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
27437 27472 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
27438 27473 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
27439 27474 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
27440 27475 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
27441 27476 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
27442 27477 } else {
27443 27478 cdb[3] = msf->cdmsf_min0;
27444 27479 cdb[4] = msf->cdmsf_sec0;
27445 27480 cdb[5] = msf->cdmsf_frame0;
27446 27481 cdb[6] = msf->cdmsf_min1;
27447 27482 cdb[7] = msf->cdmsf_sec1;
27448 27483 cdb[8] = msf->cdmsf_frame1;
27449 27484 }
27450 27485 com->uscsi_cdb = cdb;
27451 27486 com->uscsi_cdblen = CDB_GROUP1;
27452 27487 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27453 27488 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27454 27489 SD_PATH_STANDARD);
27455 27490 kmem_free(com, sizeof (*com));
27456 27491 return (rval);
27457 27492 }
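/*
 * Illustrative sketch (not part of the driver): drives flagged with
 * un_f_cfg_playmsf_bcd expect the MSF fields above in binary-coded
 * decimal, one decimal digit per nibble. The macro body below is an
 * assumed equivalent of BYTE_TO_BCD, not the driver's definition.
 */
#if 0	/* example only, never compiled into the driver */
#define	EX_BYTE_TO_BCD(x)	((((x) / 10) << 4) | ((x) % 10))

/* EX_BYTE_TO_BCD(59) == 0x59: high nibble 5, low nibble 9 */
#endif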
27458 27493
27459 27494
27460 27495 /*
27461 27496 * Function: sr_play_trkind()
27462 27497 *
27463 27498 * Description: This routine is the driver entry point for handling CD-ROM
27464 27499 * ioctl requests to output the audio signals at the specified
27465 27500 * starting address and continue the audio play until the specified
27466 27501 * ending address (CDROMPLAYTRKIND). The address is in Track Index
27467 27502 * format.
27468 27503 *
27469 27504 * Arguments: dev - the device 'dev_t'
27470 27505 * data - pointer to user provided audio track/index structure,
27471 27506 * specifying start/end addresses.
27472 27507 * flag - this argument is a pass through to ddi_copyxxx()
27473 27508 * directly from the mode argument of ioctl().
27474 27509 *
27475 27510 * Return Code: the code returned by sd_send_scsi_cmd()
27476 27511 * EFAULT if ddi_copyxxx() fails
27477 27512  *		ENXIO if ddi_get_soft_state() fails
27478 27513 * EINVAL if data pointer is NULL
27479 27514 */
27480 27515
27481 27516 static int
27482 27517 sr_play_trkind(dev_t dev, caddr_t data, int flag)
27483 27518 {
27484 27519 struct cdrom_ti ti_struct;
27485 27520 struct cdrom_ti *ti = &ti_struct;
27486 27521 struct uscsi_cmd *com = NULL;
27487 27522 char cdb[CDB_GROUP1];
27488 27523 int rval;
27489 27524
27490 27525 if (data == NULL) {
27491 27526 return (EINVAL);
27492 27527 }
27493 27528
27494 27529 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
27495 27530 return (EFAULT);
27496 27531 }
27497 27532
27498 27533 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27499 27534 bzero(cdb, CDB_GROUP1);
27500 27535 cdb[0] = SCMD_PLAYAUDIO_TI;
27501 27536 cdb[4] = ti->cdti_trk0;
27502 27537 cdb[5] = ti->cdti_ind0;
27503 27538 cdb[7] = ti->cdti_trk1;
27504 27539 cdb[8] = ti->cdti_ind1;
27505 27540 com->uscsi_cdb = cdb;
27506 27541 com->uscsi_cdblen = CDB_GROUP1;
27507 27542 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27508 27543 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27509 27544 SD_PATH_STANDARD);
27510 27545 kmem_free(com, sizeof (*com));
27511 27546 return (rval);
27512 27547 }
27513 27548
27514 27549
27515 27550 /*
27516 27551 * Function: sr_read_all_subcodes()
27517 27552 *
27518 27553 * Description: This routine is the driver entry point for handling CD-ROM
27519 27554 * ioctl requests to return raw subcode data while the target is
27520 27555 * playing audio (CDROMSUBCODE).
27521 27556 *
27522 27557 * Arguments: dev - the device 'dev_t'
27523 27558 * data - pointer to user provided cdrom subcode structure,
27524 27559 * specifying the transfer length and address.
27525 27560 * flag - this argument is a pass through to ddi_copyxxx()
27526 27561 * directly from the mode argument of ioctl().
27527 27562 *
27528 27563 * Return Code: the code returned by sd_send_scsi_cmd()
27529 27564 * EFAULT if ddi_copyxxx() fails
27530 27565  *		ENXIO if ddi_get_soft_state() fails
27531 27566 * EINVAL if data pointer is NULL
27532 27567 */
27533 27568
27534 27569 static int
27535 27570 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
27536 27571 {
27537 27572 struct sd_lun *un = NULL;
27538 27573 struct uscsi_cmd *com = NULL;
27539 27574 struct cdrom_subcode *subcode = NULL;
27540 27575 int rval;
27541 27576 size_t buflen;
27542 27577 char cdb[CDB_GROUP5];
27543 27578
27544 27579 #ifdef _MULTI_DATAMODEL
27545 27580 /* To support ILP32 applications in an LP64 world */
27546 27581 struct cdrom_subcode32 cdrom_subcode32;
27547 27582 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32;
27548 27583 #endif
27549 27584 if (data == NULL) {
27550 27585 return (EINVAL);
27551 27586 }
27552 27587
27553 27588 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27554 27589 return (ENXIO);
27555 27590 }
27556 27591
27557 27592 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);
27558 27593
27559 27594 #ifdef _MULTI_DATAMODEL
27560 27595 switch (ddi_model_convert_from(flag & FMODELS)) {
27561 27596 case DDI_MODEL_ILP32:
27562 27597 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
27563 27598 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27564 27599 "sr_read_all_subcodes: ddi_copyin Failed\n");
27565 27600 kmem_free(subcode, sizeof (struct cdrom_subcode));
27566 27601 return (EFAULT);
27567 27602 }
27568 27603 /* Convert the ILP32 uscsi data from the application to LP64 */
27569 27604 cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
27570 27605 break;
27571 27606 case DDI_MODEL_NONE:
27572 27607 if (ddi_copyin(data, subcode,
27573 27608 sizeof (struct cdrom_subcode), flag)) {
27574 27609 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27575 27610 "sr_read_all_subcodes: ddi_copyin Failed\n");
27576 27611 kmem_free(subcode, sizeof (struct cdrom_subcode));
27577 27612 return (EFAULT);
27578 27613 }
27579 27614 break;
27580 27615 }
27581 27616 #else /* ! _MULTI_DATAMODEL */
27582 27617 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
27583 27618 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27584 27619 "sr_read_all_subcodes: ddi_copyin Failed\n");
27585 27620 kmem_free(subcode, sizeof (struct cdrom_subcode));
27586 27621 return (EFAULT);
27587 27622 }
27588 27623 #endif /* _MULTI_DATAMODEL */
27589 27624
27590 27625 /*
27591 27626 * Since MMC-2 expects max 3 bytes for length, check if the
27592 27627 * length input is greater than 3 bytes
27593 27628 */
27594 27629 if ((subcode->cdsc_length & 0xFF000000) != 0) {
27595 27630 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27596 27631 "sr_read_all_subcodes: "
27597 27632 "cdrom transfer length too large: %d (limit %d)\n",
27598 27633 subcode->cdsc_length, 0xFFFFFF);
27599 27634 kmem_free(subcode, sizeof (struct cdrom_subcode));
27600 27635 return (EINVAL);
27601 27636 }
27602 27637
27603 27638 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
27604 27639 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27605 27640 bzero(cdb, CDB_GROUP5);
27606 27641
27607 27642 if (un->un_f_mmc_cap == TRUE) {
27608 27643 cdb[0] = (char)SCMD_READ_CD;
27609 27644 cdb[2] = (char)0xff;
27610 27645 cdb[3] = (char)0xff;
27611 27646 cdb[4] = (char)0xff;
27612 27647 cdb[5] = (char)0xff;
27613 27648 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27614 27649 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27615 27650 cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
27616 27651 cdb[10] = 1;
27617 27652 } else {
27618 27653 /*
27619 27654 * Note: A vendor specific command (0xDF) is being used here to
27620 27655 * request a read of all subcodes.
27621 27656 */
27622 27657 cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
27623 27658 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
27624 27659 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27625 27660 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27626 27661 cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
27627 27662 }
27628 27663 com->uscsi_cdb = cdb;
27629 27664 com->uscsi_cdblen = CDB_GROUP5;
27630 27665 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
27631 27666 com->uscsi_buflen = buflen;
27632 27667 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27633 27668 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27634 27669 SD_PATH_STANDARD);
27635 27670 kmem_free(subcode, sizeof (struct cdrom_subcode));
27636 27671 kmem_free(com, sizeof (*com));
27637 27672 return (rval);
27638 27673 }
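/*
 * Illustrative sketch (not part of the driver): the MMC READ CD CDB
 * built above carries the transfer length in three bytes (CDB bytes
 * 6-8), so any length with bits set above bit 23 cannot be encoded.
 * The mask test used in this routine is equivalent to a range check:
 */
#if 0	/* example only, never compiled into the driver */
static int
ex_mmc_length_ok(unsigned int len)
{
	/* (len & 0xFF000000) == 0 is the same test as len <= 0xFFFFFF */
	return ((len & 0xFF000000) == 0);
}
#endif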
27639 27674
27640 27675
27641 27676 /*
27642 27677 * Function: sr_read_subchannel()
27643 27678 *
27644 27679 * Description: This routine is the driver entry point for handling CD-ROM
27645 27680 * ioctl requests to return the Q sub-channel data of the CD
27646 27681 * current position block. (CDROMSUBCHNL) The data includes the
27647 27682 * track number, index number, absolute CD-ROM address (LBA or MSF
27648 27683  *		format per the user), track relative CD-ROM address (LBA or MSF
27649 27684 * format per the user), control data and audio status.
27650 27685 *
27651 27686 * Arguments: dev - the device 'dev_t'
27652 27687 * data - pointer to user provided cdrom sub-channel structure
27653 27688 * flag - this argument is a pass through to ddi_copyxxx()
27654 27689 * directly from the mode argument of ioctl().
27655 27690 *
27656 27691 * Return Code: the code returned by sd_send_scsi_cmd()
27657 27692 * EFAULT if ddi_copyxxx() fails
27658 27693  *		ENXIO if ddi_get_soft_state() fails
27659 27694 * EINVAL if data pointer is NULL
27660 27695 */
27661 27696
27662 27697 static int
27663 27698 sr_read_subchannel(dev_t dev, caddr_t data, int flag)
27664 27699 {
27665 27700 struct sd_lun *un;
27666 27701 struct uscsi_cmd *com;
27667 27702	struct cdrom_subchnl	subchannel;
27668 27703	struct cdrom_subchnl	*subchnl = &subchannel;
27669 27704 char cdb[CDB_GROUP1];
27670 27705 caddr_t buffer;
27671 27706 int rval;
27672 27707
27673 27708 if (data == NULL) {
27674 27709 return (EINVAL);
27675 27710 }
27676 27711
27677 27712 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27678 27713 (un->un_state == SD_STATE_OFFLINE)) {
27679 27714 return (ENXIO);
27680 27715 }
27681 27716
27682 27717 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
27683 27718 return (EFAULT);
27684 27719 }
27685 27720
27686 27721 buffer = kmem_zalloc((size_t)16, KM_SLEEP);
27687 27722 bzero(cdb, CDB_GROUP1);
27688 27723 cdb[0] = SCMD_READ_SUBCHANNEL;
27689 27724 /* Set the MSF bit based on the user requested address format */
27690 27725 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
27691 27726 /*
27692 27727 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
27693 27728 * returned
27694 27729 */
27695 27730 cdb[2] = 0x40;
27696 27731 /*
27697 27732 * Set byte 3 to specify the return data format. A value of 0x01
27698 27733 * indicates that the CD-ROM current position should be returned.
27699 27734 */
27700 27735 cdb[3] = 0x01;
27701 27736 cdb[8] = 0x10;
27702 27737 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27703 27738 com->uscsi_cdb = cdb;
27704 27739 com->uscsi_cdblen = CDB_GROUP1;
27705 27740 com->uscsi_bufaddr = buffer;
27706 27741 com->uscsi_buflen = 16;
27707 27742 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27708 27743 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27709 27744 SD_PATH_STANDARD);
27710 27745 if (rval != 0) {
27711 27746 kmem_free(buffer, 16);
27712 27747 kmem_free(com, sizeof (*com));
27713 27748 return (rval);
27714 27749 }
27715 27750
27716 27751 /* Process the returned Q sub-channel data */
27717 27752 subchnl->cdsc_audiostatus = buffer[1];
27718 27753 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4;
27719 27754 subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
27720 27755 subchnl->cdsc_trk = buffer[6];
27721 27756 subchnl->cdsc_ind = buffer[7];
27722 27757 if (subchnl->cdsc_format & CDROM_LBA) {
27723 27758 subchnl->cdsc_absaddr.lba =
27724 27759 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27725 27760 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27726 27761 subchnl->cdsc_reladdr.lba =
27727 27762 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
27728 27763 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
27729 27764 } else if (un->un_f_cfg_readsub_bcd == TRUE) {
27730 27765 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
27731 27766 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
27732 27767 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
27733 27768 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
27734 27769 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
27735 27770 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
27736 27771 } else {
27737 27772 subchnl->cdsc_absaddr.msf.minute = buffer[9];
27738 27773 subchnl->cdsc_absaddr.msf.second = buffer[10];
27739 27774 subchnl->cdsc_absaddr.msf.frame = buffer[11];
27740 27775 subchnl->cdsc_reladdr.msf.minute = buffer[13];
27741 27776 subchnl->cdsc_reladdr.msf.second = buffer[14];
27742 27777 subchnl->cdsc_reladdr.msf.frame = buffer[15];
27743 27778 }
27744 27779 kmem_free(buffer, 16);
27745 27780 kmem_free(com, sizeof (*com));
27746 27781 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
27747 27782 != 0) {
27748 27783 return (EFAULT);
27749 27784 }
27750 27785 return (rval);
27751 27786 }
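/*
 * Illustrative sketch (not part of the driver): the LBA fields in the
 * READ SUBCHANNEL response parsed above are big-endian 32-bit values,
 * assembled by shifting each byte into place. A standalone equivalent:
 */
#if 0	/* example only, never compiled into the driver */
static unsigned int
ex_be32_to_host(const unsigned char *p)
{
	/* bytes 0x00 0x01 0x02 0x03 yield 0x00010203 */
	return (((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
	    ((unsigned int)p[2] << 8) | (unsigned int)p[3]);
}
#endif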
27752 27787
27753 27788
27754 27789 /*
27755 27790 * Function: sr_read_tocentry()
27756 27791 *
27757 27792 * Description: This routine is the driver entry point for handling CD-ROM
27758 27793 * ioctl requests to read from the Table of Contents (TOC)
27759 27794 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
27760 27795 * fields, the starting address (LBA or MSF format per the user)
27761 27796 * and the data mode if the user specified track is a data track.
27762 27797 *
27763 27798 * Note: The READ HEADER (0x44) command used in this routine is
27764 27799 * obsolete per the SCSI MMC spec but still supported in the
27765 27800  *		MT FUJI vendor spec. Most equipment adheres to MT FUJI,
27766 27801  *		so the command is still implemented in this routine.
27767 27802 *
27768 27803 * Arguments: dev - the device 'dev_t'
27769 27804 * data - pointer to user provided toc entry structure,
27770 27805 * specifying the track # and the address format
27771 27806 * (LBA or MSF).
27772 27807 * flag - this argument is a pass through to ddi_copyxxx()
27773 27808 * directly from the mode argument of ioctl().
27774 27809 *
27775 27810 * Return Code: the code returned by sd_send_scsi_cmd()
27776 27811 * EFAULT if ddi_copyxxx() fails
27777 27812  *		ENXIO if ddi_get_soft_state() fails
27778 27813 * EINVAL if data pointer is NULL
27779 27814 */
27780 27815
27781 27816 static int
27782 27817 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
27783 27818 {
27784 27819 struct sd_lun *un = NULL;
27785 27820 struct uscsi_cmd *com;
27786 27821 struct cdrom_tocentry toc_entry;
27787 27822 struct cdrom_tocentry *entry = &toc_entry;
27788 27823 caddr_t buffer;
27789 27824 int rval;
27790 27825 char cdb[CDB_GROUP1];
27791 27826
27792 27827 if (data == NULL) {
27793 27828 return (EINVAL);
27794 27829 }
27795 27830
27796 27831 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27797 27832 (un->un_state == SD_STATE_OFFLINE)) {
27798 27833 return (ENXIO);
27799 27834 }
27800 27835
27801 27836 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
27802 27837 return (EFAULT);
27803 27838 }
27804 27839
27805 27840 /* Validate the requested track and address format */
27806 27841 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
27807 27842 return (EINVAL);
27808 27843 }
27809 27844
27810 27845 if (entry->cdte_track == 0) {
27811 27846 return (EINVAL);
27812 27847 }
27813 27848
27814 27849 buffer = kmem_zalloc((size_t)12, KM_SLEEP);
27815 27850 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27816 27851 bzero(cdb, CDB_GROUP1);
27817 27852
27818 27853 cdb[0] = SCMD_READ_TOC;
27819 27854 /* Set the MSF bit based on the user requested address format */
27820 27855 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
27821 27856 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27822 27857 cdb[6] = BYTE_TO_BCD(entry->cdte_track);
27823 27858 } else {
27824 27859 cdb[6] = entry->cdte_track;
27825 27860 }
27826 27861
27827 27862 /*
27828 27863 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27829 27864 * (4 byte TOC response header + 8 byte track descriptor)
27830 27865 */
27831 27866 cdb[8] = 12;
27832 27867 com->uscsi_cdb = cdb;
27833 27868 com->uscsi_cdblen = CDB_GROUP1;
27834 27869 com->uscsi_bufaddr = buffer;
27835 27870 com->uscsi_buflen = 0x0C;
27836 27871 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
27837 27872 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27838 27873 SD_PATH_STANDARD);
27839 27874 if (rval != 0) {
27840 27875 kmem_free(buffer, 12);
27841 27876 kmem_free(com, sizeof (*com));
27842 27877 return (rval);
27843 27878 }
27844 27879
27845 27880 /* Process the toc entry */
27846 27881 entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
27847 27882 entry->cdte_ctrl = (buffer[5] & 0x0F);
27848 27883 if (entry->cdte_format & CDROM_LBA) {
27849 27884 entry->cdte_addr.lba =
27850 27885 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27851 27886 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27852 27887 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
27853 27888 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
27854 27889 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
27855 27890 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
27856 27891 /*
27857 27892 * Send a READ TOC command using the LBA address format to get
27858 27893 * the LBA for the track requested so it can be used in the
27859 27894 * READ HEADER request
27860 27895 *
27861 27896 * Note: The MSF bit of the READ HEADER command specifies the
27862 27897 * output format. The block address specified in that command
27863 27898 * must be in LBA format.
27864 27899 */
27865 27900 cdb[1] = 0;
27866 27901 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27867 27902 SD_PATH_STANDARD);
27868 27903 if (rval != 0) {
27869 27904 kmem_free(buffer, 12);
27870 27905 kmem_free(com, sizeof (*com));
27871 27906 return (rval);
27872 27907 }
27873 27908 } else {
27874 27909 entry->cdte_addr.msf.minute = buffer[9];
27875 27910 entry->cdte_addr.msf.second = buffer[10];
27876 27911 entry->cdte_addr.msf.frame = buffer[11];
27877 27912 /*
27878 27913 * Send a READ TOC command using the LBA address format to get
27879 27914 * the LBA for the track requested so it can be used in the
27880 27915 * READ HEADER request
27881 27916 *
27882 27917 * Note: The MSF bit of the READ HEADER command specifies the
27883 27918 * output format. The block address specified in that command
27884 27919 * must be in LBA format.
27885 27920 */
27886 27921 cdb[1] = 0;
27887 27922 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27888 27923 SD_PATH_STANDARD);
27889 27924 if (rval != 0) {
27890 27925 kmem_free(buffer, 12);
27891 27926 kmem_free(com, sizeof (*com));
27892 27927 return (rval);
27893 27928 }
27894 27929 }
27895 27930
27896 27931 /*
27897 27932 * Build and send the READ HEADER command to determine the data mode of
27898 27933 * the user specified track.
27899 27934 */
27900 27935 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
27901 27936 (entry->cdte_track != CDROM_LEADOUT)) {
27902 27937 bzero(cdb, CDB_GROUP1);
27903 27938 cdb[0] = SCMD_READ_HEADER;
27904 27939 cdb[2] = buffer[8];
27905 27940 cdb[3] = buffer[9];
27906 27941 cdb[4] = buffer[10];
27907 27942 cdb[5] = buffer[11];
27908 27943 cdb[8] = 0x08;
27909 27944 com->uscsi_buflen = 0x08;
27910 27945 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27911 27946 SD_PATH_STANDARD);
27912 27947 if (rval == 0) {
27913 27948 entry->cdte_datamode = buffer[0];
27914 27949 } else {
27915 27950 /*
27916 27951			 * The READ HEADER command failed. Since the command
27917 27952			 * is obsolete in one spec, it is better to return
27918 27953			 * -1 for an invalid track so that we can still
27919 27954			 * receive the rest of the TOC data.
27920 27955 */
27921 27956 entry->cdte_datamode = (uchar_t)-1;
27922 27957 }
27923 27958 } else {
27924 27959 entry->cdte_datamode = (uchar_t)-1;
27925 27960 }
27926 27961
27927 27962 kmem_free(buffer, 12);
27928 27963 kmem_free(com, sizeof (*com));
27929 27964 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
27930 27965 return (EFAULT);
27931 27966
27932 27967 return (rval);
27933 27968 }
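/*
 * Illustrative sketch (not part of the driver): the LBA and MSF address
 * formats returned above are related by the standard Red Book layout of
 * 75 frames per second with a 2-second (150-frame) lead-in offset, so
 * callers can convert one format to the other:
 */
#if 0	/* example only, never compiled into the driver */
static int
ex_msf_to_lba(int minute, int second, int frame)
{
	/* MSF 00:02:00 maps to LBA 0; MSF 00:02:16 maps to LBA 16 */
	return ((((minute * 60) + second) * 75) + frame - 150);
}
#endif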
27934 27969
27935 27970
27936 27971 /*
27937 27972 * Function: sr_read_tochdr()
27938 27973 *
27939 27974 * Description: This routine is the driver entry point for handling CD-ROM
27940 27975 * ioctl requests to read the Table of Contents (TOC) header
27941 27976  *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
27942 27977  *		and ending track numbers.
27943 27978 *
27944 27979 * Arguments: dev - the device 'dev_t'
27945 27980 * data - pointer to user provided toc header structure,
27946 27981 * specifying the starting and ending track numbers.
27947 27982 * flag - this argument is a pass through to ddi_copyxxx()
27948 27983 * directly from the mode argument of ioctl().
27949 27984 *
27950 27985 * Return Code: the code returned by sd_send_scsi_cmd()
27951 27986 * EFAULT if ddi_copyxxx() fails
27952 27987  *		ENXIO if ddi_get_soft_state() fails
27953 27988 * EINVAL if data pointer is NULL
27954 27989 */
27955 27990
27956 27991 static int
27957 27992 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27958 27993 {
27959 27994 struct sd_lun *un;
27960 27995 struct uscsi_cmd *com;
27961 27996 struct cdrom_tochdr toc_header;
27962 27997 struct cdrom_tochdr *hdr = &toc_header;
27963 27998 char cdb[CDB_GROUP1];
27964 27999 int rval;
27965 28000 caddr_t buffer;
27966 28001
27967 28002 if (data == NULL) {
27968 28003 return (EINVAL);
27969 28004 }
27970 28005
27971 28006 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27972 28007 (un->un_state == SD_STATE_OFFLINE)) {
27973 28008 return (ENXIO);
27974 28009 }
27975 28010
27976 28011 buffer = kmem_zalloc(4, KM_SLEEP);
27977 28012 bzero(cdb, CDB_GROUP1);
27978 28013 cdb[0] = SCMD_READ_TOC;
27979 28014 /*
27980 28015 * Specifying a track number of 0x00 in the READ TOC command indicates
27981 28016 * that the TOC header should be returned
27982 28017 */
27983 28018 cdb[6] = 0x00;
27984 28019 /*
27985 28020 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27986 28021 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27987 28022 */
27988 28023 cdb[8] = 0x04;
27989 28024 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27990 28025 com->uscsi_cdb = cdb;
27991 28026 com->uscsi_cdblen = CDB_GROUP1;
27992 28027 com->uscsi_bufaddr = buffer;
27993 28028 com->uscsi_buflen = 0x04;
27994 - com->uscsi_timeout = 300;
28029 + com->uscsi_timeout = 3 * un->un_cmd_timeout;
27995 28030 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27996 28031
27997 28032 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27998 28033 SD_PATH_STANDARD);
27999 28034 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
28000 28035 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
28001 28036 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
28002 28037 } else {
28003 28038 hdr->cdth_trk0 = buffer[2];
28004 28039 hdr->cdth_trk1 = buffer[3];
28005 28040 }
28006 28041 kmem_free(buffer, 4);
28007 28042 kmem_free(com, sizeof (*com));
28008 28043 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
28009 28044 return (EFAULT);
28010 28045 }
28011 28046 return (rval);
28012 28047 }
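/*
 * Illustrative sketch (not part of the driver): drives flagged with
 * un_f_cfg_read_toc_trk_bcd return the track numbers above in
 * binary-coded decimal. The macro body below is an assumed equivalent
 * of BCD_TO_BYTE, not the driver's definition.
 */
#if 0	/* example only, never compiled into the driver */
#define	EX_BCD_TO_BYTE(x)	(((((x) >> 4) & 0xF) * 10) + ((x) & 0xF))

/* EX_BCD_TO_BYTE(0x25) == 25: a BCD last-track byte of 0x25 means track 25 */
#endif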
28013 28048
28014 28049
28015 28050 /*
28016 28051 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
28017 28052  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
28018 28053 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
28019 28054 * digital audio and extended architecture digital audio. These modes are
28020 28055 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
28021 28056 * MMC specs.
28022 28057 *
28023 28058 * In addition to support for the various data formats these routines also
28024 28059 * include support for devices that implement only the direct access READ
28025 28060 * commands (0x08, 0x28), devices that implement the READ_CD commands
28026 28061 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
28027 28062  * READ CDXA commands (0xD8, 0xDB).
28028 28063 */
28029 28064
28030 28065 /*
28031 28066 * Function: sr_read_mode1()
28032 28067 *
28033 28068 * Description: This routine is the driver entry point for handling CD-ROM
28034 28069 * ioctl read mode1 requests (CDROMREADMODE1).
28035 28070 *
28036 28071 * Arguments: dev - the device 'dev_t'
28037 28072 * data - pointer to user provided cd read structure specifying
28038 28073 * the lba buffer address and length.
28039 28074 * flag - this argument is a pass through to ddi_copyxxx()
28040 28075 * directly from the mode argument of ioctl().
28041 28076 *
28042 28077 * Return Code: the code returned by sd_send_scsi_cmd()
28043 28078 * EFAULT if ddi_copyxxx() fails
28044 28079  *		ENXIO if ddi_get_soft_state() fails
28045 28080 * EINVAL if data pointer is NULL
28046 28081 */
28047 28082
28048 28083 static int
28049 28084 sr_read_mode1(dev_t dev, caddr_t data, int flag)
28050 28085 {
28051 28086 struct sd_lun *un;
28052 28087 struct cdrom_read mode1_struct;
28053 28088 struct cdrom_read *mode1 = &mode1_struct;
28054 28089 int rval;
28055 28090 sd_ssc_t *ssc;
28056 28091
28057 28092 #ifdef _MULTI_DATAMODEL
28058 28093 /* To support ILP32 applications in an LP64 world */
28059 28094 struct cdrom_read32 cdrom_read32;
28060 28095 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28061 28096 #endif /* _MULTI_DATAMODEL */
28062 28097
28063 28098 if (data == NULL) {
28064 28099 return (EINVAL);
28065 28100 }
28066 28101
28067 28102 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28068 28103 (un->un_state == SD_STATE_OFFLINE)) {
28069 28104 return (ENXIO);
28070 28105 }
28071 28106
28072 28107 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28073 28108 "sd_read_mode1: entry: un:0x%p\n", un);
28074 28109
28075 28110 #ifdef _MULTI_DATAMODEL
28076 28111 switch (ddi_model_convert_from(flag & FMODELS)) {
28077 28112 case DDI_MODEL_ILP32:
28078 28113 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28079 28114 return (EFAULT);
28080 28115 }
28081 28116 /* Convert the ILP32 uscsi data from the application to LP64 */
28082 28117 cdrom_read32tocdrom_read(cdrd32, mode1);
28083 28118 break;
28084 28119 case DDI_MODEL_NONE:
28085 28120 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28086 28121 return (EFAULT);
28087 28122 }
28088 28123 }
28089 28124 #else /* ! _MULTI_DATAMODEL */
28090 28125 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28091 28126 return (EFAULT);
28092 28127 }
28093 28128 #endif /* _MULTI_DATAMODEL */
28094 28129
28095 28130 ssc = sd_ssc_init(un);
28096 28131 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
28097 28132 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
28098 28133 sd_ssc_fini(ssc);
28099 28134
28100 28135 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28101 28136 "sd_read_mode1: exit: un:0x%p\n", un);
28102 28137
28103 28138 return (rval);
28104 28139 }
28105 28140
28106 28141
28107 28142 /*
28108 28143 * Function: sr_read_cd_mode2()
28109 28144 *
28110 28145 * Description: This routine is the driver entry point for handling CD-ROM
28111 28146 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28112 28147 * support the READ CD (0xBE) command or the 1st generation
28113 28148 * READ CD (0xD4) command.
28114 28149 *
28115 28150 * Arguments: dev - the device 'dev_t'
28116 28151 * data - pointer to user provided cd read structure specifying
28117 28152 * the lba buffer address and length.
28118 28153 * flag - this argument is a pass through to ddi_copyxxx()
28119 28154 * directly from the mode argument of ioctl().
28120 28155 *
28121 28156 * Return Code: the code returned by sd_send_scsi_cmd()
28122 28157 * EFAULT if ddi_copyxxx() fails
28123 28158  *		ENXIO if ddi_get_soft_state() fails
28124 28159 * EINVAL if data pointer is NULL
28125 28160 */
28126 28161
28127 28162 static int
28128 28163 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
28129 28164 {
28130 28165 struct sd_lun *un;
28131 28166 struct uscsi_cmd *com;
28132 28167 struct cdrom_read mode2_struct;
28133 28168 struct cdrom_read *mode2 = &mode2_struct;
28134 28169 uchar_t cdb[CDB_GROUP5];
28135 28170 int nblocks;
28136 28171 int rval;
28137 28172 #ifdef _MULTI_DATAMODEL
28138 28173 /* To support ILP32 applications in an LP64 world */
28139 28174 struct cdrom_read32 cdrom_read32;
28140 28175 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28141 28176 #endif /* _MULTI_DATAMODEL */
28142 28177
28143 28178 if (data == NULL) {
28144 28179 return (EINVAL);
28145 28180 }
28146 28181
28147 28182 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28148 28183 (un->un_state == SD_STATE_OFFLINE)) {
28149 28184 return (ENXIO);
28150 28185 }
28151 28186
28152 28187 #ifdef _MULTI_DATAMODEL
28153 28188 switch (ddi_model_convert_from(flag & FMODELS)) {
28154 28189 case DDI_MODEL_ILP32:
28155 28190 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28156 28191 return (EFAULT);
28157 28192 }
28158 28193 /* Convert the ILP32 uscsi data from the application to LP64 */
28159 28194 cdrom_read32tocdrom_read(cdrd32, mode2);
28160 28195 break;
28161 28196 case DDI_MODEL_NONE:
28162 28197 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28163 28198 return (EFAULT);
28164 28199 }
28165 28200 break;
28166 28201 }
28167 28202
28168 28203 #else /* ! _MULTI_DATAMODEL */
28169 28204 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28170 28205 return (EFAULT);
28171 28206 }
28172 28207 #endif /* _MULTI_DATAMODEL */
28173 28208
28174 28209 bzero(cdb, sizeof (cdb));
28175 28210 if (un->un_f_cfg_read_cd_xd4 == TRUE) {
28176 28211 /* Read command supported by 1st generation atapi drives */
28177 28212 cdb[0] = SCMD_READ_CDD4;
28178 28213 } else {
28179 28214 /* Universal CD Access Command */
28180 28215 cdb[0] = SCMD_READ_CD;
28181 28216 }
28182 28217
28183 28218 /*
28184 28219	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
28185 28220 */
28186 28221 cdb[1] = CDROM_SECTOR_TYPE_MODE2;
28187 28222
28188 28223 /* set the start address */
28189 28224 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
28190 28225 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
28191 28226 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28192 28227 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
28193 28228
28194 28229 /* set the transfer length */
28195 28230 nblocks = mode2->cdread_buflen / 2336;
28196 28231 cdb[6] = (uchar_t)(nblocks >> 16);
28197 28232 cdb[7] = (uchar_t)(nblocks >> 8);
28198 28233 cdb[8] = (uchar_t)nblocks;
28199 28234
28200 28235 /* set the filter bits */
28201 28236 cdb[9] = CDROM_READ_CD_USERDATA;
28202 28237
28203 28238 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28204 28239 com->uscsi_cdb = (caddr_t)cdb;
28205 28240 com->uscsi_cdblen = sizeof (cdb);
28206 28241 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28207 28242 com->uscsi_buflen = mode2->cdread_buflen;
28208 28243 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28209 28244
28210 28245 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28211 28246 SD_PATH_STANDARD);
28212 28247 kmem_free(com, sizeof (*com));
28213 28248 return (rval);
28214 28249 }
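/*
 * Illustrative sketch (not part of the driver): READ CD takes the
 * starting LBA in four CDB bytes and the block count in three, both
 * big-endian, exactly as packed above. A standalone equivalent:
 */
#if 0	/* example only, never compiled into the driver */
static void
ex_pack_read_cd(unsigned char *cdb, unsigned int lba, unsigned int nblks)
{
	cdb[2] = (lba >> 24) & 0xFF;	/* starting LBA, MSB first */
	cdb[3] = (lba >> 16) & 0xFF;
	cdb[4] = (lba >> 8) & 0xFF;
	cdb[5] = lba & 0xFF;
	cdb[6] = (nblks >> 16) & 0xFF;	/* 24-bit transfer length */
	cdb[7] = (nblks >> 8) & 0xFF;
	cdb[8] = nblks & 0xFF;
}
#endif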
28215 28250
28216 28251
28217 28252 /*
28218 28253 * Function: sr_read_mode2()
28219 28254 *
28220 28255 * Description: This routine is the driver entry point for handling CD-ROM
28221 28256 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28222 28257 * do not support the READ CD (0xBE) command.
28223 28258 *
28224 28259 * Arguments: dev - the device 'dev_t'
28225 28260 * data - pointer to user provided cd read structure specifying
28226 28261 * the lba buffer address and length.
28227 28262 * flag - this argument is a pass through to ddi_copyxxx()
28228 28263 * directly from the mode argument of ioctl().
28229 28264 *
28230 28265 * Return Code: the code returned by sd_send_scsi_cmd()
28231 28266 * EFAULT if ddi_copyxxx() fails
28232 28267  *		ENXIO if ddi_get_soft_state() fails
28233 28268 * EINVAL if data pointer is NULL
28234 28269 * EIO if fail to reset block size
28235 28270 * EAGAIN if commands are in progress in the driver
28236 28271 */
28237 28272
28238 28273 static int
28239 28274 sr_read_mode2(dev_t dev, caddr_t data, int flag)
28240 28275 {
28241 28276 struct sd_lun *un;
28242 28277 struct cdrom_read mode2_struct;
28243 28278 struct cdrom_read *mode2 = &mode2_struct;
28244 28279 int rval;
28245 28280 uint32_t restore_blksize;
28246 28281 struct uscsi_cmd *com;
28247 28282 uchar_t cdb[CDB_GROUP0];
28248 28283 int nblocks;
28249 28284
28250 28285 #ifdef _MULTI_DATAMODEL
28251 28286 /* To support ILP32 applications in an LP64 world */
28252 28287 struct cdrom_read32 cdrom_read32;
28253 28288 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28254 28289 #endif /* _MULTI_DATAMODEL */
28255 28290
28256 28291 if (data == NULL) {
28257 28292 return (EINVAL);
28258 28293 }
28259 28294
28260 28295 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28261 28296 (un->un_state == SD_STATE_OFFLINE)) {
28262 28297 return (ENXIO);
28263 28298 }
28264 28299
28265 28300 /*
28266 28301 * Because this routine will update the device and driver block size
28267 28302	 * being used, we want to make sure there are no commands in progress.
28268 28303 * If commands are in progress the user will have to try again.
28269 28304 *
28270 28305 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
28271 28306 * in sdioctl to protect commands from sdioctl through to the top of
28272 28307 * sd_uscsi_strategy. See sdioctl for details.
28273 28308 */
28274 28309 mutex_enter(SD_MUTEX(un));
28275 28310 if (un->un_ncmds_in_driver != 1) {
28276 28311 mutex_exit(SD_MUTEX(un));
28277 28312 return (EAGAIN);
28278 28313 }
28279 28314 mutex_exit(SD_MUTEX(un));
28280 28315
28281 28316 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28282 28317 "sd_read_mode2: entry: un:0x%p\n", un);
28283 28318
28284 28319 #ifdef _MULTI_DATAMODEL
28285 28320 switch (ddi_model_convert_from(flag & FMODELS)) {
28286 28321 case DDI_MODEL_ILP32:
28287 28322 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28288 28323 return (EFAULT);
28289 28324 }
28290 28325 /* Convert the ILP32 uscsi data from the application to LP64 */
28291 28326 cdrom_read32tocdrom_read(cdrd32, mode2);
28292 28327 break;
28293 28328 case DDI_MODEL_NONE:
28294 28329 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28295 28330 return (EFAULT);
28296 28331 }
28297 28332 break;
28298 28333 }
28299 28334 #else /* ! _MULTI_DATAMODEL */
28300 28335 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
28301 28336 return (EFAULT);
28302 28337 }
28303 28338 #endif /* _MULTI_DATAMODEL */
28304 28339
28305 28340 /* Store the current target block size for restoration later */
28306 28341 restore_blksize = un->un_tgt_blocksize;
28307 28342
28308 28343 /* Change the device and soft state target block size to 2336 */
28309 28344 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
28310 28345 rval = EIO;
28311 28346 goto done;
28312 28347 }
28313 28348
28314 28349
28315 28350 bzero(cdb, sizeof (cdb));
28316 28351
28317 28352 /* set READ operation */
28318 28353 cdb[0] = SCMD_READ;
28319 28354
28320 28355 /* adjust lba for 2kbyte blocks from 512 byte blocks */
28321 28356 mode2->cdread_lba >>= 2;
28322 28357
28323 28358 /* set the start address */
28324 28359 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
28325 28360 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28326 28361 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
28327 28362
28328 28363 /* set the transfer length */
28329 28364 nblocks = mode2->cdread_buflen / 2336;
28330 28365 cdb[4] = (uchar_t)nblocks & 0xFF;
28331 28366
28332 28367 /* build command */
28333 28368 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28334 28369 com->uscsi_cdb = (caddr_t)cdb;
28335 28370 com->uscsi_cdblen = sizeof (cdb);
28336 28371 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28337 28372 com->uscsi_buflen = mode2->cdread_buflen;
28338 28373 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28339 28374
28340 28375 /*
28341 28376 * Issue SCSI command with user space address for read buffer.
28342 28377 *
28343 28378	 * This sends the command through the main channel in the driver.
28344 28379 *
28345 28380 * Since this is accessed via an IOCTL call, we go through the
28346 28381 * standard path, so that if the device was powered down, then
28347 28382 * it would be 'awakened' to handle the command.
28348 28383 */
28349 28384 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28350 28385 SD_PATH_STANDARD);
28351 28386
28352 28387 kmem_free(com, sizeof (*com));
28353 28388
28354 28389 /* Restore the device and soft state target block size */
28355 28390 if (sr_sector_mode(dev, restore_blksize) != 0) {
28356 28391 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28357 28392		    "cannot switch back to mode 1\n");
28358 28393 /*
28359 28394 * If sd_send_scsi_READ succeeded we still need to report
28360 28395 * an error because we failed to reset the block size
28361 28396 */
28362 28397 if (rval == 0) {
28363 28398 rval = EIO;
28364 28399 }
28365 28400 }
28366 28401
28367 28402 done:
28368 28403 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28369 28404 "sd_read_mode2: exit: un:0x%p\n", un);
28370 28405
28371 28406 return (rval);
28372 28407 }
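/*
 * Illustrative sketch (not part of the driver): the "cdread_lba >>= 2"
 * above rescales an address given in 512-byte units to the drive's
 * 2048-byte units, since 2048 / 512 == 4:
 */
#if 0	/* example only, never compiled into the driver */
static unsigned int
ex_lba_512_to_2k(unsigned int lba512)
{
	/* 512-byte block 64 is 2048-byte block 64 >> 2 == 16 */
	return (lba512 >> 2);
}
#endif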
28373 28408
28374 28409
28375 28410 /*
28376 28411 * Function: sr_sector_mode()
28377 28412 *
28378 28413 * Description: This utility function is used by sr_read_mode2 to set the target
28379 28414  *		block size based on the user-specified size. This is a legacy
28380 28415  *		implementation based upon a vendor-specific mode page.
28381 28416 *
28382 28417  * Arguments: dev - the device 'dev_t'
28383 28418  *		blksize - the block size to set; either 2336 or
28384 28419  *		512.
28385 28420  *
28386 28421  * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
28387 28422  *		sd_send_scsi_MODE_SELECT()
28388 28423  *		ENXIO if ddi_get_soft_state() fails
28390 28425 */
28391 28426
28392 28427 static int
28393 28428 sr_sector_mode(dev_t dev, uint32_t blksize)
28394 28429 {
28395 28430 struct sd_lun *un;
28396 28431 uchar_t *sense;
28397 28432 uchar_t *select;
28398 28433 int rval;
28399 28434 sd_ssc_t *ssc;
28400 28435
28401 28436 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28402 28437 (un->un_state == SD_STATE_OFFLINE)) {
28403 28438 return (ENXIO);
28404 28439 }
28405 28440
28406 28441 sense = kmem_zalloc(20, KM_SLEEP);
28407 28442
28408 28443 /* Note: This is a vendor specific mode page (0x81) */
28409 28444 ssc = sd_ssc_init(un);
28410 28445 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
28411 28446 SD_PATH_STANDARD);
28412 28447 sd_ssc_fini(ssc);
28413 28448 if (rval != 0) {
28414 28449 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28415 28450 "sr_sector_mode: Mode Sense failed\n");
28416 28451 kmem_free(sense, 20);
28417 28452 return (rval);
28418 28453 }
28419 28454 select = kmem_zalloc(20, KM_SLEEP);
28420 28455 select[3] = 0x08;
28421 28456 select[10] = ((blksize >> 8) & 0xff);
28422 28457 select[11] = (blksize & 0xff);
28423 28458 select[12] = 0x01;
28424 28459 select[13] = 0x06;
28425 28460 select[14] = sense[14];
28426 28461 select[15] = sense[15];
28427 28462 if (blksize == SD_MODE2_BLKSIZE) {
28428 28463 select[14] |= 0x01;
28429 28464 }
28430 28465
28431 28466 ssc = sd_ssc_init(un);
28432 28467 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
28433 28468 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
28434 28469 sd_ssc_fini(ssc);
28435 28470 if (rval != 0) {
28436 28471 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28437 28472 "sr_sector_mode: Mode Select failed\n");
28438 28473 } else {
28439 28474 /*
28440 28475 * Only update the softstate block size if we successfully
28441 28476 * changed the device block mode.
28442 28477 */
28443 28478 mutex_enter(SD_MUTEX(un));
28444 28479 sd_update_block_info(un, blksize, 0);
28445 28480 mutex_exit(SD_MUTEX(un));
28446 28481 }
28447 28482 kmem_free(sense, 20);
28448 28483 kmem_free(select, 20);
28449 28484 return (rval);
28450 28485 }
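/*
 * Illustrative sketch (not part of the driver): bytes 10 and 11 of the
 * select buffer above carry the block size big-endian, so a request for
 * 2336 (SD_MODE2_BLKSIZE, 0x0920) stores 0x09 and 0x20 respectively:
 */
#if 0	/* example only, never compiled into the driver */
static void
ex_store_blksize(unsigned char *select, unsigned int blksize)
{
	select[10] = (blksize >> 8) & 0xFF;	/* 2336 -> 0x09 */
	select[11] = blksize & 0xFF;		/* 2336 -> 0x20 */
}
#endif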
28451 28486
28452 28487
28453 28488 /*
28454 28489 * Function: sr_read_cdda()
28455 28490 *
28456 28491 * Description: This routine is the driver entry point for handling CD-ROM
28457 28492  *		ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
28458 28493  *		the target supports CDDA these requests are handled via a vendor
28459 28494  *		specific command (0xD8). If the target does not support CDDA
28460 28495  *		these requests are handled via the READ CD command (0xBE).
28461 28496 *
28462 28497 * Arguments: dev - the device 'dev_t'
28463 28498 * data - pointer to user provided CD-DA structure specifying
28464 28499 * the track starting address, transfer length, and
28465 28500 * subcode options.
28466 28501 * flag - this argument is a pass through to ddi_copyxxx()
28467 28502 * directly from the mode argument of ioctl().
28468 28503 *
28469 28504 * Return Code: the code returned by sd_send_scsi_cmd()
28470 28505 * EFAULT if ddi_copyxxx() fails
28471 28506  *		ENXIO if ddi_get_soft_state() fails
28472 28507 * EINVAL if invalid arguments are provided
28473 28508  *		ENOTTY if the drive cannot return the requested subcode format
28474 28509 */
28475 28510
28476 28511 static int
28477 28512 sr_read_cdda(dev_t dev, caddr_t data, int flag)
28478 28513 {
28479 28514 struct sd_lun *un;
28480 28515 struct uscsi_cmd *com;
28481 28516 struct cdrom_cdda *cdda;
28482 28517 int rval;
28483 28518 size_t buflen;
28484 28519 char cdb[CDB_GROUP5];
28485 28520
28486 28521 #ifdef _MULTI_DATAMODEL
28487 28522 /* To support ILP32 applications in an LP64 world */
28488 28523 struct cdrom_cdda32 cdrom_cdda32;
28489 28524 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32;
28490 28525 #endif /* _MULTI_DATAMODEL */
28491 28526
28492 28527 if (data == NULL) {
28493 28528 return (EINVAL);
28494 28529 }
28495 28530
28496 28531 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28497 28532 return (ENXIO);
28498 28533 }
28499 28534
28500 28535 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);
28501 28536
28502 28537 #ifdef _MULTI_DATAMODEL
28503 28538 switch (ddi_model_convert_from(flag & FMODELS)) {
28504 28539 case DDI_MODEL_ILP32:
28505 28540 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
28506 28541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28507 28542 "sr_read_cdda: ddi_copyin Failed\n");
28508 28543 kmem_free(cdda, sizeof (struct cdrom_cdda));
28509 28544 return (EFAULT);
28510 28545 }
28511 28546 /* Convert the ILP32 uscsi data from the application to LP64 */
28512 28547 cdrom_cdda32tocdrom_cdda(cdda32, cdda);
28513 28548 break;
28514 28549 case DDI_MODEL_NONE:
28515 28550 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28516 28551 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28517 28552 "sr_read_cdda: ddi_copyin Failed\n");
28518 28553 kmem_free(cdda, sizeof (struct cdrom_cdda));
28519 28554 return (EFAULT);
28520 28555 }
28521 28556 break;
28522 28557 }
28523 28558 #else /* ! _MULTI_DATAMODEL */
28524 28559 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28525 28560 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28526 28561 "sr_read_cdda: ddi_copyin Failed\n");
28527 28562 kmem_free(cdda, sizeof (struct cdrom_cdda));
28528 28563 return (EFAULT);
28529 28564 }
28530 28565 #endif /* _MULTI_DATAMODEL */
28531 28566
28532 28567 /*
28533 28568 * Since MMC-2 expects max 3 bytes for length, check if the
28534 28569 * length input is greater than 3 bytes
28535 28570 */
28536 28571 if ((cdda->cdda_length & 0xFF000000) != 0) {
28537 28572 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
28538 28573 "cdrom transfer length too large: %d (limit %d)\n",
28539 28574 cdda->cdda_length, 0xFFFFFF);
28540 28575 kmem_free(cdda, sizeof (struct cdrom_cdda));
28541 28576 return (EINVAL);
28542 28577 }
28543 28578
28544 28579 switch (cdda->cdda_subcode) {
28545 28580 case CDROM_DA_NO_SUBCODE:
28546 28581 buflen = CDROM_BLK_2352 * cdda->cdda_length;
28547 28582 break;
28548 28583 case CDROM_DA_SUBQ:
28549 28584 buflen = CDROM_BLK_2368 * cdda->cdda_length;
28550 28585 break;
28551 28586 case CDROM_DA_ALL_SUBCODE:
28552 28587 buflen = CDROM_BLK_2448 * cdda->cdda_length;
28553 28588 break;
28554 28589 case CDROM_DA_SUBCODE_ONLY:
28555 28590 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
28556 28591 break;
28557 28592 default:
28558 28593 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28559 28594 "sr_read_cdda: Subcode '0x%x' Not Supported\n",
28560 28595 cdda->cdda_subcode);
28561 28596 kmem_free(cdda, sizeof (struct cdrom_cdda));
28562 28597 return (EINVAL);
28563 28598 }
28564 28599
28565 28600 /* Build and send the command */
28566 28601 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28567 28602 bzero(cdb, CDB_GROUP5);
28568 28603
28569 28604 if (un->un_f_cfg_cdda == TRUE) {
28570 28605 cdb[0] = (char)SCMD_READ_CD;
28571 28606 cdb[1] = 0x04;
28572 28607 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28573 28608 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28574 28609 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28575 28610 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28576 28611 cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28577 28612 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28578 28613 cdb[8] = ((cdda->cdda_length) & 0x000000ff);
28579 28614 cdb[9] = 0x10;
28580 28615 switch (cdda->cdda_subcode) {
28581 28616 case CDROM_DA_NO_SUBCODE :
28582 28617 cdb[10] = 0x0;
28583 28618 break;
28584 28619 case CDROM_DA_SUBQ :
28585 28620 cdb[10] = 0x2;
28586 28621 break;
28587 28622 case CDROM_DA_ALL_SUBCODE :
28588 28623 cdb[10] = 0x1;
28589 28624 break;
28590 28625 case CDROM_DA_SUBCODE_ONLY :
28591 28626 /* FALLTHROUGH */
28592 28627 default :
28593 28628 kmem_free(cdda, sizeof (struct cdrom_cdda));
28594 28629 kmem_free(com, sizeof (*com));
28595 28630 return (ENOTTY);
28596 28631 }
28597 28632 } else {
28598 28633 cdb[0] = (char)SCMD_READ_CDDA;
28599 28634 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28600 28635 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28601 28636 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28602 28637 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28603 28638 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
28604 28639 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28605 28640 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28606 28641 cdb[9] = ((cdda->cdda_length) & 0x000000ff);
28607 28642 cdb[10] = cdda->cdda_subcode;
28608 28643 }
28609 28644
28610 28645 com->uscsi_cdb = cdb;
28611 28646 com->uscsi_cdblen = CDB_GROUP5;
28612 28647 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
28613 28648 com->uscsi_buflen = buflen;
28614 28649 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28615 28650
28616 28651 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28617 28652 SD_PATH_STANDARD);
28618 28653
28619 28654 kmem_free(cdda, sizeof (struct cdrom_cdda));
28620 28655 kmem_free(com, sizeof (*com));
28621 28656 return (rval);
28622 28657 }
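/*
 * Illustrative sketch (not part of the driver): the buffer length
 * computed above scales with the per-sector size implied by the
 * requested subcode option: 2352 bytes of audio, audio plus the
 * 16-byte Q subcode (2368), audio plus all 96 subcode bytes (2448),
 * or the 96 subcode bytes alone. The table below assumes the cdio.h
 * option codes are 0 through 3 in the order shown.
 */
#if 0	/* example only, never compiled into the driver */
static unsigned int
ex_cdda_buflen(unsigned int subcode_opt, unsigned int nblks)
{
	static const unsigned int blksz[4] = {
		2352,	/* CDROM_DA_NO_SUBCODE: audio data only */
		2368,	/* CDROM_DA_SUBQ: audio + 16-byte Q subcode */
		2448,	/* CDROM_DA_ALL_SUBCODE: audio + 96-byte subcode */
		96	/* CDROM_DA_SUBCODE_ONLY: subcode only */
	};
	return (blksz[subcode_opt & 3] * nblks);
}
#endif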
28623 28658
28624 28659
28625 28660 /*
28626 28661 * Function: sr_read_cdxa()
28627 28662 *
28628 28663 * Description: This routine is the driver entry point for handling CD-ROM
28629 28664 * ioctl requests to return CD-XA (Extended Architecture) data.
28630 28665 * (CDROMCDXA).
28631 28666 *
28632 28667 * Arguments: dev - the device 'dev_t'
28633 28668 * data - pointer to user provided CD-XA structure specifying
28634 28669 * the data starting address, transfer length, and format
28635 28670 * flag - this argument is a pass through to ddi_copyxxx()
28636 28671 * directly from the mode argument of ioctl().
28637 28672 *
28638 28673 * Return Code: the code returned by sd_send_scsi_cmd()
28639 28674 * EFAULT if ddi_copyxxx() fails
28640 28675  *		ENXIO if ddi_get_soft_state() fails
28641 28676 * EINVAL if data pointer is NULL
28642 28677 */
28643 28678
28644 28679 static int
28645 28680 sr_read_cdxa(dev_t dev, caddr_t data, int flag)
28646 28681 {
28647 28682 struct sd_lun *un;
28648 28683 struct uscsi_cmd *com;
28649 28684 struct cdrom_cdxa *cdxa;
28650 28685 int rval;
28651 28686 size_t buflen;
28652 28687 char cdb[CDB_GROUP5];
28653 28688 uchar_t read_flags;
28654 28689
28655 28690 #ifdef _MULTI_DATAMODEL
28656 28691 /* To support ILP32 applications in an LP64 world */
28657 28692 struct cdrom_cdxa32 cdrom_cdxa32;
28658 28693 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32;
28659 28694 #endif /* _MULTI_DATAMODEL */
28660 28695
28661 28696 if (data == NULL) {
28662 28697 return (EINVAL);
28663 28698 }
28664 28699
28665 28700 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28666 28701 return (ENXIO);
28667 28702 }
28668 28703
28669 28704 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);
28670 28705
28671 28706 #ifdef _MULTI_DATAMODEL
28672 28707 switch (ddi_model_convert_from(flag & FMODELS)) {
28673 28708 case DDI_MODEL_ILP32:
28674 28709 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
28675 28710 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28676 28711 return (EFAULT);
28677 28712 }
28678 28713 /*
28679 28714 * Convert the ILP32 uscsi data from the
28680 28715 * application to LP64 for internal use.
28681 28716 */
28682 28717 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28683 28718 break;
28684 28719 case DDI_MODEL_NONE:
28685 28720 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28686 28721 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28687 28722 return (EFAULT);
28688 28723 }
28689 28724 break;
28690 28725 }
28691 28726 #else /* ! _MULTI_DATAMODEL */
28692 28727 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28693 28728 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28694 28729 return (EFAULT);
28695 28730 }
28696 28731 #endif /* _MULTI_DATAMODEL */
28697 28732
28698 28733 /*
28699 28734 	 * Since MMC-2 expects at most 3 bytes for the length, check
28700 28735 	 * whether the length input exceeds 3 bytes
28701 28736 */
28702 28737 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28703 28738 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28704 28739 "cdrom transfer length too large: %d (limit %d)\n",
28705 28740 cdxa->cdxa_length, 0xFFFFFF);
28706 28741 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28707 28742 return (EINVAL);
28708 28743 }
28709 28744
28710 28745 switch (cdxa->cdxa_format) {
28711 28746 case CDROM_XA_DATA:
28712 28747 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28713 28748 read_flags = 0x10;
28714 28749 break;
28715 28750 case CDROM_XA_SECTOR_DATA:
28716 28751 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28717 28752 read_flags = 0xf8;
28718 28753 break;
28719 28754 case CDROM_XA_DATA_W_ERROR:
28720 28755 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28721 28756 read_flags = 0xfc;
28722 28757 break;
28723 28758 default:
28724 28759 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28725 28760 "sr_read_cdxa: Format '0x%x' Not Supported\n",
28726 28761 cdxa->cdxa_format);
28727 28762 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28728 28763 return (EINVAL);
28729 28764 }
28730 28765
28731 28766 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28732 28767 bzero(cdb, CDB_GROUP5);
28733 28768 if (un->un_f_mmc_cap == TRUE) {
28734 28769 cdb[0] = (char)SCMD_READ_CD;
28735 28770 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28736 28771 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28737 28772 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28738 28773 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28739 28774 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28740 28775 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28741 28776 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28742 28777 cdb[9] = (char)read_flags;
28743 28778 } else {
28744 28779 /*
28745 28780 		 * Note: A vendor specific command (0xDB) is being used here to
28746 28781 * request a read of all subcodes.
28747 28782 */
28748 28783 cdb[0] = (char)SCMD_READ_CDXA;
28749 28784 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28750 28785 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28751 28786 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28752 28787 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28753 28788 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
28754 28789 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28755 28790 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28756 28791 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
28757 28792 cdb[10] = cdxa->cdxa_format;
28758 28793 }
28759 28794 com->uscsi_cdb = cdb;
28760 28795 com->uscsi_cdblen = CDB_GROUP5;
28761 28796 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
28762 28797 com->uscsi_buflen = buflen;
28763 28798 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28764 28799 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28765 28800 SD_PATH_STANDARD);
28766 28801 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28767 28802 kmem_free(com, sizeof (*com));
28768 28803 return (rval);
28769 28804 }
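
The masked shifts in both CDB branches above are a plain big-endian (MSB-first) encoding of 32-bit values into consecutive CDB bytes. A minimal user-space sketch of the same packing; cdb_put_be32 is a hypothetical helper, not part of the driver:

    #include <stdint.h>

    /*
     * Pack a 32-bit value into cdb[off..off+3], most significant byte
     * first, mirroring the shifts used above for cdxa_addr/cdxa_length.
     */
    static void
    cdb_put_be32(unsigned char *cdb, int off, uint32_t val)
    {
            cdb[off]     = (val >> 24) & 0xff;
            cdb[off + 1] = (val >> 16) & 0xff;
            cdb[off + 2] = (val >> 8) & 0xff;
            cdb[off + 3] = val & 0xff;
    }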
28770 28805
28771 28806
28772 28807 /*
28773 28808 * Function: sr_eject()
28774 28809 *
28775 28810 * Description: This routine is the driver entry point for handling CD-ROM
28776 28811 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
28777 28812 *
28778 28813 * Arguments: dev - the device 'dev_t'
28779 28814 *
28780 28815 * Return Code: the code returned by sd_send_scsi_cmd()
28781 28816 */
28782 28817
28783 28818 static int
28784 28819 sr_eject(dev_t dev)
28785 28820 {
28786 28821 struct sd_lun *un;
28787 28822 int rval;
28788 28823 sd_ssc_t *ssc;
28789 28824
28790 28825 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28791 28826 (un->un_state == SD_STATE_OFFLINE)) {
28792 28827 return (ENXIO);
28793 28828 }
28794 28829
28795 28830 /*
28796 28831 * To prevent race conditions with the eject
28797 28832 * command, keep track of an eject command as
28798 28833 * it progresses. If we are already handling
28799 28834 * an eject command in the driver for the given
28800 28835 	 * unit and another request to eject is received,
28801 28836 	 * immediately return EAGAIN so we don't lose
28802 28837 * the command if the current eject command fails.
28803 28838 */
28804 28839 mutex_enter(SD_MUTEX(un));
28805 28840 if (un->un_f_ejecting == TRUE) {
28806 28841 mutex_exit(SD_MUTEX(un));
28807 28842 return (EAGAIN);
28808 28843 }
28809 28844 un->un_f_ejecting = TRUE;
28810 28845 mutex_exit(SD_MUTEX(un));
28811 28846
28812 28847 ssc = sd_ssc_init(un);
28813 28848 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
28814 28849 SD_PATH_STANDARD);
28815 28850 sd_ssc_fini(ssc);
28816 28851
28817 28852 if (rval != 0) {
28818 28853 mutex_enter(SD_MUTEX(un));
28819 28854 un->un_f_ejecting = FALSE;
28820 28855 mutex_exit(SD_MUTEX(un));
28821 28856 return (rval);
28822 28857 }
28823 28858
28824 28859 ssc = sd_ssc_init(un);
28825 28860 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
28826 28861 SD_TARGET_EJECT, SD_PATH_STANDARD);
28827 28862 sd_ssc_fini(ssc);
28828 28863
28829 28864 if (rval == 0) {
28830 28865 mutex_enter(SD_MUTEX(un));
28831 28866 sr_ejected(un);
28832 28867 un->un_mediastate = DKIO_EJECTED;
28833 28868 un->un_f_ejecting = FALSE;
28834 28869 cv_broadcast(&un->un_state_cv);
28835 28870 mutex_exit(SD_MUTEX(un));
28836 28871 } else {
28837 28872 mutex_enter(SD_MUTEX(un));
28838 28873 un->un_f_ejecting = FALSE;
28839 28874 mutex_exit(SD_MUTEX(un));
28840 28875 }
28841 28876 return (rval);
28842 28877 }
28843 28878
28844 28879
28845 28880 /*
28846 28881 * Function: sr_ejected()
28847 28882 *
28848 28883 * Description: This routine updates the soft state structure to invalidate the
28849 28884 * geometry information after the media has been ejected or a
28850 28885 * media eject has been detected.
28851 28886 *
28852 28887 * Arguments: un - driver soft state (unit) structure
28853 28888 */
28854 28889
28855 28890 static void
28856 28891 sr_ejected(struct sd_lun *un)
28857 28892 {
28858 28893 struct sd_errstats *stp;
28859 28894
28860 28895 ASSERT(un != NULL);
28861 28896 ASSERT(mutex_owned(SD_MUTEX(un)));
28862 28897
28863 28898 un->un_f_blockcount_is_valid = FALSE;
28864 28899 un->un_f_tgt_blocksize_is_valid = FALSE;
28865 28900 mutex_exit(SD_MUTEX(un));
28866 28901 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28867 28902 mutex_enter(SD_MUTEX(un));
28868 28903
28869 28904 if (un->un_errstats != NULL) {
28870 28905 stp = (struct sd_errstats *)un->un_errstats->ks_data;
28871 28906 stp->sd_capacity.value.ui64 = 0;
28872 28907 }
28873 28908 }
28874 28909
28875 28910
28876 28911 /*
28877 28912 * Function: sr_check_wp()
28878 28913 *
28879 28914 * Description: This routine checks the write protection of a removable
28880 28915 * media disk and hotpluggable devices via the write protect bit of
28881 28916  *		the Mode Page Header device specific field. Some devices choke
28882 28917  *		on unsupported mode pages. To work around this issue,
28883 28918  *		this routine uses the 0x3f mode page (request for
28884 28919  *		all pages) for all device types.
28885 28920 *
28886 28921 * Arguments: dev - the device 'dev_t'
28887 28922 *
28888 28923 * Return Code: int indicating if the device is write protected (1) or not (0)
28889 28924 *
28890 28925 * Context: Kernel thread.
28891 28926 *
28892 28927 */
28893 28928
28894 28929 static int
28895 28930 sr_check_wp(dev_t dev)
28896 28931 {
28897 28932 struct sd_lun *un;
28898 28933 uchar_t device_specific;
28899 28934 uchar_t *sense;
28900 28935 int hdrlen;
28901 28936 int rval = FALSE;
28902 28937 int status;
28903 28938 sd_ssc_t *ssc;
28904 28939
28905 28940 /*
28906 28941 * Note: The return codes for this routine should be reworked to
28907 28942 * properly handle the case of a NULL softstate.
28908 28943 */
28909 28944 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28910 28945 return (FALSE);
28911 28946 }
28912 28947
28913 28948 if (un->un_f_cfg_is_atapi == TRUE) {
28914 28949 /*
28915 28950 * The mode page contents are not required; set the allocation
28916 28951 * length for the mode page header only
28917 28952 */
28918 28953 hdrlen = MODE_HEADER_LENGTH_GRP2;
28919 28954 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28920 28955 ssc = sd_ssc_init(un);
28921 28956 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28922 28957 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28923 28958 sd_ssc_fini(ssc);
28924 28959 if (status != 0)
28925 28960 goto err_exit;
28926 28961 device_specific =
28927 28962 ((struct mode_header_grp2 *)sense)->device_specific;
28928 28963 } else {
28929 28964 hdrlen = MODE_HEADER_LENGTH;
28930 28965 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28931 28966 ssc = sd_ssc_init(un);
28932 28967 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28933 28968 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28934 28969 sd_ssc_fini(ssc);
28935 28970 if (status != 0)
28936 28971 goto err_exit;
28937 28972 device_specific =
28938 28973 ((struct mode_header *)sense)->device_specific;
28939 28974 }
28940 28975
28941 28976
28942 28977 /*
28943 28978 	 * The mode sense succeeded, so report the device as write
28944 28979 	 * protected if the write protect bit is set in the device
28945 28980 	 * specific field of the mode page header.
28946 28981 */
28947 28982 if (device_specific & WRITE_PROTECT) {
28948 28983 rval = TRUE;
28949 28984 }
28950 28985
28951 28986 err_exit:
28952 28987 kmem_free(sense, hdrlen);
28953 28988 return (rval);
28954 28989 }
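
Both branches of sr_check_wp() reduce to testing one bit of the mode parameter header's device-specific byte. A self-contained sketch of that test; the 0x80 mask is assumed to match the driver's WRITE_PROTECT definition (bit 7 is the standard SCSI write-protect bit for direct-access devices):

    #include <stdint.h>

    /* Bit 7 of the device-specific byte is the write-protect (WP) bit. */
    #define WP_MASK	0x80

    static int
    is_write_protected(uint8_t device_specific)
    {
            return ((device_specific & WP_MASK) != 0);
    }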
28955 28990
28956 28991 /*
28957 28992 * Function: sr_volume_ctrl()
28958 28993 *
28959 28994 * Description: This routine is the driver entry point for handling CD-ROM
28960 28995 * audio output volume ioctl requests. (CDROMVOLCTRL)
28961 28996 *
28962 28997 * Arguments: dev - the device 'dev_t'
28963 28998 * data - pointer to user audio volume control structure
28964 28999 * flag - this argument is a pass through to ddi_copyxxx()
28965 29000 * directly from the mode argument of ioctl().
28966 29001 *
28967 29002 * Return Code: the code returned by sd_send_scsi_cmd()
28968 29003 * EFAULT if ddi_copyxxx() fails
28969 29004  *		ENXIO if ddi_get_soft_state fails
28970 29005 * EINVAL if data pointer is NULL
28971 29006 *
28972 29007 */
28973 29008
28974 29009 static int
28975 29010 sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
28976 29011 {
28977 29012 struct sd_lun *un;
28978 29013 struct cdrom_volctrl volume;
28979 29014 struct cdrom_volctrl *vol = &volume;
28980 29015 uchar_t *sense_page;
28981 29016 uchar_t *select_page;
28982 29017 uchar_t *sense;
28983 29018 uchar_t *select;
28984 29019 int sense_buflen;
28985 29020 int select_buflen;
28986 29021 int rval;
28987 29022 sd_ssc_t *ssc;
28988 29023
28989 29024 if (data == NULL) {
28990 29025 return (EINVAL);
28991 29026 }
28992 29027
28993 29028 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28994 29029 (un->un_state == SD_STATE_OFFLINE)) {
28995 29030 return (ENXIO);
28996 29031 }
28997 29032
28998 29033 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
28999 29034 return (EFAULT);
29000 29035 }
29001 29036
29002 29037 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29003 29038 struct mode_header_grp2 *sense_mhp;
29004 29039 struct mode_header_grp2 *select_mhp;
29005 29040 int bd_len;
29006 29041
29007 29042 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
29008 29043 select_buflen = MODE_HEADER_LENGTH_GRP2 +
29009 29044 MODEPAGE_AUDIO_CTRL_LEN;
29010 29045 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29011 29046 select = kmem_zalloc(select_buflen, KM_SLEEP);
29012 29047 ssc = sd_ssc_init(un);
29013 29048 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
29014 29049 sense_buflen, MODEPAGE_AUDIO_CTRL,
29015 29050 SD_PATH_STANDARD);
29016 29051 sd_ssc_fini(ssc);
29017 29052
29018 29053 if (rval != 0) {
29019 29054 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
29020 29055 "sr_volume_ctrl: Mode Sense Failed\n");
29021 29056 kmem_free(sense, sense_buflen);
29022 29057 kmem_free(select, select_buflen);
29023 29058 return (rval);
29024 29059 }
29025 29060 sense_mhp = (struct mode_header_grp2 *)sense;
29026 29061 select_mhp = (struct mode_header_grp2 *)select;
29027 29062 bd_len = (sense_mhp->bdesc_length_hi << 8) |
29028 29063 sense_mhp->bdesc_length_lo;
29029 29064 if (bd_len > MODE_BLK_DESC_LENGTH) {
29030 29065 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29031 29066 "sr_volume_ctrl: Mode Sense returned invalid "
29032 29067 "block descriptor length\n");
29033 29068 kmem_free(sense, sense_buflen);
29034 29069 kmem_free(select, select_buflen);
29035 29070 return (EIO);
29036 29071 }
29037 29072 sense_page = (uchar_t *)
29038 29073 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
29039 29074 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
29040 29075 select_mhp->length_msb = 0;
29041 29076 select_mhp->length_lsb = 0;
29042 29077 select_mhp->bdesc_length_hi = 0;
29043 29078 select_mhp->bdesc_length_lo = 0;
29044 29079 } else {
29045 29080 struct mode_header *sense_mhp, *select_mhp;
29046 29081
29047 29082 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29048 29083 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29049 29084 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29050 29085 select = kmem_zalloc(select_buflen, KM_SLEEP);
29051 29086 ssc = sd_ssc_init(un);
29052 29087 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
29053 29088 sense_buflen, MODEPAGE_AUDIO_CTRL,
29054 29089 SD_PATH_STANDARD);
29055 29090 sd_ssc_fini(ssc);
29056 29091
29057 29092 if (rval != 0) {
29058 29093 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29059 29094 "sr_volume_ctrl: Mode Sense Failed\n");
29060 29095 kmem_free(sense, sense_buflen);
29061 29096 kmem_free(select, select_buflen);
29062 29097 return (rval);
29063 29098 }
29064 29099 sense_mhp = (struct mode_header *)sense;
29065 29100 select_mhp = (struct mode_header *)select;
29066 29101 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
29067 29102 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29068 29103 "sr_volume_ctrl: Mode Sense returned invalid "
29069 29104 "block descriptor length\n");
29070 29105 kmem_free(sense, sense_buflen);
29071 29106 kmem_free(select, select_buflen);
29072 29107 return (EIO);
29073 29108 }
29074 29109 sense_page = (uchar_t *)
29075 29110 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
29076 29111 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
29077 29112 select_mhp->length = 0;
29078 29113 select_mhp->bdesc_length = 0;
29079 29114 }
29080 29115 /*
29081 29116 	 * Note: An audio control data structure could be created and overlaid
29082 29117 * on the following in place of the array indexing method implemented.
29083 29118 */
29084 29119
29085 29120 /* Build the select data for the user volume data */
29086 29121 select_page[0] = MODEPAGE_AUDIO_CTRL;
29087 29122 select_page[1] = 0xE;
29088 29123 /* Set the immediate bit */
29089 29124 select_page[2] = 0x04;
29090 29125 /* Zero out reserved fields */
29091 29126 select_page[3] = 0x00;
29092 29127 select_page[4] = 0x00;
29093 29128 /* Return sense data for fields not to be modified */
29094 29129 select_page[5] = sense_page[5];
29095 29130 select_page[6] = sense_page[6];
29096 29131 select_page[7] = sense_page[7];
29097 29132 /* Set the user specified volume levels for channel 0 and 1 */
29098 29133 select_page[8] = 0x01;
29099 29134 select_page[9] = vol->channel0;
29100 29135 select_page[10] = 0x02;
29101 29136 select_page[11] = vol->channel1;
29102 29137 	/* Channels 2 and 3 are currently unsupported, so return the sense data */
29103 29138 select_page[12] = sense_page[12];
29104 29139 select_page[13] = sense_page[13];
29105 29140 select_page[14] = sense_page[14];
29106 29141 select_page[15] = sense_page[15];
29107 29142
29108 29143 ssc = sd_ssc_init(un);
29109 29144 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29110 29145 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
29111 29146 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29112 29147 } else {
29113 29148 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
29114 29149 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29115 29150 }
29116 29151 sd_ssc_fini(ssc);
29117 29152
29118 29153 kmem_free(sense, sense_buflen);
29119 29154 kmem_free(select, select_buflen);
29120 29155 return (rval);
29121 29156 }
29122 29157
29123 29158
29124 29159 /*
29125 29160 * Function: sr_read_sony_session_offset()
29126 29161 *
29127 29162 * Description: This routine is the driver entry point for handling CD-ROM
29128 29163 * ioctl requests for session offset information. (CDROMREADOFFSET)
29129 29164 * The address of the first track in the last session of a
29130 29165  *		multi-session CD-ROM is returned.
29131 29166 *
29132 29167 * Note: This routine uses a vendor specific key value in the
29133 29168 * command control field without implementing any vendor check here
29134 29169 * or in the ioctl routine.
29135 29170 *
29136 29171 * Arguments: dev - the device 'dev_t'
29137 29172 * data - pointer to an int to hold the requested address
29138 29173 * flag - this argument is a pass through to ddi_copyxxx()
29139 29174 * directly from the mode argument of ioctl().
29140 29175 *
29141 29176 * Return Code: the code returned by sd_send_scsi_cmd()
29142 29177 * EFAULT if ddi_copyxxx() fails
29143 29178  *		ENXIO if ddi_get_soft_state fails
29144 29179 * EINVAL if data pointer is NULL
29145 29180 */
29146 29181
29147 29182 static int
29148 29183 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
29149 29184 {
29150 29185 struct sd_lun *un;
29151 29186 struct uscsi_cmd *com;
29152 29187 caddr_t buffer;
29153 29188 char cdb[CDB_GROUP1];
29154 29189 int session_offset = 0;
29155 29190 int rval;
29156 29191
29157 29192 if (data == NULL) {
29158 29193 return (EINVAL);
29159 29194 }
29160 29195
29161 29196 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29162 29197 (un->un_state == SD_STATE_OFFLINE)) {
29163 29198 return (ENXIO);
29164 29199 }
29165 29200
29166 29201 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
29167 29202 bzero(cdb, CDB_GROUP1);
29168 29203 cdb[0] = SCMD_READ_TOC;
29169 29204 /*
29170 29205 	 * Bytes 7 & 8 of the CDB hold the allocation length; 12 bytes covers
29171 29206 	 * a single entry (4 byte TOC response header + 8 bytes of data).
29172 29207 */
29173 29208 cdb[8] = SONY_SESSION_OFFSET_LEN;
29174 29209 /* Byte 9 is the control byte. A vendor specific value is used */
29175 29210 cdb[9] = SONY_SESSION_OFFSET_KEY;
29176 29211 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29177 29212 com->uscsi_cdb = cdb;
29178 29213 com->uscsi_cdblen = CDB_GROUP1;
29179 29214 com->uscsi_bufaddr = buffer;
29180 29215 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29181 29216 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
29182 29217
29183 29218 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29184 29219 SD_PATH_STANDARD);
29185 29220 if (rval != 0) {
29186 29221 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29187 29222 kmem_free(com, sizeof (*com));
29188 29223 return (rval);
29189 29224 }
29190 29225 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29191 29226 session_offset =
29192 29227 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29193 29228 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29194 29229 /*
29195 29230 		 * The returned offset is in current lbasize blocks. Convert
29196 29231 		 * to 2k blocks before returning it to the user.
29197 29232 */
29198 29233 if (un->un_tgt_blocksize == CDROM_BLK_512) {
29199 29234 session_offset >>= 2;
29200 29235 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29201 29236 session_offset >>= 1;
29202 29237 }
29203 29238 }
29204 29239
29205 29240 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29206 29241 rval = EFAULT;
29207 29242 }
29208 29243
29209 29244 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29210 29245 kmem_free(com, sizeof (*com));
29211 29246 return (rval);
29212 29247 }
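
The decode above assembles a big-endian 32-bit address from bytes 8..11 of the response and then rescales it from the current target block size to 2k units. A standalone sketch of the same arithmetic; the literals 512 and 1024 stand in for CDROM_BLK_512 and CDROM_BLK_1024:

    #include <stdint.h>

    static int
    session_offset_2k(const unsigned char *buf, uint32_t tgt_blocksize)
    {
            int off = ((uint32_t)buf[8] << 24) | ((uint32_t)buf[9] << 16) |
                ((uint32_t)buf[10] << 8) | (uint32_t)buf[11];

            if (tgt_blocksize == 512)
                    off >>= 2;      /* four 512-byte blocks per 2k block */
            else if (tgt_blocksize == 1024)
                    off >>= 1;      /* two 1k blocks per 2k block */

            return (off);
    }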
29213 29248
29214 29249
29215 29250 /*
29216 29251 * Function: sd_wm_cache_constructor()
29217 29252 *
29218 29253 * Description: Cache Constructor for the wmap cache for the read/modify/write
29219 29254 * devices.
29220 29255 *
29221 29256 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29222 29257 * un - sd_lun structure for the device.
29223 29258 * flag - the km flags passed to constructor
29224 29259 *
29225 29260 * Return Code: 0 on success.
29226 29261 * -1 on failure.
29227 29262 */
29228 29263
29229 29264 /*ARGSUSED*/
29230 29265 static int
29231 29266 sd_wm_cache_constructor(void *wm, void *un, int flags)
29232 29267 {
29233 29268 bzero(wm, sizeof (struct sd_w_map));
29234 29269 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29235 29270 return (0);
29236 29271 }
29237 29272
29238 29273
29239 29274 /*
29240 29275 * Function: sd_wm_cache_destructor()
29241 29276 *
29242 29277 * Description: Cache destructor for the wmap cache for the read/modify/write
29243 29278 * devices.
29244 29279 *
29245 29280  *		wm - A pointer to the sd_w_map to be destroyed.
29246 29281 * un - sd_lun structure for the device.
29247 29282 */
29248 29283 /*ARGSUSED*/
29249 29284 static void
29250 29285 sd_wm_cache_destructor(void *wm, void *un)
29251 29286 {
29252 29287 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29253 29288 }
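
These two callbacks are registered when the per-unit wmap cache is created with kmem_cache_create(9F) in the attach path. A hedged sketch of that registration; the cache name and alignment shown here are illustrative:

    /* Per-unit cache; 'un' is passed through to the constructor. */
    un->un_wm_cache = kmem_cache_create("sd_wmap_cache",
        sizeof (struct sd_w_map), 8, sd_wm_cache_constructor,
        sd_wm_cache_destructor, NULL, (void *)un, NULL, 0);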
29254 29289
29255 29290
29256 29291 /*
29257 29292 * Function: sd_range_lock()
29258 29293 *
29259 29294  * Description: Lock the specified range of blocks to ensure that a
29260 29295  *		read-modify-write is atomic and that no other I/O writes
29261 29296  *		to the same location. The range is specified in terms
29262 29297  *		of start and end blocks. Block numbers are actual
29263 29298  *		media block numbers, not system block numbers.
29264 29299 *
29265 29300 * Arguments: un - sd_lun structure for the device.
29266 29301 * startb - The starting block number
29267 29302 * endb - The end block number
29268 29303 * typ - type of i/o - simple/read_modify_write
29269 29304 *
29270 29305 * Return Code: wm - pointer to the wmap structure.
29271 29306 *
29272 29307 * Context: This routine can sleep.
29273 29308 */
29274 29309
29275 29310 static struct sd_w_map *
29276 29311 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
29277 29312 {
29278 29313 struct sd_w_map *wmp = NULL;
29279 29314 struct sd_w_map *sl_wmp = NULL;
29280 29315 struct sd_w_map *tmp_wmp;
29281 29316 wm_state state = SD_WM_CHK_LIST;
29282 29317
29283 29318
29284 29319 ASSERT(un != NULL);
29285 29320 ASSERT(!mutex_owned(SD_MUTEX(un)));
29286 29321
29287 29322 mutex_enter(SD_MUTEX(un));
29288 29323
29289 29324 while (state != SD_WM_DONE) {
29290 29325
29291 29326 switch (state) {
29292 29327 case SD_WM_CHK_LIST:
29293 29328 /*
29294 29329 * This is the starting state. Check the wmap list
29295 29330 * to see if the range is currently available.
29296 29331 */
29297 29332 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
29298 29333 /*
29299 29334 * If this is a simple write and no rmw
29300 29335 * i/o is pending then try to lock the
29301 29336 * range as the range should be available.
29302 29337 */
29303 29338 state = SD_WM_LOCK_RANGE;
29304 29339 } else {
29305 29340 tmp_wmp = sd_get_range(un, startb, endb);
29306 29341 if (tmp_wmp != NULL) {
29307 29342 if ((wmp != NULL) && ONLIST(un, wmp)) {
29308 29343 /*
29309 29344 * Should not keep onlist wmps
29310 29345 					 * while waiting; this macro
29311 29346 * will also do wmp = NULL;
29312 29347 */
29313 29348 FREE_ONLIST_WMAP(un, wmp);
29314 29349 }
29315 29350 /*
29316 29351 				 * sl_wmp is the wmap on which the wait
29317 29352 				 * is done. Since tmp_wmp points to the
29318 29353 				 * in-use wmap, set sl_wmp to tmp_wmp and
29319 29354 				 * change the state to sleep.
29320 29355 */
29321 29356 sl_wmp = tmp_wmp;
29322 29357 state = SD_WM_WAIT_MAP;
29323 29358 } else {
29324 29359 state = SD_WM_LOCK_RANGE;
29325 29360 }
29326 29361
29327 29362 }
29328 29363 break;
29329 29364
29330 29365 case SD_WM_LOCK_RANGE:
29331 29366 ASSERT(un->un_wm_cache);
29332 29367 /*
29333 29368 			 * The range needs to be locked; try to get a wmap.
29334 29369 			 * First attempt with KM_NOSLEEP, since we want to
29335 29370 			 * avoid sleeping if possible: we would have to release
29336 29371 			 * the sd mutex in order to sleep.
29337 29372 */
29338 29373 if (wmp == NULL)
29339 29374 wmp = kmem_cache_alloc(un->un_wm_cache,
29340 29375 KM_NOSLEEP);
29341 29376 if (wmp == NULL) {
29342 29377 mutex_exit(SD_MUTEX(un));
29343 - _NOTE(DATA_READABLE_WITHOUT_LOCK
29344 - (sd_lun::un_wm_cache))
29345 29378 wmp = kmem_cache_alloc(un->un_wm_cache,
29346 29379 KM_SLEEP);
29347 29380 mutex_enter(SD_MUTEX(un));
29348 29381 /*
29349 29382 				 * We released the mutex, so recheck and
29350 29383 				 * go to the check-list state.
29351 29384 */
29352 29385 state = SD_WM_CHK_LIST;
29353 29386 } else {
29354 29387 /*
29355 29388 				 * We exit the state machine since we
29356 29389 				 * have the wmap. Do the housekeeping first:
29357 29390 				 * place the wmap on the wmap list if it is
29358 29391 				 * not already there, then set the state to done.
29359 29392 */
29360 29393 wmp->wm_start = startb;
29361 29394 wmp->wm_end = endb;
29362 29395 wmp->wm_flags = typ | SD_WM_BUSY;
29363 29396 if (typ & SD_WTYPE_RMW) {
29364 29397 un->un_rmw_count++;
29365 29398 }
29366 29399 /*
29367 29400 * If not already on the list then link
29368 29401 */
29369 29402 if (!ONLIST(un, wmp)) {
29370 29403 wmp->wm_next = un->un_wm;
29371 29404 wmp->wm_prev = NULL;
29372 29405 if (wmp->wm_next)
29373 29406 wmp->wm_next->wm_prev = wmp;
29374 29407 un->un_wm = wmp;
29375 29408 }
29376 29409 state = SD_WM_DONE;
29377 29410 }
29378 29411 break;
29379 29412
29380 29413 case SD_WM_WAIT_MAP:
29381 29414 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
29382 29415 /*
29383 29416 * Wait is done on sl_wmp, which is set in the
29384 29417 * check_list state.
29385 29418 */
29386 29419 sl_wmp->wm_wanted_count++;
29387 29420 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
29388 29421 sl_wmp->wm_wanted_count--;
29389 29422 /*
29390 29423 * We can reuse the memory from the completed sl_wmp
29391 29424 			 * lock range for our new lock, but only if no one is
29392 29425 * waiting for it.
29393 29426 */
29394 29427 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
29395 29428 if (sl_wmp->wm_wanted_count == 0) {
29396 29429 if (wmp != NULL) {
29397 29430 CHK_N_FREEWMP(un, wmp);
29398 29431 }
29399 29432 wmp = sl_wmp;
29400 29433 }
29401 29434 sl_wmp = NULL;
29402 29435 /*
29403 29436 			 * After waking up, we need to recheck the
29404 29437 			 * availability of the range.
29405 29438 */
29406 29439 state = SD_WM_CHK_LIST;
29407 29440 break;
29408 29441
29409 29442 default:
29410 29443 panic("sd_range_lock: "
29411 29444 "Unknown state %d in sd_range_lock", state);
29412 29445 /*NOTREACHED*/
29413 29446 } /* switch(state) */
29414 29447
29415 29448 } /* while(state != SD_WM_DONE) */
29416 29449
29417 29450 mutex_exit(SD_MUTEX(un));
29418 29451
29419 29452 ASSERT(wmp != NULL);
29420 29453
29421 29454 return (wmp);
29422 29455 }
29423 29456
29424 29457
29425 29458 /*
29426 29459 * Function: sd_get_range()
29427 29460 *
29428 29461  * Description: Find whether there is any I/O overlapping this one.
29429 29462  *		Returns the write map of the first such I/O, NULL otherwise.
29430 29463 *
29431 29464 * Arguments: un - sd_lun structure for the device.
29432 29465 * startb - The starting block number
29433 29466 * endb - The end block number
29434 29467 *
29435 29468 * Return Code: wm - pointer to the wmap structure.
29436 29469 */
29437 29470
29438 29471 static struct sd_w_map *
29439 29472 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
29440 29473 {
29441 29474 struct sd_w_map *wmp;
29442 29475
29443 29476 ASSERT(un != NULL);
29444 29477
29445 29478 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
29446 29479 if (!(wmp->wm_flags & SD_WM_BUSY)) {
29447 29480 continue;
29448 29481 }
29449 29482 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
29450 29483 break;
29451 29484 }
29452 29485 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
29453 29486 break;
29454 29487 }
29455 29488 }
29456 29489
29457 29490 return (wmp);
29458 29491 }
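
The loop above tests whether either endpoint of the new range falls inside a busy map. For reference, the general closed-interval overlap predicate can be written symmetrically; note the symmetric form also covers the case where one range fully encloses the other:

    #include <sys/types.h>

    /*
     * Closed intervals [s1,e1] and [s2,e2] overlap iff each starts
     * at or before the point where the other ends.
     */
    static int
    ranges_overlap(daddr_t s1, daddr_t e1, daddr_t s2, daddr_t e2)
    {
            return (s1 <= e2 && s2 <= e1);
    }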
29459 29492
29460 29493
29461 29494 /*
29462 29495 * Function: sd_free_inlist_wmap()
29463 29496 *
29464 29497 * Description: Unlink and free a write map struct.
29465 29498 *
29466 29499 * Arguments: un - sd_lun structure for the device.
29467 29500 * wmp - sd_w_map which needs to be unlinked.
29468 29501 */
29469 29502
29470 29503 static void
29471 29504 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29472 29505 {
29473 29506 ASSERT(un != NULL);
29474 29507
29475 29508 if (un->un_wm == wmp) {
29476 29509 un->un_wm = wmp->wm_next;
29477 29510 } else {
29478 29511 wmp->wm_prev->wm_next = wmp->wm_next;
29479 29512 }
29480 29513
29481 29514 if (wmp->wm_next) {
29482 29515 wmp->wm_next->wm_prev = wmp->wm_prev;
29483 29516 }
29484 29517
29485 29518 wmp->wm_next = wmp->wm_prev = NULL;
29486 29519
29487 29520 kmem_cache_free(un->un_wm_cache, wmp);
29488 29521 }
29489 29522
29490 29523
29491 29524 /*
29492 29525 * Function: sd_range_unlock()
29493 29526 *
29494 29527 * Description: Unlock the range locked by wm.
29495 29528 * Free write map if nobody else is waiting on it.
29496 29529 *
29497 29530 * Arguments: un - sd_lun structure for the device.
29498 29531 * wmp - sd_w_map which needs to be unlinked.
29499 29532 */
29500 29533
29501 29534 static void
29502 29535 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29503 29536 {
29504 29537 ASSERT(un != NULL);
29505 29538 ASSERT(wm != NULL);
29506 29539 ASSERT(!mutex_owned(SD_MUTEX(un)));
29507 29540
29508 29541 mutex_enter(SD_MUTEX(un));
29509 29542
29510 29543 if (wm->wm_flags & SD_WTYPE_RMW) {
29511 29544 un->un_rmw_count--;
29512 29545 }
29513 29546
29514 29547 if (wm->wm_wanted_count) {
29515 29548 wm->wm_flags = 0;
29516 29549 /*
29517 29550 * Broadcast that the wmap is available now.
29518 29551 */
29519 29552 cv_broadcast(&wm->wm_avail);
29520 29553 } else {
29521 29554 /*
29522 29555 		 * If no one is waiting on the map, it should be freed.
29523 29556 */
29524 29557 sd_free_inlist_wmap(un, wm);
29525 29558 }
29526 29559
29527 29560 mutex_exit(SD_MUTEX(un));
29528 29561 }
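
Callers bracket a read-modify-write with these two routines. A minimal usage sketch; the start/end block values and the I/O body are placeholders:

    struct sd_w_map *wm;

    /* Block out overlapping I/O for [start, end]. */
    wm = sd_range_lock(un, start, end, SD_WTYPE_RMW);

    /* ... read the enclosing target blocks, modify, write back ... */

    /* Wake any waiters, or free the map if nobody wants it. */
    sd_range_unlock(un, wm);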
29529 29562
29530 29563
29531 29564 /*
29532 29565 * Function: sd_read_modify_write_task
29533 29566 *
29534 29567 * Description: Called from a taskq thread to initiate the write phase of
29535 29568 * a read-modify-write request. This is used for targets where
29536 29569 * un->un_sys_blocksize != un->un_tgt_blocksize.
29537 29570 *
29538 29571 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29539 29572 *
29540 29573 * Context: Called under taskq thread context.
29541 29574 */
29542 29575
29543 29576 static void
29544 29577 sd_read_modify_write_task(void *arg)
29545 29578 {
29546 29579 struct sd_mapblocksize_info *bsp;
29547 29580 struct buf *bp;
29548 29581 struct sd_xbuf *xp;
29549 29582 struct sd_lun *un;
29550 29583
29551 29584 bp = arg; /* The bp is given in arg */
29552 29585 ASSERT(bp != NULL);
29553 29586
29554 29587 /* Get the pointer to the layer-private data struct */
29555 29588 xp = SD_GET_XBUF(bp);
29556 29589 ASSERT(xp != NULL);
29557 29590 bsp = xp->xb_private;
29558 29591 ASSERT(bsp != NULL);
29559 29592
29560 29593 un = SD_GET_UN(bp);
29561 29594 ASSERT(un != NULL);
29562 29595 ASSERT(!mutex_owned(SD_MUTEX(un)));
29563 29596
29564 29597 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29565 29598 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29566 29599
29567 29600 /*
29568 29601 * This is the write phase of a read-modify-write request, called
29569 29602 * under the context of a taskq thread in response to the completion
29570 29603 * of the read portion of the rmw request completing under interrupt
29571 29604 * context. The write request must be sent from here down the iostart
29572 29605 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29573 29606 * we use the layer index saved in the layer-private data area.
29574 29607 */
29575 29608 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29576 29609
29577 29610 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29578 29611 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29579 29612 }
29580 29613
29581 29614
29582 29615 /*
29583 29616 * Function: sddump_do_read_of_rmw()
29584 29617 *
29585 29618  * Description: This routine will be called from sddump. If sddump is
29586 29619  *		called with an I/O that is not aligned on a device blocksize
29587 29620  *		boundary, then the write has to be converted to a
29588 29621  *		read-modify-write. Do the read part here in order to keep
29589 29622  *		sddump simple. Note that the sd_mutex is held across the
29590 29623  *		call to this routine.
29591 29624 *
29592 29625 * Arguments: un - sd_lun
29593 29626 * blkno - block number in terms of media block size.
29594 29627 * nblk - number of blocks.
29595 29628 * bpp - pointer to pointer to the buf structure. On return
29596 29629 * from this function, *bpp points to the valid buffer
29597 29630 * to which the write has to be done.
29598 29631 *
29599 29632 * Return Code: 0 for success or errno-type return code
29600 29633 */
29601 29634
29602 29635 static int
29603 29636 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29604 29637 struct buf **bpp)
29605 29638 {
29606 29639 int err;
29607 29640 int i;
29608 29641 int rval;
29609 29642 struct buf *bp;
29610 29643 struct scsi_pkt *pkt = NULL;
29611 29644 uint32_t target_blocksize;
29612 29645
29613 29646 ASSERT(un != NULL);
29614 29647 ASSERT(mutex_owned(SD_MUTEX(un)));
29615 29648
29616 29649 target_blocksize = un->un_tgt_blocksize;
29617 29650
29618 29651 mutex_exit(SD_MUTEX(un));
29619 29652
29620 29653 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29621 29654 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29622 29655 if (bp == NULL) {
29623 29656 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29624 29657 "no resources for dumping; giving up");
29625 29658 err = ENOMEM;
29626 29659 goto done;
29627 29660 }
29628 29661
29629 29662 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29630 29663 blkno, nblk);
29631 29664 if (rval != 0) {
29632 29665 scsi_free_consistent_buf(bp);
29633 29666 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29634 29667 "no resources for dumping; giving up");
29635 29668 err = ENOMEM;
29636 29669 goto done;
29637 29670 }
29638 29671
29639 29672 pkt->pkt_flags |= FLAG_NOINTR;
29640 29673
29641 29674 err = EIO;
29642 29675 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29643 29676
29644 29677 /*
29645 29678 * Scsi_poll returns 0 (success) if the command completes and
29646 29679 * the status block is STATUS_GOOD. We should only check
29647 29680 * errors if this condition is not true. Even then we should
29648 29681 * send our own request sense packet only if we have a check
29649 29682 * condition and auto request sense has not been performed by
29650 29683 * the hba.
29651 29684 */
29652 29685 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");
29653 29686
29654 29687 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
29655 29688 err = 0;
29656 29689 break;
29657 29690 }
29658 29691
29659 29692 /*
29660 29693 		 * Check CMD_DEV_GONE first; give up if the device is gone,
29661 29694 		 * no need to read RQS data.
29662 29695 */
29663 29696 if (pkt->pkt_reason == CMD_DEV_GONE) {
29664 29697 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29665 29698 "Error while dumping state with rmw..."
29666 29699 "Device is gone\n");
29667 29700 break;
29668 29701 }
29669 29702
29670 29703 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
29671 29704 SD_INFO(SD_LOG_DUMP, un,
29672 29705 "sddump: read failed with CHECK, try # %d\n", i);
29673 29706 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
29674 29707 (void) sd_send_polled_RQS(un);
29675 29708 }
29676 29709
29677 29710 continue;
29678 29711 }
29679 29712
29680 29713 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
29681 29714 int reset_retval = 0;
29682 29715
29683 29716 SD_INFO(SD_LOG_DUMP, un,
29684 29717 "sddump: read failed with BUSY, try # %d\n", i);
29685 29718
29686 29719 if (un->un_f_lun_reset_enabled == TRUE) {
29687 29720 reset_retval = scsi_reset(SD_ADDRESS(un),
29688 29721 RESET_LUN);
29689 29722 }
29690 29723 if (reset_retval == 0) {
29691 29724 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
29692 29725 }
29693 29726 (void) sd_send_polled_RQS(un);
29694 29727
29695 29728 } else {
29696 29729 SD_INFO(SD_LOG_DUMP, un,
29697 29730 "sddump: read failed with 0x%x, try # %d\n",
29698 29731 SD_GET_PKT_STATUS(pkt), i);
29699 29732 mutex_enter(SD_MUTEX(un));
29700 29733 sd_reset_target(un, pkt);
29701 29734 mutex_exit(SD_MUTEX(un));
29702 29735 }
29703 29736
29704 29737 /*
29705 29738 * If we are not getting anywhere with lun/target resets,
29706 29739 * let's reset the bus.
29707 29740 */
29708 29741 if (i > SD_NDUMP_RETRIES/2) {
29709 29742 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
29710 29743 (void) sd_send_polled_RQS(un);
29711 29744 }
29712 29745
29713 29746 }
29714 29747 scsi_destroy_pkt(pkt);
29715 29748
29716 29749 if (err != 0) {
29717 29750 scsi_free_consistent_buf(bp);
29718 29751 *bpp = NULL;
29719 29752 } else {
29720 29753 *bpp = bp;
29721 29754 }
29722 29755
29723 29756 done:
29724 29757 mutex_enter(SD_MUTEX(un));
29725 29758 return (err);
29726 29759 }
29727 29760
29728 29761
29729 29762 /*
29730 29763 * Function: sd_failfast_flushq
29731 29764 *
29732 29765 * Description: Take all bp's on the wait queue that have B_FAILFAST set
29733 29766 * in b_flags and move them onto the failfast queue, then kick
29734 29767 * off a thread to return all bp's on the failfast queue to
29735 29768 * their owners with an error set.
29736 29769 *
29737 29770 * Arguments: un - pointer to the soft state struct for the instance.
29738 29771 *
29739 29772 * Context: may execute in interrupt context.
29740 29773 */
29741 29774
29742 29775 static void
29743 -sd_failfast_flushq(struct sd_lun *un)
29776 +sd_failfast_flushq(struct sd_lun *un, boolean_t flush_all)
29744 29777 {
29745 29778 struct buf *bp;
29746 29779 struct buf *next_waitq_bp;
29747 29780 struct buf *prev_waitq_bp = NULL;
29748 29781
29749 29782 ASSERT(un != NULL);
29750 29783 ASSERT(mutex_owned(SD_MUTEX(un)));
29751 29784 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
29752 29785 ASSERT(un->un_failfast_bp == NULL);
29753 29786
29754 29787 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29755 29788 "sd_failfast_flushq: entry: un:0x%p\n", un);
29756 29789
29757 29790 /*
29758 29791 * Check if we should flush all bufs when entering failfast state, or
29759 29792 * just those with B_FAILFAST set.
29760 29793 */
29761 - if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
29794 + if ((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29795 + flush_all) {
29762 29796 /*
29763 29797 * Move *all* bp's on the wait queue to the failfast flush
29764 29798 * queue, including those that do NOT have B_FAILFAST set.
29765 29799 */
29766 29800 if (un->un_failfast_headp == NULL) {
29767 29801 ASSERT(un->un_failfast_tailp == NULL);
29768 29802 un->un_failfast_headp = un->un_waitq_headp;
29769 29803 } else {
29770 29804 ASSERT(un->un_failfast_tailp != NULL);
29771 29805 un->un_failfast_tailp->av_forw = un->un_waitq_headp;
29772 29806 }
29773 29807
29774 29808 un->un_failfast_tailp = un->un_waitq_tailp;
29775 29809
29776 29810 /* update kstat for each bp moved out of the waitq */
29777 29811 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
29778 29812 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29779 29813 }
29780 29814
29781 29815 /* empty the waitq */
29782 29816 un->un_waitq_headp = un->un_waitq_tailp = NULL;
29783 29817
29784 29818 } else {
29785 29819 /*
29786 29820 * Go thru the wait queue, pick off all entries with
29787 29821 * B_FAILFAST set, and move these onto the failfast queue.
29788 29822 */
29789 29823 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
29790 29824 /*
29791 29825 * Save the pointer to the next bp on the wait queue,
29792 29826 * so we get to it on the next iteration of this loop.
29793 29827 */
29794 29828 next_waitq_bp = bp->av_forw;
29795 29829
29796 29830 /*
29797 29831 * If this bp from the wait queue does NOT have
29798 29832 * B_FAILFAST set, just move on to the next element
29799 29833 * in the wait queue. Note, this is the only place
29800 29834 * where it is correct to set prev_waitq_bp.
29801 29835 */
29802 29836 if ((bp->b_flags & B_FAILFAST) == 0) {
29803 29837 prev_waitq_bp = bp;
29804 29838 continue;
29805 29839 }
29806 29840
29807 29841 /*
29808 29842 * Remove the bp from the wait queue.
29809 29843 */
29810 29844 if (bp == un->un_waitq_headp) {
29811 29845 /* The bp is the first element of the waitq. */
29812 29846 un->un_waitq_headp = next_waitq_bp;
29813 29847 if (un->un_waitq_headp == NULL) {
29814 29848 /* The wait queue is now empty */
29815 29849 un->un_waitq_tailp = NULL;
29816 29850 }
29817 29851 } else {
29818 29852 /*
29819 29853 * The bp is either somewhere in the middle
29820 29854 * or at the end of the wait queue.
29821 29855 */
29822 29856 ASSERT(un->un_waitq_headp != NULL);
29823 29857 ASSERT(prev_waitq_bp != NULL);
29824 29858 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
29825 29859 == 0);
29826 29860 if (bp == un->un_waitq_tailp) {
29827 29861 /* bp is the last entry on the waitq. */
29828 29862 ASSERT(next_waitq_bp == NULL);
29829 29863 un->un_waitq_tailp = prev_waitq_bp;
29830 29864 }
29831 29865 prev_waitq_bp->av_forw = next_waitq_bp;
29832 29866 }
29833 29867 bp->av_forw = NULL;
29834 29868
29835 29869 /*
29836 29870 * update kstat since the bp is moved out of
29837 29871 * the waitq
29838 29872 */
29839 29873 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29840 29874
29841 29875 /*
29842 29876 * Now put the bp onto the failfast queue.
29843 29877 */
29844 29878 if (un->un_failfast_headp == NULL) {
29845 29879 /* failfast queue is currently empty */
29846 29880 ASSERT(un->un_failfast_tailp == NULL);
29847 29881 un->un_failfast_headp =
29848 29882 un->un_failfast_tailp = bp;
29849 29883 } else {
29850 29884 /* Add the bp to the end of the failfast q */
29851 29885 ASSERT(un->un_failfast_tailp != NULL);
29852 29886 ASSERT(un->un_failfast_tailp->b_flags &
29853 29887 B_FAILFAST);
29854 29888 un->un_failfast_tailp->av_forw = bp;
29855 29889 un->un_failfast_tailp = bp;
29856 29890 }
29857 29891 }
29858 29892 }
29859 29893
29860 29894 /*
29861 29895 * Now return all bp's on the failfast queue to their owners.
29862 29896 */
29863 29897 while ((bp = un->un_failfast_headp) != NULL) {
29864 29898
29865 29899 un->un_failfast_headp = bp->av_forw;
29866 29900 if (un->un_failfast_headp == NULL) {
29867 29901 un->un_failfast_tailp = NULL;
29868 29902 }
29869 29903
29870 29904 /*
29871 29905 * We want to return the bp with a failure error code, but
29872 29906 * we do not want a call to sd_start_cmds() to occur here,
29873 29907 * so use sd_return_failed_command_no_restart() instead of
29874 29908 * sd_return_failed_command().
29875 29909 */
29876 29910 sd_return_failed_command_no_restart(un, bp, EIO);
29877 29911 }
29878 29912
29879 29913 /* Flush the xbuf queues if required. */
29880 29914 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29881 29915 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29882 29916 }
29883 29917
29884 29918 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29885 29919 "sd_failfast_flushq: exit: un:0x%p\n", un);
29886 29920 }
29887 29921
29888 29922
29889 29923 /*
29890 29924 * Function: sd_failfast_flushq_callback
29891 29925 *
29892 29926 * Description: Return TRUE if the given bp meets the criteria for failfast
29893 29927 * flushing. Used with ddi_xbuf_flushq(9F).
29894 29928 *
29895 29929 * Arguments: bp - ptr to buf struct to be examined.
29896 29930 *
29897 29931 * Context: Any
29898 29932 */
29899 29933
29900 29934 static int
29901 29935 sd_failfast_flushq_callback(struct buf *bp)
29902 29936 {
29903 29937 /*
29904 29938 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29905 29939 * state is entered; OR (2) the given bp has B_FAILFAST set.
29906 29940 */
29907 29941 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29908 29942 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29909 29943 }
29910 29944
29911 29945
29912 29946
29913 29947 /*
29914 29948 * Function: sd_setup_next_xfer
29915 29949 *
29916 29950 * Description: Prepare next I/O operation using DMA_PARTIAL
29917 29951 *
29918 29952 */
29919 29953
29920 29954 static int
29921 29955 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29922 29956 struct scsi_pkt *pkt, struct sd_xbuf *xp)
29923 29957 {
29924 29958 ssize_t num_blks_not_xfered;
29925 29959 daddr_t strt_blk_num;
29926 29960 ssize_t bytes_not_xfered;
29927 29961 int rval;
29928 29962
29929 29963 ASSERT(pkt->pkt_resid == 0);
29930 29964
29931 29965 /*
29932 29966 * Calculate next block number and amount to be transferred.
29933 29967 *
29934 29968 	 * How much data has NOT been transferred to the HBA yet.
29935 29969 */
29936 29970 bytes_not_xfered = xp->xb_dma_resid;
29937 29971
29938 29972 /*
29939 29973 	 * Figure out how many blocks have NOT been transferred to the HBA yet.
29940 29974 */
29941 29975 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29942 29976
29943 29977 /*
29944 29978 	 * Set the starting block number to the end of what WAS transferred.
29945 29979 */
29946 29980 strt_blk_num = xp->xb_blkno +
29947 29981 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
29948 29982
29949 29983 /*
29950 29984 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29951 29985 * will call scsi_initpkt with NULL_FUNC so we do not have to release
29952 29986 * the disk mutex here.
29953 29987 */
29954 29988 rval = sd_setup_next_rw_pkt(un, pkt, bp,
29955 29989 strt_blk_num, num_blks_not_xfered);
29956 29990
29957 29991 if (rval == 0) {
29958 29992
29959 29993 /*
29960 29994 * Success.
29961 29995 *
29962 29996 * Adjust things if there are still more blocks to be
29963 29997 		 * transferred.
29964 29998 */
29965 29999 xp->xb_dma_resid = pkt->pkt_resid;
29966 30000 pkt->pkt_resid = 0;
29967 30001
29968 30002 return (1);
29969 30003 }
29970 30004
29971 30005 /*
29972 30006 	 * There's really only one other possible return value from
29973 30007 	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
29974 30008 	 * returns NULL.
29975 30009 */
29976 30010 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
29977 30011
29978 30012 bp->b_resid = bp->b_bcount;
29979 30013 bp->b_flags |= B_ERROR;
29980 30014
29981 30015 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29982 30016 "Error setting up next portion of DMA transfer\n");
29983 30017
29984 30018 return (0);
29985 30019 }
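
The bookkeeping above is plain residual arithmetic: the blocks still to move come from the DMA residual, and the next start block is the original start plus the blocks the HBA has already accepted. A standalone sketch; the 512-byte conversion macro is an illustrative stand-in for SD_BYTES2TGTBLOCKS:

    #include <stdint.h>

    /* Illustrative stand-in for SD_BYTES2TGTBLOCKS (512-byte blocks). */
    #define BYTES2BLKS(bytes)	((bytes) / 512)

    static void
    next_partial_xfer(int64_t blkno, int64_t bcount, int64_t dma_resid,
        int64_t *next_blk, int64_t *blks_left)
    {
            /* Blocks not yet transferred, from the DMA residual. */
            *blks_left = BYTES2BLKS(dma_resid);

            /* Start where the previous partial transfer left off. */
            *next_blk = blkno + BYTES2BLKS(bcount - dma_resid);
    }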
29986 30020
29987 30021 /*
29988 30022 * Function: sd_panic_for_res_conflict
29989 30023 *
29990 30024 * Description: Call panic with a string formatted with "Reservation Conflict"
29991 30025 * and a human readable identifier indicating the SD instance
29992 30026 * that experienced the reservation conflict.
29993 30027 *
29994 30028 * Arguments: un - pointer to the soft state struct for the instance.
29995 30029 *
29996 30030 * Context: may execute in interrupt context.
29997 30031 */
29998 30032
29999 30033 #define SD_RESV_CONFLICT_FMT_LEN 40
30000 30034 void
30001 30035 sd_panic_for_res_conflict(struct sd_lun *un)
30002 30036 {
30003 30037 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
30004 30038 char path_str[MAXPATHLEN];
30005 30039
30006 30040 (void) snprintf(panic_str, sizeof (panic_str),
30007 30041 "Reservation Conflict\nDisk: %s",
30008 30042 ddi_pathname(SD_DEVINFO(un), path_str));
30009 30043
30010 30044 panic(panic_str);
30011 30045 }
30012 30046
30013 30047 /*
30014 30048  * Note: The following sd_faultinjection_ioctl() routines implement
30015 30049  * driver support for fault injection, used for error analysis by
30016 30050  * causing faults in multiple layers of the driver.
30017 30051 *
30018 30052 */
30019 30053
30020 30054 #ifdef SD_FAULT_INJECTION
30021 -static uint_t sd_fault_injection_on = 0;
30022 30055
30023 30056 /*
30024 30057 * Function: sd_faultinjection_ioctl()
30025 30058 *
30026 30059  * Description: This routine is the driver entry point for handling
30027 30060  *		fault injection ioctls used to inject errors into the
30028 30061  *		driver's layer model.
30029 30062 *
30030 30063 * Arguments: cmd - the ioctl cmd received
30031 30064  *		arg - the arguments from the user
30032 30065 */
30033 30066
30034 -static void
30035 -sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
30067 +static int
30068 +sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
30036 30069 {
30037 30070 uint_t i = 0;
30038 30071 uint_t rval;
30072 + int ret = 0;
30039 30073
30040 30074 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
30041 30075
30042 30076 mutex_enter(SD_MUTEX(un));
30043 30077
30044 30078 switch (cmd) {
30045 30079 case SDIOCRUN:
30046 30080 /* Allow pushed faults to be injected */
30047 30081 SD_INFO(SD_LOG_SDTEST, un,
30048 30082 "sd_faultinjection_ioctl: Injecting Fault Run\n");
30049 30083
30050 30084 sd_fault_injection_on = 1;
30051 30085
30052 30086 SD_INFO(SD_LOG_IOERR, un,
30053 30087 "sd_faultinjection_ioctl: run finished\n");
30054 30088 break;
30055 30089
30056 30090 case SDIOCSTART:
30057 30091 /* Start Injection Session */
30058 30092 SD_INFO(SD_LOG_SDTEST, un,
30059 30093 "sd_faultinjection_ioctl: Injecting Fault Start\n");
30060 30094
30061 30095 sd_fault_injection_on = 0;
30062 30096 un->sd_injection_mask = 0xFFFFFFFF;
30063 30097 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30064 30098 un->sd_fi_fifo_pkt[i] = NULL;
30065 30099 un->sd_fi_fifo_xb[i] = NULL;
30066 30100 un->sd_fi_fifo_un[i] = NULL;
30067 30101 un->sd_fi_fifo_arq[i] = NULL;
30102 + un->sd_fi_fifo_tran[i] = NULL;
30068 30103 }
30069 30104 un->sd_fi_fifo_start = 0;
30070 30105 un->sd_fi_fifo_end = 0;
30071 30106
30072 30107 mutex_enter(&(un->un_fi_mutex));
30073 30108 un->sd_fi_log[0] = '\0';
30074 30109 un->sd_fi_buf_len = 0;
30075 30110 mutex_exit(&(un->un_fi_mutex));
30076 30111
30077 30112 SD_INFO(SD_LOG_IOERR, un,
30078 30113 "sd_faultinjection_ioctl: start finished\n");
30079 30114 break;
30080 30115
30081 30116 case SDIOCSTOP:
30082 30117 /* Stop Injection Session */
30083 30118 SD_INFO(SD_LOG_SDTEST, un,
30084 30119 "sd_faultinjection_ioctl: Injecting Fault Stop\n");
30085 30120 sd_fault_injection_on = 0;
30086 30121 un->sd_injection_mask = 0x0;
30087 30122
30088 30123 		/* Empty stray or unused structs from fifo */
30089 30124 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30090 30125 if (un->sd_fi_fifo_pkt[i] != NULL) {
30091 30126 kmem_free(un->sd_fi_fifo_pkt[i],
30092 30127 sizeof (struct sd_fi_pkt));
30093 30128 }
30094 30129 if (un->sd_fi_fifo_xb[i] != NULL) {
30095 30130 kmem_free(un->sd_fi_fifo_xb[i],
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
30096 30131 sizeof (struct sd_fi_xb));
30097 30132 }
30098 30133 if (un->sd_fi_fifo_un[i] != NULL) {
30099 30134 kmem_free(un->sd_fi_fifo_un[i],
30100 30135 sizeof (struct sd_fi_un));
30101 30136 }
30102 30137 if (un->sd_fi_fifo_arq[i] != NULL) {
30103 30138 kmem_free(un->sd_fi_fifo_arq[i],
30104 30139 sizeof (struct sd_fi_arq));
30105 30140 }
30141 + if (un->sd_fi_fifo_tran[i] != NULL) {
30142 + kmem_free(un->sd_fi_fifo_tran[i],
30143 + sizeof (struct sd_fi_tran));
30144 + }
30106 30145 un->sd_fi_fifo_pkt[i] = NULL;
30107 30146 un->sd_fi_fifo_un[i] = NULL;
30108 30147 un->sd_fi_fifo_xb[i] = NULL;
30109 30148 un->sd_fi_fifo_arq[i] = NULL;
30149 + un->sd_fi_fifo_tran[i] = NULL;
30110 30150 }
30111 30151 un->sd_fi_fifo_start = 0;
30112 30152 un->sd_fi_fifo_end = 0;
30113 30153
30114 30154 SD_INFO(SD_LOG_IOERR, un,
30115 30155 "sd_faultinjection_ioctl: stop finished\n");
30116 30156 break;
30117 30157
30118 30158 case SDIOCINSERTPKT:
30119 30159 /* Store a packet struct to be pushed onto fifo */
30120 30160 SD_INFO(SD_LOG_SDTEST, un,
30121 30161 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
30122 30162
30123 30163 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30124 30164
30165 + if (un->sd_fi_fifo_tran[i] != NULL) {
30166 + ret = EBUSY;
30167 + break;
30168 + }
30169 +
30125 30170 sd_fault_injection_on = 0;
30126 30171
30127 30172 		/* No more than SD_FI_MAX_ERROR allowed in the queue */
30128 30173 if (un->sd_fi_fifo_pkt[i] != NULL) {
30129 30174 kmem_free(un->sd_fi_fifo_pkt[i],
30130 30175 sizeof (struct sd_fi_pkt));
30131 30176 }
30132 30177 if (arg != NULL) {
30133 30178 un->sd_fi_fifo_pkt[i] =
30134 30179 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
30135 30180 if (un->sd_fi_fifo_pkt[i] == NULL) {
30136 30181 /* Alloc failed don't store anything */
30182 + ret = ENOMEM;
30137 30183 break;
30138 30184 }
30139 30185 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
30140 30186 sizeof (struct sd_fi_pkt), 0);
30141 30187 if (rval == -1) {
30142 30188 kmem_free(un->sd_fi_fifo_pkt[i],
30143 30189 sizeof (struct sd_fi_pkt));
30144 30190 un->sd_fi_fifo_pkt[i] = NULL;
30191 + ret = EFAULT;
30192 + break;
30145 30193 }
30146 30194 } else {
30147 30195 SD_INFO(SD_LOG_IOERR, un,
30148 30196 "sd_faultinjection_ioctl: pkt null\n");
30149 30197 }
30150 30198 break;
30151 30199
30200 + case SDIOCINSERTTRAN:
30201 + /* Store a tran packet struct to be pushed onto fifo. */
30202 + SD_INFO(SD_LOG_SDTEST, un,
30203 + "sd_faultinjection_ioctl: Injecting Fault Insert TRAN\n");
30204 + i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30205 +
30206 + /*
30207 + * HBA-related fault injections can't be mixed with target-level
30208 + * fault injections.
30209 + */
30210 + if (un->sd_fi_fifo_pkt[i] != NULL ||
30211 + un->sd_fi_fifo_xb[i] != NULL ||
30212 + un->sd_fi_fifo_un[i] != NULL ||
30213 + un->sd_fi_fifo_arq[i] != NULL) {
30214 + ret = EBUSY;
30215 + break;
30216 + }
30217 +
30218 + sd_fault_injection_on = 0;
30219 +
30220 + if (un->sd_fi_fifo_tran[i] != NULL) {
30221 + kmem_free(un->sd_fi_fifo_tran[i],
30222 + sizeof (struct sd_fi_tran));
30223 + un->sd_fi_fifo_tran[i] = NULL;
30224 + }
30225 + if (arg != NULL) {
30226 + un->sd_fi_fifo_tran[i] =
30227 + kmem_alloc(sizeof (struct sd_fi_tran), KM_NOSLEEP);
30228 + if (un->sd_fi_fifo_tran[i] == NULL) {
30229 + /* Alloc failed don't store anything */
30230 + ret = ENOMEM;
30231 + break;
30232 + }
30233 + rval = ddi_copyin((void *)arg, un->sd_fi_fifo_tran[i],
30234 + sizeof (struct sd_fi_tran), 0);
30235 +
30236 + if (rval == 0) {
30237 + switch (un->sd_fi_fifo_tran[i]->tran_cmd) {
30238 + case SD_FLTINJ_CMD_BUSY:
30239 + case SD_FLTINJ_CMD_TIMEOUT:
30240 + break;
30241 + default:
30242 + ret = EINVAL;
30243 + break;
30244 + }
30245 + } else {
30246 + ret = EFAULT;
30247 + }
30248 +
30249 + if (ret != 0) {
30250 + kmem_free(un->sd_fi_fifo_tran[i],
30251 + sizeof (struct sd_fi_tran));
30252 + un->sd_fi_fifo_tran[i] = NULL;
30253 + break;
30254 + }
30255 + } else {
30256 + SD_INFO(SD_LOG_IOERR, un,
30257 + "sd_faultinjection_ioctl: tran null\n");
30258 + }
30259 + break;
30260 +
30152 30261 case SDIOCINSERTXB:
30153 30262 /* Store a xb struct to be pushed onto fifo */
30154 30263 SD_INFO(SD_LOG_SDTEST, un,
30155 30264 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
30156 30265
30157 30266 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30158 30267
30268 + if (un->sd_fi_fifo_tran[i] != NULL) {
30269 + ret = EBUSY;
30270 + break;
30271 + }
30272 +
30159 30273 sd_fault_injection_on = 0;
30160 30274
30161 30275 if (un->sd_fi_fifo_xb[i] != NULL) {
30162 30276 kmem_free(un->sd_fi_fifo_xb[i],
30163 30277 sizeof (struct sd_fi_xb));
30164 30278 un->sd_fi_fifo_xb[i] = NULL;
30165 30279 }
30166 30280 if (arg != NULL) {
30167 30281 un->sd_fi_fifo_xb[i] =
30168 30282 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
30169 30283 if (un->sd_fi_fifo_xb[i] == NULL) {
30170 30284 /* Alloc failed don't store anything */
30285 + ret = ENOMEM;
30171 30286 break;
30172 30287 }
30173 30288 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
30174 30289 sizeof (struct sd_fi_xb), 0);
30175 30290
30176 30291 if (rval == -1) {
30177 30292 kmem_free(un->sd_fi_fifo_xb[i],
30178 30293 sizeof (struct sd_fi_xb));
30179 30294 un->sd_fi_fifo_xb[i] = NULL;
30295 + ret = EFAULT;
30296 + break;
30180 30297 }
30181 30298 } else {
30182 30299 SD_INFO(SD_LOG_IOERR, un,
30183 30300 "sd_faultinjection_ioctl: xb null\n");
30184 30301 }
30185 30302 break;
30186 30303
30187 30304 case SDIOCINSERTUN:
30188 30305 /* Store a un struct to be pushed onto fifo */
30189 30306 SD_INFO(SD_LOG_SDTEST, un,
30190 30307 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
30191 30308
30192 30309 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30310 + if (un->sd_fi_fifo_tran[i] != NULL) {
30311 + ret = EBUSY;
30312 + break;
30313 + }
30193 30314
30194 30315 sd_fault_injection_on = 0;
30195 30316
30196 30317 if (un->sd_fi_fifo_un[i] != NULL) {
30197 30318 kmem_free(un->sd_fi_fifo_un[i],
30198 30319 sizeof (struct sd_fi_un));
30199 30320 un->sd_fi_fifo_un[i] = NULL;
30200 30321 }
30201 30322 if (arg != NULL) {
30202 30323 un->sd_fi_fifo_un[i] =
30203 30324 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
30204 30325 if (un->sd_fi_fifo_un[i] == NULL) {
30205 30326 /* Alloc failed don't store anything */
30327 + ret = ENOMEM;
30206 30328 break;
30207 30329 }
30208 30330 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
30209 30331 sizeof (struct sd_fi_un), 0);
30210 30332 if (rval == -1) {
30211 30333 kmem_free(un->sd_fi_fifo_un[i],
30212 30334 sizeof (struct sd_fi_un));
30213 30335 un->sd_fi_fifo_un[i] = NULL;
30336 + ret = EFAULT;
30337 + break;
30214 30338 }
30215 30339
30216 30340 } else {
30217 30341 SD_INFO(SD_LOG_IOERR, un,
30218 30342 "sd_faultinjection_ioctl: un null\n");
30219 30343 }
30220 30344
30221 30345 break;
30222 30346
30223 30347 case SDIOCINSERTARQ:
30224 30348 /* Store a arq struct to be pushed onto fifo */
30225 30349 SD_INFO(SD_LOG_SDTEST, un,
30226 30350 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
30227 30351 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30352 + if (un->sd_fi_fifo_tran[i] != NULL) {
30353 + ret = EBUSY;
30354 + break;
30355 + }
30228 30356
30229 30357 sd_fault_injection_on = 0;
30230 30358
30231 30359 if (un->sd_fi_fifo_arq[i] != NULL) {
30232 30360 kmem_free(un->sd_fi_fifo_arq[i],
30233 30361 sizeof (struct sd_fi_arq));
30234 30362 un->sd_fi_fifo_arq[i] = NULL;
30235 30363 }
30236 30364 if (arg != NULL) {
30237 30365 un->sd_fi_fifo_arq[i] =
30238 30366 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
30239 30367 if (un->sd_fi_fifo_arq[i] == NULL) {
30240 30368 /* Alloc failed don't store anything */
30369 + ret = ENOMEM;
30241 30370 break;
30242 30371 }
30243 30372 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30244 30373 sizeof (struct sd_fi_arq), 0);
30245 30374 if (rval == -1) {
30246 30375 kmem_free(un->sd_fi_fifo_arq[i],
30247 30376 sizeof (struct sd_fi_arq));
30248 30377 un->sd_fi_fifo_arq[i] = NULL;
30378 + ret = EFAULT;
30379 + break;
30249 30380 }
30250 30381
30251 30382 } else {
30252 30383 SD_INFO(SD_LOG_IOERR, un,
30253 30384 "sd_faultinjection_ioctl: arq null\n");
30254 30385 }
30255 30386
30256 30387 break;
30257 30388
30258 30389 case SDIOCPUSH:
30259 - /* Push stored xb, pkt, un, and arq onto fifo */
30390 + /* Push stored xb, pkt, un, arq and tran onto fifo */
30260 30391 sd_fault_injection_on = 0;
30261 30392
30262 30393 if (arg != NULL) {
30263 30394 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30264 30395 if (rval != -1 &&
30265 30396 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30266 30397 un->sd_fi_fifo_end += i;
30267 30398 }
30268 30399 } else {
30269 30400 SD_INFO(SD_LOG_IOERR, un,
30270 30401 "sd_faultinjection_ioctl: push arg null\n");
30271 30402 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30272 30403 un->sd_fi_fifo_end++;
30273 30404 }
30274 30405 }
30275 30406 SD_INFO(SD_LOG_IOERR, un,
30276 30407 "sd_faultinjection_ioctl: push to end=%d\n",
30277 30408 un->sd_fi_fifo_end);
30278 30409 break;
30279 30410
30280 30411 case SDIOCRETRIEVE:
30281 30412 /* Return buffer of log from Injection session */
30282 30413 SD_INFO(SD_LOG_SDTEST, un,
30283 30414 		    "sd_faultinjection_ioctl: Injecting Fault Retrieve\n");
30284 30415
30285 30416 sd_fault_injection_on = 0;
30286 30417
30287 30418 mutex_enter(&(un->un_fi_mutex));
30288 30419 rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30289 30420 un->sd_fi_buf_len+1, 0);
30290 30421 mutex_exit(&(un->un_fi_mutex));
30291 30422
30292 30423 if (rval == -1) {
30293 30424 /*
30294 30425 			 * arg is possibly invalid; setting
30295 30426 			 * it to NULL for return.
30296 30427 */
30297 30428 arg = NULL;
30298 30429 }
30299 30430 break;
30300 30431 }
30301 30432
30302 30433 mutex_exit(SD_MUTEX(un));
30303 30434 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
30435 + return (ret);
30304 30436 }
30305 30437
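A minimal userland sketch of how the new SDIOCINSERTTRAN path might be
exercised. This is illustrative only: it assumes a kernel built with
SD_FAULT_INJECTION, that the harness can reach the SDIOC* commands and
struct sd_fi_tran (header and device path are assumptions, not part of
this change):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <stropts.h>
    #include <sys/types.h>
    #include <sys/scsi/targets/sddef.h>	/* assumed reachable from userland */

    static int
    inject_busy(const char *path)
    {
    	struct sd_fi_tran tran;
    	uint_t count = 1;
    	int fd = open(path, O_RDWR);

    	if (fd < 0)
    		return (-1);
    	(void) memset(&tran, 0, sizeof (tran));
    	tran.tran_cmd = SD_FLTINJ_CMD_BUSY;	/* or SD_FLTINJ_CMD_TIMEOUT */
    	/* Stage the tran fault, then push one fifo entry. */
    	if (ioctl(fd, SDIOCINSERTTRAN, &tran) != 0 ||
    	    ioctl(fd, SDIOCPUSH, &count) != 0) {
    		(void) close(fd);
    		return (-1);
    	}
    	/* Arming injection itself (SDIOCSTART, assumed) is a separate step. */
    	(void) close(fd);
    	return (0);
    }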
30306 30438
30307 30439 /*
30308 30440 * Function: sd_injection_log()
30309 30441 *
30310 30442 * Description: This routine adds buff to the already existing injection log
30311 30443 * for retrieval via faultinjection_ioctl for use in fault
30312 30444 * detection and recovery
30313 30445 *
30314 30446 * Arguments: buf - the string to add to the log
30315 30447 */
30316 30448
30317 30449 static void
30318 30450 sd_injection_log(char *buf, struct sd_lun *un)
30319 30451 {
30320 30452 uint_t len;
30321 30453
30322 30454 ASSERT(un != NULL);
30323 30455 ASSERT(buf != NULL);
30324 30456
30325 30457 mutex_enter(&(un->un_fi_mutex));
30326 30458
30327 30459 len = min(strlen(buf), 255);
30328 30460 /* Add logged value to Injection log to be returned later */
30329 30461 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30330 30462 uint_t offset = strlen((char *)un->sd_fi_log);
30331 30463 char *destp = (char *)un->sd_fi_log + offset;
30332 30464 int i;
30333 30465 for (i = 0; i < len; i++) {
30334 30466 *destp++ = *buf++;
30335 30467 }
30336 30468 un->sd_fi_buf_len += len;
30337 30469 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30338 30470 }
30339 30471
30340 30472 mutex_exit(&(un->un_fi_mutex));
30341 30473 }
30342 30474
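The log built up here is bounded by SD_FI_MAX_BUF and is handed back by
the SDIOCRETRIEVE case above. A matching retrieval sketch, continuing
the hypothetical harness shown earlier (the fixed buffer size is an
assumption; a real harness would size it from SD_FI_MAX_BUF):

    #include <stdio.h>

    /* Hypothetical continuation of the harness above. */
    static void
    dump_injection_log(int fd)
    {
    	static char logbuf[65536];	/* assumed >= SD_FI_MAX_BUF */

    	if (ioctl(fd, SDIOCRETRIEVE, logbuf) == 0)
    		(void) printf("%s", logbuf);
    }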
30475 +/*
30476 + * This function is called just before sending the packet to the HBA.
30477 + * Caller must hold the per-LUN mutex; it remains held upon return.
30478 + */
30479 +static void
30480 +sd_prefaultinjection(struct scsi_pkt *pktp)
30481 +{
30482 + uint_t i;
30483 + struct buf *bp;
30484 + struct sd_lun *un;
30485 + struct sd_fi_tran *fi_tran;
30343 30486
30487 + ASSERT(pktp != NULL);
30488 +
30489 + /* pull bp and un from pktp */
30490 + bp = (struct buf *)pktp->pkt_private;
30491 + un = SD_GET_UN(bp);
30492 +
30493 + /* if injection is off return */
30494 + if (sd_fault_injection_on == 0 ||
30495 + un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
30496 + return;
30497 + }
30498 +
30499 + ASSERT(un != NULL);
30500 + ASSERT(mutex_owned(SD_MUTEX(un)));
30501 +
30502 + /* take next set off fifo */
30503 + i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
30504 +
30505 + fi_tran = un->sd_fi_fifo_tran[i];
30506 + if (fi_tran != NULL) {
30507 + switch (fi_tran->tran_cmd) {
30508 + case SD_FLTINJ_CMD_BUSY:
30509 + pktp->pkt_flags |= FLAG_PKT_BUSY;
30510 + break;
30511 + case SD_FLTINJ_CMD_TIMEOUT:
30512 + pktp->pkt_flags |= FLAG_PKT_TIMEOUT;
30513 + break;
30514 + default:
30515 + return;
30516 + }
30517 + }
30518 + /*
30519 + * We don't deallocate any data here - it will be deallocated after
30520 + * the packet has been processed by the HBA.
30521 + */
30522 +}
30523 +
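Note that sd_prefaultinjection() only tags the packet; the injected
behavior is realized by whichever HBA driver honors the new
FLAG_PKT_BUSY and FLAG_PKT_TIMEOUT flags. A rough sketch of what that
handling might look like in an HBA tran_start routine (an assumption
for illustration; the HBA-side changes are not part of this file):

    /* Hypothetical HBA-side handling of the injected flags. */
    static int
    xx_tran_start(struct scsi_address *ap, struct scsi_pkt *pktp)
    {
    	if (pktp->pkt_flags & FLAG_PKT_BUSY) {
    		/* Complete immediately with BUSY status. */
    		*(pktp->pkt_scbp) = STATUS_BUSY;
    		pktp->pkt_reason = CMD_CMPLT;
    		pktp->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
    		    STATE_SENT_CMD | STATE_GOT_STATUS;
    		scsi_hba_pkt_comp(pktp);
    		return (TRAN_ACCEPT);
    	}
    	if (pktp->pkt_flags & FLAG_PKT_TIMEOUT) {
    		/* Swallow the command so the target driver times it out. */
    		return (TRAN_ACCEPT);
    	}
    	/* ... normal submission path ... */
    	return (TRAN_ACCEPT);
    }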
30524 +
30344 30525 /*
30345 30526 * Function: sd_faultinjection()
30346 30527 *
30347 30528 * Description: This routine takes the pkt and changes its
30348 30529  *		content based on error injection scenario.
30349 30530 *
30350 30531 * Arguments: pktp - packet to be changed
30351 30532 */
30352 30533
30353 30534 static void
30354 30535 sd_faultinjection(struct scsi_pkt *pktp)
30355 30536 {
30356 30537 uint_t i;
30357 30538 struct sd_fi_pkt *fi_pkt;
30358 30539 struct sd_fi_xb *fi_xb;
30359 30540 struct sd_fi_un *fi_un;
30360 30541 struct sd_fi_arq *fi_arq;
30361 30542 struct buf *bp;
30362 30543 struct sd_xbuf *xb;
30363 30544 struct sd_lun *un;
30364 30545
30365 30546 ASSERT(pktp != NULL);
30366 30547
30367 30548 /* pull bp xb and un from pktp */
30368 30549 bp = (struct buf *)pktp->pkt_private;
30369 30550 xb = SD_GET_XBUF(bp);
30370 30551 un = SD_GET_UN(bp);
30371 30552
30372 30553 ASSERT(un != NULL);
30373 30554
30374 30555 mutex_enter(SD_MUTEX(un));
30375 30556
30376 30557 SD_TRACE(SD_LOG_SDTEST, un,
30377 30558 "sd_faultinjection: entry Injection from sdintr\n");
30378 30559
30379 30560 /* if injection is off return */
30380 30561 if (sd_fault_injection_on == 0 ||
30381 30562 un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
30382 30563 mutex_exit(SD_MUTEX(un));
30383 30564 return;
30384 30565 }
30385 30566
30386 30567 SD_INFO(SD_LOG_SDTEST, un,
30387 30568 "sd_faultinjection: is working for copying\n");
30388 30569
30389 30570 /* take next set off fifo */
30390 30571 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
30391 30572
30392 30573 fi_pkt = un->sd_fi_fifo_pkt[i];
30393 30574 fi_xb = un->sd_fi_fifo_xb[i];
30394 30575 fi_un = un->sd_fi_fifo_un[i];
30395 30576 fi_arq = un->sd_fi_fifo_arq[i];
30396 30577
30397 30578
30398 30579 /* set variables accordingly */
30399 30580 /* set pkt if it was on fifo */
30400 30581 if (fi_pkt != NULL) {
30401 30582 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
30402 30583 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
30403 30584 if (fi_pkt->pkt_cdbp != 0xff)
30404 30585 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
30405 30586 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
30406 30587 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
30407 30588 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");
30408 30589
30409 30590 }
30410 30591 /* set xb if it was on fifo */
30411 30592 if (fi_xb != NULL) {
30412 30593 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
30413 30594 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
30414 30595 if (fi_xb->xb_retry_count != 0)
30415 30596 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
30416 30597 SD_CONDSET(xb, xb, xb_victim_retry_count,
30417 30598 "xb_victim_retry_count");
30418 30599 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
30419 30600 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
30420 30601 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");
30421 30602
30422 30603 /* copy in block data from sense */
30423 30604 /*
30424 30605 * if (fi_xb->xb_sense_data[0] != -1) {
30425 30606 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
30426 30607 * SENSE_LENGTH);
30427 30608 * }
30428 30609 */
30429 30610 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);
30430 30611
30431 30612 /* copy in extended sense codes */
30432 30613 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30433 30614 xb, es_code, "es_code");
30434 30615 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30435 30616 xb, es_key, "es_key");
30436 30617 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30437 30618 xb, es_add_code, "es_add_code");
30438 30619 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30439 30620 xb, es_qual_code, "es_qual_code");
30440 30621 struct scsi_extended_sense *esp;
30441 30622 esp = (struct scsi_extended_sense *)xb->xb_sense_data;
30442 30623 esp->es_class = CLASS_EXTENDED_SENSE;
30443 30624 }
30444 30625
30445 30626 /* set un if it was on fifo */
30446 30627 if (fi_un != NULL) {
30447 30628 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
30448 30629 SD_CONDSET(un, un, un_ctype, "un_ctype");
30449 30630 SD_CONDSET(un, un, un_reset_retry_count,
30450 30631 "un_reset_retry_count");
30451 30632 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
30452 30633 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
30453 30634 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
30454 30635 SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30455 30636 "un_f_allow_bus_device_reset");
30456 30637 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30457 30638
30458 30639 }
30459 30640
30460 30641 /* copy in auto request sense if it was on fifo */
30461 30642 if (fi_arq != NULL) {
30462 30643 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30463 30644 }
30464 30645
30465 30646 /* free structs */
30466 30647 if (un->sd_fi_fifo_pkt[i] != NULL) {
30467 30648 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30468 30649 }
30469 30650 if (un->sd_fi_fifo_xb[i] != NULL) {
30470 30651 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30471 30652 }
30472 30653 if (un->sd_fi_fifo_un[i] != NULL) {
30473 30654 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30474 30655 }
30475 30656 if (un->sd_fi_fifo_arq[i] != NULL) {
30476 30657 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30477 30658 }
30478 30659
30479 30660 /*
30480 30661 	 * kmem_free() does not guarantee to set the pointer to NULL.
30481 30662 	 * Since we use these pointers to determine whether we have
30482 30663 	 * set values or not, confirm that they are always NULL
30483 30664 	 * after free.
30484 30665 */
30485 30666 un->sd_fi_fifo_pkt[i] = NULL;
30486 30667 un->sd_fi_fifo_un[i] = NULL;
30487 30668 un->sd_fi_fifo_xb[i] = NULL;
30488 30669 un->sd_fi_fifo_arq[i] = NULL;
30489 30670
30490 30671 un->sd_fi_fifo_start++;
30491 30672
30492 30673 mutex_exit(SD_MUTEX(un));
30493 30674
30494 30675 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30495 30676 }
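For reference, SD_CONDSET above conditionally copies one field from the
staged fi_* struct and records the action via sd_injection_log(); the
string argument is the field name used in that log. Judging from the
explicit 0xff guards in this function, 0xFF appears to serve as a
"leave this field unchanged" sentinel. An approximate paraphrase, not
the actual macro body:

    /* Illustrative only; see the real SD_CONDSET definition in sd.c. */
    #define	SD_CONDSET_SKETCH(dst, fi, field, name)			\
    	if ((fi)->field != 0xFF) {					\
    		(dst)->field = (fi)->field;				\
    		sd_injection_log("injecting " name "\n", un);		\
    	}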
30496 30677
30497 30678 #endif /* SD_FAULT_INJECTION */
30498 30679
30499 30680 /*
30500 30681 * This routine is invoked in sd_unit_attach(). Before calling it, the
30501 30682 * properties in conf file should be processed already, and "hotpluggable"
30502 30683 * property was processed also.
30503 30684 *
30504 30685 * The sd driver distinguishes 3 different type of devices: removable media,
30505 30686 * non-removable media, and hotpluggable. Below the differences are defined:
30506 30687 *
30507 30688 * 1. Device ID
30508 30689 *
30509 30690 * The device ID of a device is used to identify this device. Refer to
30510 30691 * ddi_devid_register(9F).
30511 30692 *
30512 30693 * For a non-removable media disk device which can provide 0x80 or 0x83
30513 30694 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30514 30695 * device ID is created to identify this device. For other non-removable
30515 30696 * media devices, a default device ID is created only if this device has
30516 30697  * at least 2 alternate cylinders. Otherwise, this device has no devid.
30517 30698 *
30518 30699 * -------------------------------------------------------
30519 30700 * removable media hotpluggable | Can Have Device ID
30520 30701 * -------------------------------------------------------
30521 30702 * false false | Yes
30522 30703 * false true | Yes
30523 30704 * true x | No
30524 30705 * ------------------------------------------------------
30525 30706 *
30526 30707 *
30527 30708 * 2. SCSI group 4 commands
30528 30709 *
30529 30710 * In SCSI specs, only some commands in group 4 command set can use
30530 30711 * 8-byte addresses that can be used to access >2TB storage spaces.
30531 30712 * Other commands have no such capability. Without supporting group4,
30532 30713 * it is impossible to make full use of storage spaces of a disk with
30533 30714 * capacity larger than 2TB.
30534 30715 *
30535 30716 * -----------------------------------------------
30536 30717 * removable media hotpluggable LP64 | Group
30537 30718 * -----------------------------------------------
30538 30719 * false false false | 1
30539 30720 * false false true | 4
30540 30721 * false true false | 1
30541 30722 * false true true | 4
30542 30723 * true x x | 5
30543 30724 * -----------------------------------------------
30544 30725 *
30545 30726 *
30546 30727 * 3. Check for VTOC Label
30547 30728 *
30548 30729 * If a direct-access disk has no EFI label, sd will check if it has a
30549 30730 * valid VTOC label. Now, sd also does that check for removable media
30550 30731 * and hotpluggable devices.
30551 30732 *
30552 30733 * --------------------------------------------------------------
30553 30734 * Direct-Access removable media hotpluggable | Check Label
30554 30735 * -------------------------------------------------------------
30555 30736 * false false false | No
30556 30737 * false false true | No
30557 30738 * false true false | Yes
30558 30739 * false true true | Yes
30559 30740 * true x x | Yes
30560 30741 * --------------------------------------------------------------
30561 30742 *
30562 30743 *
30563 30744 * 4. Building default VTOC label
30564 30745 *
30565 30746 * As section 3 says, sd checks if some kinds of devices have VTOC label.
30566 30747 * If those devices have no valid VTOC label, sd(7d) will attempt to
30567 30748 * create default VTOC for them. Currently sd creates default VTOC label
30568 - * for all devices on x86 platform (VTOC_16), but only for removable
30569 - * media devices on SPARC (VTOC_8).
30749 + * for all devices on x86 platform (VTOC_16).
30570 30750 *
30571 30751 * -----------------------------------------------------------
30572 30752 * removable media hotpluggable platform | Default Label
30573 30753 * -----------------------------------------------------------
30574 - * false false sparc | No
30575 30754 * false true x86 | Yes
30576 - * false true sparc | Yes
30577 30755 * true x x | Yes
30578 30756 * ----------------------------------------------------------
30579 30757 *
30580 30758 *
30581 30759 * 5. Supported blocksizes of target devices
30582 30760 *
30583 30761 * Sd supports non-512-byte blocksize for removable media devices only.
30584 30762 * For other devices, only 512-byte blocksize is supported. This may be
30585 30763 * changed in near future because some RAID devices require non-512-byte
30586 30764 * blocksize
30587 30765 *
30588 30766 * -----------------------------------------------------------
30589 30767 * removable media hotpluggable | non-512-byte blocksize
30590 30768 * -----------------------------------------------------------
30591 30769 * false false | No
30592 30770 * false true | No
30593 30771 * true x | Yes
30594 30772 * -----------------------------------------------------------
30595 30773 *
30596 30774 *
30597 30775 * 6. Automatic mount & unmount
30598 30776 *
30599 30777 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query
30600 30778  * if a device is a removable media device. It returns 1 for removable media
30601 30779 * devices, and 0 for others.
30602 30780 *
30603 30781 * The automatic mounting subsystem should distinguish between the types
30604 30782 * of devices and apply automounting policies to each.
30605 30783 *
30606 30784 *
30607 30785 * 7. fdisk partition management
30608 30786 *
30609 - * Fdisk is traditional partition method on x86 platform. Sd(7d) driver
30610 - * just supports fdisk partitions on x86 platform. On sparc platform, sd
30611 - * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
30612 - * fdisk partitions on both x86 and SPARC platform.
30787 + * Fdisk is the traditional partitioning method on the x86 platform.
30788 + * The sd(7D) driver supports fdisk partitions on x86 only.
30613 30789 *
30614 30790 * -----------------------------------------------------------
30615 30791 * platform removable media USB/1394 | fdisk supported
30616 30792 * -----------------------------------------------------------
30617 30793 * x86 X X | true
30618 - * ------------------------------------------------------------
30619 - * sparc X X | false
30620 - * ------------------------------------------------------------
30794 + * -----------------------------------------------------------
30621 30795 *
30622 30796 *
30623 30797 * 8. MBOOT/MBR
30624 30798 *
30625 - * Although sd(7d) doesn't support fdisk on SPARC platform, it does support
30626 - * read/write mboot for removable media devices on sparc platform.
30627 - *
30628 30799 * -----------------------------------------------------------
30629 30800 * platform removable media USB/1394 | mboot supported
30630 30801 * -----------------------------------------------------------
30631 30802 * x86 X X | true
30632 - * ------------------------------------------------------------
30633 - * sparc false false | false
30634 - * sparc false true | true
30635 - * sparc true false | true
30636 - * sparc true true | true
30637 - * ------------------------------------------------------------
30803 + * -----------------------------------------------------------
30638 30804 *
30639 30805 *
30640 30806 * 9. error handling during opening device
30641 30807 *
30642 30808 * If failed to open a disk device, an errno is returned. For some kinds
30643 30809 * of errors, different errno is returned depending on if this device is
30644 30810 * a removable media device. This brings USB/1394 hard disks in line with
30645 30811 * expected hard disk behavior. It is not expected that this breaks any
30646 30812 * application.
30647 30813 *
30648 30814 * ------------------------------------------------------
30649 30815 * removable media hotpluggable | errno
30650 30816 * ------------------------------------------------------
30651 30817 * false false | EIO
30652 30818 * false true | EIO
30653 30819 * true x | ENXIO
30654 30820 * ------------------------------------------------------
30655 30821 *
30656 30822 *
30657 30823 * 11. ioctls: DKIOCEJECT, CDROMEJECT
30658 30824 *
30659 30825 * These IOCTLs are applicable only to removable media devices.
30660 30826 *
30661 30827 * -----------------------------------------------------------
30662 30828 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT
30663 30829 * -----------------------------------------------------------
30664 30830 * false false | No
30665 30831 * false true | No
30666 30832 * true x | Yes
30667 30833 * -----------------------------------------------------------
30668 30834 *
30669 30835 *
30670 30836 * 12. Kstats for partitions
30671 30837 *
30672 30838  * sd creates partition kstats for non-removable media devices. USB and
30673 30839  * Firewire hard disks now have partition kstats.
30674 30840 *
30675 30841 * ------------------------------------------------------
30676 30842 * removable media hotpluggable | kstat
30677 30843 * ------------------------------------------------------
30678 30844 * false false | Yes
30679 30845 * false true | Yes
30680 30846 * true x | No
30681 30847 * ------------------------------------------------------
30682 30848 *
30683 30849 *
30684 30850 * 13. Removable media & hotpluggable properties
30685 30851 *
30686 30852 * Sd driver creates a "removable-media" property for removable media
30687 30853 * devices. Parent nexus drivers create a "hotpluggable" property if
30688 30854 * it supports hotplugging.
30689 30855 *
30690 30856 * ---------------------------------------------------------------------
30691 30857 * removable media hotpluggable | "removable-media" " hotpluggable"
30692 30858 * ---------------------------------------------------------------------
30693 30859 * false false | No No
30694 30860 * false true | No Yes
30695 30861 * true false | Yes No
30696 30862 * true true | Yes Yes
30697 30863 * ---------------------------------------------------------------------
30698 30864 *
30699 30865 *
30700 30866 * 14. Power Management
30701 30867 *
30702 30868 * sd only power manages removable media devices or devices that support
30703 30869 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250)
30704 30870 *
30705 30871 * A parent nexus that supports hotplugging can also set "pm-capable"
30706 30872 * if the disk can be power managed.
30707 30873 *
30708 30874 * ------------------------------------------------------------
30709 30875 * removable media hotpluggable pm-capable | power manage
30710 30876 * ------------------------------------------------------------
30711 30877 * false false false | No
30712 30878 * false false true | Yes
30713 30879 * false true false | No
30714 30880 * false true true | Yes
30715 30881 * true x x | Yes
30716 30882 * ------------------------------------------------------------
30717 30883 *
30718 30884 * USB and firewire hard disks can now be power managed independently
30719 30885 * of the framebuffer
30720 30886 *
30721 30887 *
30722 30888 * 15. Support for USB disks with capacity larger than 1TB
30723 30889 *
30724 30890 * Currently, sd doesn't permit a fixed disk device with capacity
30725 30891 * larger than 1TB to be used in a 32-bit operating system environment.
30726 30892 * However, sd doesn't do that for removable media devices. Instead, it
30727 30893 * assumes that removable media devices cannot have a capacity larger
30728 30894  * than 1TB. Therefore, using those devices on a 32-bit system is partially
30729 30895 * supported, which can cause some unexpected results.
30730 30896 *
30731 30897 * ---------------------------------------------------------------------
30732 30898 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env
30733 30899 * ---------------------------------------------------------------------
30734 30900 * false false | true | no
30735 30901 * false true | true | no
30736 30902 * true false | true | Yes
30737 30903 * true true | true | Yes
30738 30904 * ---------------------------------------------------------------------
30739 30905 *
30740 30906 *
30741 30907 * 16. Check write-protection at open time
30742 30908 *
30743 30909 * When a removable media device is being opened for writing without NDELAY
30744 30910 * flag, sd will check if this device is writable. If attempting to open
30745 30911  * a write-protected device without the NDELAY flag, the open will abort.
30746 30912 *
30747 30913 * ------------------------------------------------------------
30748 30914 * removable media USB/1394 | WP Check
30749 30915 * ------------------------------------------------------------
30750 30916 * false false | No
30751 30917 * false true | No
30752 30918 * true false | Yes
30753 30919 * true true | Yes
30754 30920 * ------------------------------------------------------------
30755 30921 *
30756 30922 *
30757 30923 * 17. syslog when corrupted VTOC is encountered
30758 30924 *
30759 30925  * Currently, if an invalid VTOC is encountered, sd only prints a
30760 30926  * syslog message for fixed SCSI disks.
30761 30927 * ------------------------------------------------------------
30762 30928 * removable media USB/1394 | print syslog
30763 30929 * ------------------------------------------------------------
30764 30930 * false false | Yes
30765 30931 * false true | No
30766 30932 * true false | No
30767 30933 * true true | No
30768 30934 * ------------------------------------------------------------
30769 30935 */
30770 30936 static void
30771 30937 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30772 30938 {
30773 30939 int pm_cap;
30774 30940
30775 30941 ASSERT(un->un_sd);
30776 30942 ASSERT(un->un_sd->sd_inq);
30777 30943
30778 30944 /*
30779 30945 * Enable SYNC CACHE support for all devices.
30780 30946 */
30781 30947 un->un_f_sync_cache_supported = TRUE;
30782 30948
30783 30949 /*
30784 30950 * Set the sync cache required flag to false.
30785 30951 * This would ensure that there is no SYNC CACHE
30786 30952 * sent when there are no writes
30787 30953 */
30788 30954 un->un_f_sync_cache_required = FALSE;
30789 30955
30790 30956 if (un->un_sd->sd_inq->inq_rmb) {
30791 30957 /*
30792 30958 		 * The media of this device is removable, and for this kind
30793 30959 		 * of device it is possible to change the medium after
30794 30960 		 * opening the device. Thus we should support this operation.
30795 30961 */
30796 30962 un->un_f_has_removable_media = TRUE;
30797 30963
30798 30964 /*
30799 30965 * support non-512-byte blocksize of removable media devices
30800 30966 */
30801 30967 un->un_f_non_devbsize_supported = TRUE;
30802 30968
30803 30969 /*
30804 30970 * Assume that all removable media devices support DOOR_LOCK
30805 30971 */
30806 30972 un->un_f_doorlock_supported = TRUE;
30807 30973
30808 30974 /*
30809 30975 * For a removable media device, it is possible to be opened
30810 30976 * with NDELAY flag when there is no media in drive, in this
30811 30977 * case we don't care if device is writable. But if without
30812 30978 * NDELAY flag, we need to check if media is write-protected.
30813 30979 */
30814 30980 un->un_f_chk_wp_open = TRUE;
30815 30981
30816 30982 /*
30817 30983 * need to start a SCSI watch thread to monitor media state,
30818 30984 * when media is being inserted or ejected, notify syseventd.
30819 30985 */
30820 30986 un->un_f_monitor_media_state = TRUE;
30821 30987
30822 30988 /*
30823 30989 * Some devices don't support START_STOP_UNIT command.
30824 30990 * Therefore, we'd better check if a device supports it
30825 30991 * before sending it.
30826 30992 */
30827 30993 un->un_f_check_start_stop = TRUE;
30828 30994
30829 30995 /*
30830 30996 * support eject media ioctl:
30831 30997 * FDEJECT, DKIOCEJECT, CDROMEJECT
30832 30998 */
30833 30999 un->un_f_eject_media_supported = TRUE;
30834 31000
30835 31001 /*
30836 31002 * Because many removable-media devices don't support
30837 31003 * LOG_SENSE, we couldn't use this command to check if
30838 31004 		 * LOG_SENSE, we cannot use this command to check whether
30839 31005 		 * a removable media device supports power management.
30840 31006 * START_STOP_UNIT command and can be spun up and down
30841 31007 * without limitations.
30842 31008 */
30843 31009 un->un_f_pm_supported = TRUE;
30844 31010
30845 31011 /*
30846 31012 * Need to create a zero length (Boolean) property
30847 31013 * removable-media for the removable media devices.
30848 31014 * Note that the return value of the property is not being
30849 31015 		 * checked, since if we are unable to create the property
30850 31016 		 * we do not want the attach to fail altogether. Consistent
30851 31017 * with other property creation in attach.
30852 31018 */
30853 31019 (void) ddi_prop_create(DDI_DEV_T_NONE, devi,
30854 31020 DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
30855 31021
30856 31022 } else {
30857 31023 /*
30858 31024 * create device ID for device
30859 31025 */
30860 31026 un->un_f_devid_supported = TRUE;
30861 31027
30862 31028 /*
30863 31029 * Spin up non-removable-media devices once it is attached
30864 31030 */
30865 31031 un->un_f_attach_spinup = TRUE;
30866 31032
30867 31033 /*
30868 31034 * According to SCSI specification, Sense data has two kinds of
30869 31035 * format: fixed format, and descriptor format. At present, we
30870 31036 * don't support descriptor format sense data for removable
30871 31037 * media.
30872 31038 */
30873 31039 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
30874 31040 un->un_f_descr_format_supported = TRUE;
30875 31041 }
30876 31042
30877 31043 /*
30878 31044 * kstats are created only for non-removable media devices.
30879 31045 *
30880 31046 * Set this in sd.conf to 0 in order to disable kstats. The
30881 31047 * default is 1, so they are enabled by default.
30882 31048 */
30883 31049 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
30884 31050 SD_DEVINFO(un), DDI_PROP_DONTPASS,
30885 31051 "enable-partition-kstats", 1));
30886 31052
30887 31053 /*
30888 31054 * Check if HBA has set the "pm-capable" property.
30889 31055 * If "pm-capable" exists and is non-zero then we can
30890 31056 * power manage the device without checking the start/stop
30891 31057 * cycle count log sense page.
30892 31058 *
30893 31059 * If "pm-capable" exists and is set to be false (0),
30894 31060 * then we should not power manage the device.
30895 31061 *
30896 31062 * If "pm-capable" doesn't exist then pm_cap will
30897 31063 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
30898 31064 * sd will check the start/stop cycle count log sense page
30899 31065 * and power manage the device if the cycle count limit has
30900 31066 * not been exceeded.
30901 31067 */
30902 31068 pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
30903 31069 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
30904 31070 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
30905 31071 un->un_f_log_sense_supported = TRUE;
30906 31072 if (!un->un_f_power_condition_disabled &&
30907 31073 SD_INQUIRY(un)->inq_ansi == 6) {
30908 31074 un->un_f_power_condition_supported = TRUE;
30909 31075 }
30910 31076 } else {
30911 31077 /*
30912 31078 * pm-capable property exists.
30913 31079 *
30914 31080 * Convert "TRUE" values for pm_cap to
30915 31081 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
30916 31082 * later. "TRUE" values are any values defined in
30917 31083 * inquiry.h.
30918 31084 */
30919 31085 if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
30920 31086 un->un_f_log_sense_supported = FALSE;
30921 31087 } else {
30922 31088 /* SD_PM_CAPABLE_IS_TRUE case */
30923 31089 un->un_f_pm_supported = TRUE;
30924 31090 if (!un->un_f_power_condition_disabled &&
30925 31091 SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
30926 31092 un->un_f_power_condition_supported =
30927 31093 TRUE;
30928 31094 }
30929 31095 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
30930 31096 un->un_f_log_sense_supported = TRUE;
30931 31097 un->un_f_pm_log_sense_smart =
30932 31098 SD_PM_CAP_SMART_LOG(pm_cap);
30933 31099 }
30934 31100 }
30935 31101
30936 31102 SD_INFO(SD_LOG_ATTACH_DETACH, un,
30937 31103 "sd_unit_attach: un:0x%p pm-capable "
30938 31104 "property set to %d.\n", un, un->un_f_pm_supported);
30939 31105 }
30940 31106 }
30941 31107
30942 31108 if (un->un_f_is_hotpluggable) {
30943 31109
30944 31110 /*
30945 31111 * Have to watch hotpluggable devices as well, since
30946 31112 * that's the only way for userland applications to
30947 31113 * detect hot removal while device is busy/mounted.
30948 31114 */
30949 31115 un->un_f_monitor_media_state = TRUE;
30950 31116
30951 31117 un->un_f_check_start_stop = TRUE;
30952 31118
30953 31119 }
30954 31120 }
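Several of the behaviors described above are tunable from sd.conf; for
example, the partition-kstat comment refers to a property like the
following (an illustrative driver.conf(4) fragment; the value shown is
an example only):

    # sd.conf: disable per-partition kstats on non-removable disks
    enable-partition-kstats=0;

Note that "pm-capable" is set by the parent nexus driver, not in
sd.conf, as the comment in sd_set_unit_attributes() explains.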
30955 31121
30956 31122 /*
30957 31123 * sd_tg_rdwr:
30958 31124 * Provides rdwr access for cmlb via sd_tgops. The start_block is
30959 31125 * in sys block size, req_length in bytes.
30960 31126 *
30961 31127 */
30962 31128 static int
30963 31129 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
30964 31130 diskaddr_t start_block, size_t reqlength, void *tg_cookie)
30965 31131 {
30966 31132 struct sd_lun *un;
30967 31133 int path_flag = (int)(uintptr_t)tg_cookie;
30968 31134 char *dkl = NULL;
30969 31135 diskaddr_t real_addr = start_block;
30970 31136 diskaddr_t first_byte, end_block;
30971 31137
30972 31138 size_t buffer_size = reqlength;
30973 31139 int rval = 0;
30974 31140 diskaddr_t cap;
30975 31141 uint32_t lbasize;
30976 31142 sd_ssc_t *ssc;
30977 31143
30978 31144 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
30979 31145 if (un == NULL)
30980 31146 return (ENXIO);
30981 31147
30982 31148 if (cmd != TG_READ && cmd != TG_WRITE)
30983 31149 return (EINVAL);
30984 31150
30985 31151 ssc = sd_ssc_init(un);
30986 31152 mutex_enter(SD_MUTEX(un));
30987 31153 if (un->un_f_tgt_blocksize_is_valid == FALSE) {
30988 31154 mutex_exit(SD_MUTEX(un));
30989 31155 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
30990 31156 &lbasize, path_flag);
30991 31157 if (rval != 0)
30992 31158 goto done1;
30993 31159 mutex_enter(SD_MUTEX(un));
30994 31160 sd_update_block_info(un, lbasize, cap);
30995 31161 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
30996 31162 mutex_exit(SD_MUTEX(un));
30997 31163 rval = EIO;
30998 31164 goto done;
30999 31165 }
31000 31166 }
31001 31167
31002 31168 if (NOT_DEVBSIZE(un)) {
31003 31169 /*
31004 31170 * sys_blocksize != tgt_blocksize, need to re-adjust
31005 31171 * blkno and save the index to beginning of dk_label
31006 31172 */
31007 31173 first_byte = SD_SYSBLOCKS2BYTES(start_block);
31008 31174 real_addr = first_byte / un->un_tgt_blocksize;
31009 31175
31010 31176 end_block = (first_byte + reqlength +
31011 31177 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
31012 31178
31013 31179 /* round up buffer size to multiple of target block size */
31014 31180 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;
31015 31181
31016 31182 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
31017 31183 "label_addr: 0x%x allocation size: 0x%x\n",
31018 31184 real_addr, buffer_size);
31019 31185
31020 31186 if (((first_byte % un->un_tgt_blocksize) != 0) ||
31021 31187 (reqlength % un->un_tgt_blocksize) != 0)
31022 31188 /* the request is not aligned */
31023 31189 dkl = kmem_zalloc(buffer_size, KM_SLEEP);
31024 31190 }
31025 31191
31026 31192 /*
31027 31193 * The MMC standard allows READ CAPACITY to be
31028 31194 * inaccurate by a bounded amount (in the interest of
31029 31195 * response latency). As a result, failed READs are
31030 31196 * commonplace (due to the reading of metadata and not
31031 31197 * data). Depending on the per-Vendor/drive Sense data,
31032 31198 * the failed READ can cause many (unnecessary) retries.
31033 31199 */
31034 31200
31035 31201 if (ISCD(un) && (cmd == TG_READ) &&
31036 31202 (un->un_f_blockcount_is_valid == TRUE) &&
31037 31203 ((start_block == (un->un_blockcount - 1))||
31038 31204 (start_block == (un->un_blockcount - 2)))) {
31039 31205 path_flag = SD_PATH_DIRECT_PRIORITY;
31040 31206 }
31041 31207
31042 31208 mutex_exit(SD_MUTEX(un));
31043 31209 if (cmd == TG_READ) {
31044 31210 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr,
31045 31211 buffer_size, real_addr, path_flag);
31046 31212 if (dkl != NULL)
31047 31213 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
31048 31214 real_addr), bufaddr, reqlength);
31049 31215 } else {
31050 31216 if (dkl) {
31051 31217 rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
31052 31218 real_addr, path_flag);
31053 31219 if (rval) {
31054 31220 goto done1;
31055 31221 }
31056 31222 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
31057 31223 real_addr), reqlength);
31058 31224 }
31059 31225 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? dkl: bufaddr,
31060 31226 buffer_size, real_addr, path_flag);
31061 31227 }
31062 31228
31063 31229 done1:
31064 31230 if (dkl != NULL)
31065 31231 kmem_free(dkl, buffer_size);
31066 31232
31067 31233 if (rval != 0) {
31068 31234 if (rval == EIO)
31069 31235 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
31070 31236 else
31071 31237 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31072 31238 }
31073 31239 done:
31074 31240 sd_ssc_fini(ssc);
31075 31241 return (rval);
31076 31242 }
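To make the block-size readjustment in sd_tg_rdwr() concrete, a worked
example with a 512-byte system block size and a 4096-byte target block
size. This standalone sketch mirrors the arithmetic above; it assumes
SD_SYSBLOCKS2BYTES is the usual sys-blocks-to-bytes conversion and that
SD_TGTBYTEOFFSET yields the byte offset within the bounce buffer:

    /* Illustration of the sd_tg_rdwr() alignment math. */
    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint64_t tgt_bs = 4096;
    	uint64_t first_byte = 9 * 512;		/* start_block 9 -> 4608 */
    	uint64_t reqlength = 512;

    	uint64_t real_addr = first_byte / tgt_bs;		/* 1 */
    	uint64_t end_block =
    	    (first_byte + reqlength + tgt_bs - 1) / tgt_bs;	/* 2 */
    	uint64_t buffer_size = (end_block - real_addr) * tgt_bs; /* 4096 */

    	/*
    	 * 4608 % 4096 != 0, so the request is unaligned: a bounce
    	 * buffer of 4096 bytes covers target block 1, and the 512
    	 * requested bytes sit at offset 512 within it.
    	 */
    	(void) printf("real_addr=%llu size=%llu offset=%llu\n",
    	    (unsigned long long)real_addr,
    	    (unsigned long long)buffer_size,
    	    (unsigned long long)(first_byte - real_addr * tgt_bs));
    	return (0);
    }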
31077 31243
31078 31244
31079 31245 static int
31080 31246 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
31081 31247 {
31082 31248
31083 31249 struct sd_lun *un;
31084 31250 diskaddr_t cap;
31085 31251 uint32_t lbasize;
31086 31252 int path_flag = (int)(uintptr_t)tg_cookie;
31087 31253 int ret = 0;
31088 31254
31089 31255 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
31090 31256 if (un == NULL)
31091 31257 return (ENXIO);
31092 31258
31093 31259 switch (cmd) {
31094 31260 case TG_GETPHYGEOM:
31095 31261 case TG_GETVIRTGEOM:
31096 31262 case TG_GETCAPACITY:
31097 31263 case TG_GETBLOCKSIZE:
31098 31264 mutex_enter(SD_MUTEX(un));
31099 31265
31100 31266 if ((un->un_f_blockcount_is_valid == TRUE) &&
31101 31267 (un->un_f_tgt_blocksize_is_valid == TRUE)) {
31102 31268 cap = un->un_blockcount;
31103 31269 lbasize = un->un_tgt_blocksize;
31104 31270 mutex_exit(SD_MUTEX(un));
31105 31271 } else {
31106 31272 sd_ssc_t *ssc;
31107 31273 mutex_exit(SD_MUTEX(un));
31108 31274 ssc = sd_ssc_init(un);
31109 31275 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
31110 31276 &lbasize, path_flag);
31111 31277 if (ret != 0) {
31112 31278 if (ret == EIO)
31113 31279 sd_ssc_assessment(ssc,
31114 31280 SD_FMT_STATUS_CHECK);
31115 31281 else
31116 31282 sd_ssc_assessment(ssc,
31117 31283 SD_FMT_IGNORE);
31118 31284 sd_ssc_fini(ssc);
31119 31285 return (ret);
31120 31286 }
31121 31287 sd_ssc_fini(ssc);
31122 31288 mutex_enter(SD_MUTEX(un));
31123 31289 sd_update_block_info(un, lbasize, cap);
31124 31290 if ((un->un_f_blockcount_is_valid == FALSE) ||
31125 31291 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
31126 31292 mutex_exit(SD_MUTEX(un));
31127 31293 return (EIO);
31128 31294 }
31129 31295 mutex_exit(SD_MUTEX(un));
31130 31296 }
31131 31297
31132 31298 if (cmd == TG_GETCAPACITY) {
31133 31299 *(diskaddr_t *)arg = cap;
31134 31300 return (0);
31135 31301 }
31136 31302
31137 31303 if (cmd == TG_GETBLOCKSIZE) {
31138 31304 *(uint32_t *)arg = lbasize;
31139 31305 return (0);
31140 31306 }
31141 31307
31142 31308 if (cmd == TG_GETPHYGEOM)
31143 31309 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
31144 31310 cap, lbasize, path_flag);
31145 31311 else
31146 31312 /* TG_GETVIRTGEOM */
31147 31313 ret = sd_get_virtual_geometry(un,
31148 31314 (cmlb_geom_t *)arg, cap, lbasize);
31149 31315
31150 31316 return (ret);
31151 31317
31152 31318 case TG_GETATTR:
31153 31319 mutex_enter(SD_MUTEX(un));
31154 31320 ((tg_attribute_t *)arg)->media_is_writable =
31155 31321 un->un_f_mmc_writable_media;
31156 31322 ((tg_attribute_t *)arg)->media_is_solid_state =
31157 31323 un->un_f_is_solid_state;
31158 31324 ((tg_attribute_t *)arg)->media_is_rotational =
31159 31325 un->un_f_is_rotational;
31160 31326 mutex_exit(SD_MUTEX(un));
31161 31327 return (0);
31162 31328 default:
31163 31329 return (ENOTTY);
31164 31330
31165 31331 }
31166 31332 }
31167 31333
31168 31334 /*
31169 31335 * Function: sd_ssc_ereport_post
31170 31336 *
31171 31337  * Description: Will be called when the SD driver needs to post an ereport.
31172 31338 *
31173 31339 * Context: Kernel thread or interrupt context.
31174 31340 */
31175 -
31176 -#define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
31177 -
31178 31341 static void
31179 31342 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
31180 31343 {
31181 - int uscsi_path_instance = 0;
31182 31344 uchar_t uscsi_pkt_reason;
31183 31345 uint32_t uscsi_pkt_state;
31184 31346 uint32_t uscsi_pkt_statistics;
31185 31347 uint64_t uscsi_ena;
31186 31348 uchar_t op_code;
31187 31349 uint8_t *sensep;
31188 31350 union scsi_cdb *cdbp;
31189 31351 uint_t cdblen = 0;
31190 31352 uint_t senlen = 0;
31191 31353 struct sd_lun *un;
31192 31354 dev_info_t *dip;
31193 31355 char *devid;
31194 31356 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
31195 31357 SSC_FLAGS_INVALID_STATUS |
31196 31358 SSC_FLAGS_INVALID_SENSE |
31197 31359 SSC_FLAGS_INVALID_DATA;
31198 31360 char assessment[16];
31199 31361
31200 31362 ASSERT(ssc != NULL);
31201 31363 ASSERT(ssc->ssc_uscsi_cmd != NULL);
31202 31364 ASSERT(ssc->ssc_uscsi_info != NULL);
31203 31365
31204 31366 un = ssc->ssc_un;
31205 31367 ASSERT(un != NULL);
31206 31368
31207 31369 dip = un->un_sd->sd_dev;
31208 31370
31209 31371 /*
31210 31372 * Get the devid:
31211 31373 * devid will only be passed to non-transport error reports.
31212 31374 */
31213 31375 devid = DEVI(dip)->devi_devid_str;
31214 31376
31215 31377 /*
31216 31378 * If we are syncing or dumping, the command will not be executed
31217 31379 * so we bypass this situation.
31218 31380 */
31219 31381 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
31220 31382 (un->un_state == SD_STATE_DUMPING))
31221 31383 return;
31222 31384
31223 31385 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
31224 - uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
31225 31386 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
31226 31387 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
31227 31388 uscsi_ena = ssc->ssc_uscsi_info->ui_ena;
31228 31389
31229 31390 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
31230 31391 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;
31231 31392
31232 31393 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */
31233 31394 if (cdbp == NULL) {
31234 31395 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
31235 31396 "sd_ssc_ereport_post meet empty cdb\n");
31236 31397 return;
31237 31398 }
31238 31399
31239 31400 op_code = cdbp->scc_cmd;
31240 31401
31241 31402 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
31242 31403 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
31243 31404 ssc->ssc_uscsi_cmd->uscsi_rqresid);
31244 31405
31245 31406 if (senlen > 0)
31246 31407 ASSERT(sensep != NULL);
31247 31408
31248 31409 /*
31249 31410 * Initialize drv_assess to corresponding values.
31250 31411 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
31251 31412 * on the sense-key returned back.
31252 31413 */
31253 31414 switch (drv_assess) {
31254 31415 case SD_FM_DRV_RECOVERY:
31255 31416 (void) sprintf(assessment, "%s", "recovered");
31256 31417 break;
31257 31418 case SD_FM_DRV_RETRY:
31258 31419 (void) sprintf(assessment, "%s", "retry");
31259 31420 break;
31260 31421 case SD_FM_DRV_NOTICE:
31261 31422 (void) sprintf(assessment, "%s", "info");
31262 31423 break;
31263 31424 case SD_FM_DRV_FATAL:
31264 31425 default:
31265 31426 (void) sprintf(assessment, "%s", "unknown");
31266 31427 }
31267 31428 /*
31268 31429 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
31269 31430 * command, we will post ereport.io.scsi.cmd.disk.recovered.
31270 31431 * driver-assessment will always be "recovered" here.
31271 31432 */
31272 31433 if (drv_assess == SD_FM_DRV_RECOVERY) {
31273 - scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31434 + scsi_fm_ereport_post(un->un_sd, 0, NULL,
31274 31435 "cmd.disk.recovered", uscsi_ena, devid, NULL,
31275 31436 DDI_NOSLEEP, NULL,
31276 31437 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31277 31438 DEVID_IF_KNOWN(devid),
31278 31439 "driver-assessment", DATA_TYPE_STRING, assessment,
31279 31440 "op-code", DATA_TYPE_UINT8, op_code,
31280 31441 "cdb", DATA_TYPE_UINT8_ARRAY,
31281 31442 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31282 31443 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31283 31444 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31284 31445 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31285 31446 NULL);
31286 31447 return;
31287 31448 }
31288 31449
31289 31450 /*
31290 31451 * If there is un-expected/un-decodable data, we should post
31291 31452 * ereport.io.scsi.cmd.disk.dev.uderr.
31292 31453 * driver-assessment will be set based on parameter drv_assess.
31293 31454 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
31294 31455 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
31295 31456 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
31296 31457 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
31297 31458 */
31298 31459 if (ssc->ssc_flags & ssc_invalid_flags) {
31299 31460 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
31300 - scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31461 + scsi_fm_ereport_post(un->un_sd, 0,
31301 31462 NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
31302 31463 NULL, DDI_NOSLEEP, NULL,
31303 31464 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31304 31465 DEVID_IF_KNOWN(devid),
31305 31466 "driver-assessment", DATA_TYPE_STRING,
31306 31467 drv_assess == SD_FM_DRV_FATAL ?
31307 31468 "fail" : assessment,
31308 31469 "op-code", DATA_TYPE_UINT8, op_code,
31309 31470 "cdb", DATA_TYPE_UINT8_ARRAY,
31310 31471 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31311 31472 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31312 31473 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31313 31474 "pkt-stats", DATA_TYPE_UINT32,
31314 31475 uscsi_pkt_statistics,
31315 31476 "stat-code", DATA_TYPE_UINT8,
31316 31477 ssc->ssc_uscsi_cmd->uscsi_status,
31317 31478 "un-decode-info", DATA_TYPE_STRING,
31318 31479 ssc->ssc_info,
31319 31480 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31320 31481 senlen, sensep,
31321 31482 NULL);
31322 31483 } else {
31323 31484 /*
31324 31485 * For other type of invalid data, the
31325 31486 * un-decode-value field would be empty because the
31326 31487 * un-decodable content could be seen from upper
31327 31488 * level payload or inside un-decode-info.
31328 31489 */
31329 - scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31490 + scsi_fm_ereport_post(un->un_sd, 0,
31330 31491 NULL,
31331 31492 "cmd.disk.dev.uderr", uscsi_ena, devid,
31332 31493 NULL, DDI_NOSLEEP, NULL,
31333 31494 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31334 31495 DEVID_IF_KNOWN(devid),
31335 31496 "driver-assessment", DATA_TYPE_STRING,
31336 31497 drv_assess == SD_FM_DRV_FATAL ?
31337 31498 "fail" : assessment,
31338 31499 "op-code", DATA_TYPE_UINT8, op_code,
31339 31500 "cdb", DATA_TYPE_UINT8_ARRAY,
31340 31501 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31341 31502 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31342 31503 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31343 31504 "pkt-stats", DATA_TYPE_UINT32,
31344 31505 uscsi_pkt_statistics,
31345 31506 "stat-code", DATA_TYPE_UINT8,
31346 31507 ssc->ssc_uscsi_cmd->uscsi_status,
31347 31508 "un-decode-info", DATA_TYPE_STRING,
31348 31509 ssc->ssc_info,
31349 31510 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31350 31511 0, NULL,
31351 31512 NULL);
31352 31513 }
31353 31514 ssc->ssc_flags &= ~ssc_invalid_flags;
31354 31515 return;
31355 31516 }
31356 31517
31357 31518 if (uscsi_pkt_reason != CMD_CMPLT ||
31358 31519 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
31359 31520 /*
31360 31521 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was
|
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
31361 31522 * set inside sd_start_cmds due to errors(bad packet or
31362 31523 * fatal transport error), we should take it as a
31363 31524 * transport error, so we post ereport.io.scsi.cmd.disk.tran.
31364 31525 * driver-assessment will be set based on drv_assess.
31365 31526 * We will set devid to NULL because it is a transport
31366 31527 * error.
31367 31528 */
31368 31529 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
31369 31530 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;
31370 31531
31371 - scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31532 + scsi_fm_ereport_post(un->un_sd, 0, NULL,
31372 31533 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
31373 31534 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31374 31535 DEVID_IF_KNOWN(devid),
31375 31536 "driver-assessment", DATA_TYPE_STRING,
31376 31537 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31377 31538 "op-code", DATA_TYPE_UINT8, op_code,
31378 31539 "cdb", DATA_TYPE_UINT8_ARRAY,
31379 31540 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31380 31541 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31381 31542 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
31382 31543 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31383 31544 NULL);
31384 31545 } else {
31385 31546 /*
31386 31547 * If we got here, we have a completed command, and we need
31387 31548 * to further investigate the sense data to see what kind
31388 31549 * of ereport we should post.
31389 31550 * No ereport is needed if sense-key is KEY_RECOVERABLE_ERROR
31390 31551 * and asc/ascq is "ATA PASS-THROUGH INFORMATION AVAILABLE".
31391 31552 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if sense-key is
31392 31553 * KEY_MEDIUM_ERROR.
31393 31554 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
31394 31555 * driver-assessment will be set based on the parameter
31395 31556 * drv_assess.
31396 31557 */
31397 31558 if (senlen > 0) {
31398 31559 /*
31399 31560 * Here we have sense data available.
31400 31561 */
31401 31562 uint8_t sense_key = scsi_sense_key(sensep);
31402 31563 uint8_t sense_asc = scsi_sense_asc(sensep);
31403 31564 uint8_t sense_ascq = scsi_sense_ascq(sensep);
31404 31565
|
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
31405 31566 if (sense_key == KEY_RECOVERABLE_ERROR &&
31406 31567 sense_asc == 0x00 && sense_ascq == 0x1d)
31407 31568 return;
31408 31569
31409 31570 if (sense_key == KEY_MEDIUM_ERROR) {
31410 31571 /*
31411 31572 * driver-assessment should be "fatal" if
31412 31573 * drv_assess is SD_FM_DRV_FATAL.
31413 31574 */
31414 31575 scsi_fm_ereport_post(un->un_sd,
31415 - uscsi_path_instance, NULL,
31576 + 0, NULL,
31416 31577 "cmd.disk.dev.rqs.merr",
31417 31578 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
31418 31579 FM_VERSION, DATA_TYPE_UINT8,
31419 31580 FM_EREPORT_VERS0,
31420 31581 DEVID_IF_KNOWN(devid),
31421 31582 "driver-assessment",
31422 31583 DATA_TYPE_STRING,
31423 31584 drv_assess == SD_FM_DRV_FATAL ?
31424 31585 "fatal" : assessment,
31425 31586 "op-code",
31426 31587 DATA_TYPE_UINT8, op_code,
31427 31588 "cdb",
31428 31589 DATA_TYPE_UINT8_ARRAY, cdblen,
31429 31590 ssc->ssc_uscsi_cmd->uscsi_cdb,
31430 31591 "pkt-reason",
31431 31592 DATA_TYPE_UINT8, uscsi_pkt_reason,
31432 31593 "pkt-state",
31433 31594 DATA_TYPE_UINT8, uscsi_pkt_state,
31434 31595 "pkt-stats",
31435 31596 DATA_TYPE_UINT32,
31436 31597 uscsi_pkt_statistics,
31437 31598 "stat-code",
31438 31599 DATA_TYPE_UINT8,
31439 31600 ssc->ssc_uscsi_cmd->uscsi_status,
31440 31601 "key",
31441 31602 DATA_TYPE_UINT8,
31442 31603 scsi_sense_key(sensep),
31443 31604 "asc",
31444 31605 DATA_TYPE_UINT8,
31445 31606 scsi_sense_asc(sensep),
31446 31607 "ascq",
31447 31608 DATA_TYPE_UINT8,
31448 31609 scsi_sense_ascq(sensep),
31449 31610 "sense-data",
31450 31611 DATA_TYPE_UINT8_ARRAY,
31451 31612 senlen, sensep,
31452 31613 "lba",
31453 31614 DATA_TYPE_UINT64,
|
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
31454 31615 ssc->ssc_uscsi_info->ui_lba,
31455 31616 NULL);
31456 31617 } else {
31457 31618 /*
31458 31619 * if sense-key == 0x4(hardware
31459 31620 * error), driver-assessment should
31460 31621 * be "fatal" if drv_assess is
31461 31622 * SD_FM_DRV_FATAL.
31462 31623 */
31463 31624 scsi_fm_ereport_post(un->un_sd,
31464 - uscsi_path_instance, NULL,
31625 + 0, NULL,
31465 31626 "cmd.disk.dev.rqs.derr",
31466 31627 uscsi_ena, devid,
31467 31628 NULL, DDI_NOSLEEP, NULL,
31468 31629 FM_VERSION,
31469 31630 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31470 31631 DEVID_IF_KNOWN(devid),
31471 31632 "driver-assessment",
31472 31633 DATA_TYPE_STRING,
31473 31634 drv_assess == SD_FM_DRV_FATAL ?
31474 31635 (sense_key == 0x4 ?
31475 31636 "fatal" : "fail") : assessment,
31476 31637 "op-code",
31477 31638 DATA_TYPE_UINT8, op_code,
31478 31639 "cdb",
31479 31640 DATA_TYPE_UINT8_ARRAY, cdblen,
31480 31641 ssc->ssc_uscsi_cmd->uscsi_cdb,
31481 31642 "pkt-reason",
31482 31643 DATA_TYPE_UINT8, uscsi_pkt_reason,
31483 31644 "pkt-state",
31484 31645 DATA_TYPE_UINT8, uscsi_pkt_state,
31485 31646 "pkt-stats",
31486 31647 DATA_TYPE_UINT32,
31487 31648 uscsi_pkt_statistics,
31488 31649 "stat-code",
31489 31650 DATA_TYPE_UINT8,
31490 31651 ssc->ssc_uscsi_cmd->uscsi_status,
31491 31652 "key",
31492 31653 DATA_TYPE_UINT8,
31493 31654 scsi_sense_key(sensep),
31494 31655 "asc",
31495 31656 DATA_TYPE_UINT8,
31496 31657 scsi_sense_asc(sensep),
31497 31658 "ascq",
31498 31659 DATA_TYPE_UINT8,
31499 31660 scsi_sense_ascq(sensep),
31500 31661 "sense-data",
31501 31662 DATA_TYPE_UINT8_ARRAY,
31502 31663 senlen, sensep,
31503 31664 NULL);
31504 31665 }
31505 31666 } else {
31506 31667 /*
31507 31668 * For stat_code == STATUS_GOOD, this is not a
31508 31669 * hardware error.
|
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
31509 31670 */
31510 31671 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
31511 31672 return;
31512 31673
31513 31674 /*
31514 31675 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the
31515 31676 * stat-code but with sense data unavailable.
31516 31677 * driver-assessment will be set based on parameter
31517 31678 * drv_assess.
31518 31679 */
31519 - scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31680 + scsi_fm_ereport_post(un->un_sd, 0,
31520 31681 NULL,
31521 31682 "cmd.disk.dev.serr", uscsi_ena,
31522 31683 devid, NULL, DDI_NOSLEEP, NULL,
31523 31684 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31524 31685 DEVID_IF_KNOWN(devid),
31525 31686 "driver-assessment", DATA_TYPE_STRING,
31526 31687 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31527 31688 "op-code", DATA_TYPE_UINT8, op_code,
31528 31689 "cdb",
31529 31690 DATA_TYPE_UINT8_ARRAY,
31530 31691 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31531 31692 "pkt-reason",
31532 31693 DATA_TYPE_UINT8, uscsi_pkt_reason,
31533 31694 "pkt-state",
31534 31695 DATA_TYPE_UINT8, uscsi_pkt_state,
31535 31696 "pkt-stats",
31536 31697 DATA_TYPE_UINT32, uscsi_pkt_statistics,
31537 31698 "stat-code",
31538 31699 DATA_TYPE_UINT8,
31539 31700 ssc->ssc_uscsi_cmd->uscsi_status,
31540 31701 NULL);
31541 31702 }
31542 31703 }
31543 31704 }
31544 31705
31545 31706 /*
31546 31707 * Function: sd_ssc_extract_info
31547 31708 *
31548 31709 * Description: Extract information available to help generate ereport.
31549 31710 *
31550 31711 * Context: Kernel thread or interrupt context.
31551 31712 */
31552 31713 static void
31553 31714 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
31554 31715 struct buf *bp, struct sd_xbuf *xp)
31555 31716 {
31556 31717 size_t senlen = 0;
31557 31718 union scsi_cdb *cdbp;
31558 31719 int path_instance;
31559 31720 /*
31560 31721 * Need scsi_cdb_size array to determine the cdb length.
31561 31722 */
31562 31723 extern uchar_t scsi_cdb_size[];
31563 31724
31564 31725 ASSERT(un != NULL);
31565 31726 ASSERT(pktp != NULL);
31566 31727 ASSERT(bp != NULL);
31567 31728 ASSERT(xp != NULL);
31568 31729 ASSERT(ssc != NULL);
31569 31730 ASSERT(mutex_owned(SD_MUTEX(un)));
31570 31731
31571 31732 /*
31572 31733 * Transfer the cdb buffer pointer here.
31573 31734 */
31574 31735 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
31575 31736
31576 31737 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
31577 31738 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;
31578 31739
31579 31740 /*
31580 31741 	 * Transfer the sense data buffer pointer if sense data is available;
31581 31742 * calculate the sense data length first.
31582 31743 */
31583 31744 if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
31584 31745 (xp->xb_sense_state & STATE_ARQ_DONE)) {
31585 31746 /*
31586 31747 * For arq case, we will enter here.
31587 31748 		 * For the ARQ case, we enter here.
31588 31749 if (xp->xb_sense_state & STATE_XARQ_DONE) {
31589 31750 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
31590 31751 } else {
31591 31752 senlen = SENSE_LENGTH;
31592 31753 }
31593 31754 } else {
31594 31755 /*
31595 31756 		 * For the non-ARQ case, we enter this branch.
31596 31757 */
31597 31758 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
31598 31759 (xp->xb_sense_state & STATE_XFERRED_DATA)) {
31599 31760 senlen = SENSE_LENGTH - xp->xb_sense_resid;
31600 31761 }
31601 31762
31602 31763 }
31603 31764
31604 31765 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
31605 31766 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
31606 31767 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;
31607 31768
31608 31769 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
31609 31770
31610 31771 /*
31611 31772 * Only transfer path_instance when scsi_pkt was properly allocated.
31612 31773 */
31613 31774 path_instance = pktp->pkt_path_instance;
31614 31775 if (scsi_pkt_allocated_correctly(pktp) && path_instance)
31615 31776 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
31616 31777 else
31617 31778 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;
31618 31779
31619 31780 /*
31620 31781 * Copy in the other fields we may need when posting ereport.
31621 31782 */
31622 31783 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
31623 31784 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
31624 31785 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
31625 31786 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
31626 31787
31627 31788 /*
31628 31789 	 * For a partial read/write command, we will not create an ena
31629 31790 	 * in case a successful command is recognized as recovered.
31630 31791 */
31631 31792 if ((pktp->pkt_reason == CMD_CMPLT) &&
31632 31793 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
31633 31794 (senlen == 0)) {
31634 31795 return;
31635 31796 }
31636 31797
31637 31798 /*
31638 31799 * To associate ereports of a single command execution flow, we
31639 31800 * need a shared ena for a specific command.
31640 31801 */
31641 31802 if (xp->xb_ena == 0)
31642 31803 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
31643 31804 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
31644 31805 }
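
The xb_ena handling at the end of sd_ssc_extract_info() is what correlates the ereports of a single command: the first extraction generates an ENA and every later ereport for the same command reuses it. A minimal sketch of the pattern (fm_ena_generate() and FM_ENA_FMT1 are the real DDI interfaces from <sys/fm/util.h>; the structure and helper names here are hypothetical):

#include <sys/types.h>
#include <sys/fm/util.h>	/* fm_ena_generate(), FM_ENA_FMT1 */

/* Hypothetical per-command state; stands in for sd_xbuf. */
struct cmd_state {
	uint64_t cs_ena;	/* 0 until the first ereport */
};

/*
 * Return the command's ENA, generating it on first use so all
 * ereports for this command carry the same correlation value.
 */
static uint64_t
cmd_ena(struct cmd_state *cs)
{
	if (cs->cs_ena == 0)
		cs->cs_ena = fm_ena_generate(0, FM_ENA_FMT1);
	return (cs->cs_ena);
}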
31645 31806
31646 31807
31647 31808 /*
31648 31809 * Function: sd_check_bdc_vpd
31649 31810 *
31650 31811 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
31651 31812 * supports VPD page 0xb1, sd examines the MEDIUM ROTATION
31652 31813 * RATE.
31653 31814 *
31654 31815 * Set the following based on RPM value:
31655 31816 * = 0 device is not solid state, non-rotational
31656 31817 * = 1 device is solid state, non-rotational
31657 31818 * > 1 device is not solid state, rotational
31658 31819 *
31659 31820 * Context: Kernel thread or interrupt context.
31660 31821 */
31661 31822
31662 31823 static void
31663 31824 sd_check_bdc_vpd(sd_ssc_t *ssc)
31664 31825 {
31665 31826 int rval = 0;
31666 31827 uchar_t *inqb1 = NULL;
31667 31828 size_t inqb1_len = MAX_INQUIRY_SIZE;
31668 31829 size_t inqb1_resid = 0;
31669 31830 struct sd_lun *un;
31670 31831
31671 31832 ASSERT(ssc != NULL);
31672 31833 un = ssc->ssc_un;
31673 31834 ASSERT(un != NULL);
31674 31835 ASSERT(!mutex_owned(SD_MUTEX(un)));
31675 31836
31676 31837 mutex_enter(SD_MUTEX(un));
31677 31838 un->un_f_is_rotational = TRUE;
31678 31839 un->un_f_is_solid_state = FALSE;
31679 31840
31680 31841 if (ISCD(un)) {
31681 31842 mutex_exit(SD_MUTEX(un));
31682 31843 return;
31683 31844 }
31684 31845
31685 31846 if (sd_check_vpd_page_support(ssc) == 0 &&
31686 31847 un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
31687 31848 mutex_exit(SD_MUTEX(un));
31688 31849 /* collect page b1 data */
31689 31850 inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);
31690 31851
31691 31852 rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
31692 31853 0x01, 0xB1, &inqb1_resid);
31693 31854
31694 31855 if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
31695 31856 SD_TRACE(SD_LOG_COMMON, un,
31696 31857 			    "sd_check_bdc_vpd: successfully got "
31697 31858 			    "VPD page: %x PAGE LENGTH: %x "
31698 31859 			    "BYTE 4: %x BYTE 5: %x",
31699 31860 			    inqb1[1], inqb1[3], inqb1[4],
31700 31861 			    inqb1[5]);
31701 31862
31702 31863 mutex_enter(SD_MUTEX(un));
31703 31864 /*
31704 31865 * Check the MEDIUM ROTATION RATE.
31705 31866 */
31706 31867 if (inqb1[4] == 0) {
31707 31868 if (inqb1[5] == 0) {
31708 31869 un->un_f_is_rotational = FALSE;
31709 31870 } else if (inqb1[5] == 1) {
31710 31871 un->un_f_is_rotational = FALSE;
31711 31872 un->un_f_is_solid_state = TRUE;
31712 31873 /*
31713 31874 * Solid state drives don't need
31714 31875 * disksort.
31715 31876 */
31716 31877 un->un_f_disksort_disabled = TRUE;
31717 31878 }
31718 31879 }
31719 31880 mutex_exit(SD_MUTEX(un));
31720 31881 } else if (rval != 0) {
31721 31882 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31722 31883 }
31723 31884
31724 31885 kmem_free(inqb1, inqb1_len);
31725 31886 } else {
31726 31887 mutex_exit(SD_MUTEX(un));
31727 31888 }
31728 31889 }
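
For reference, the MEDIUM ROTATION RATE is a big-endian two-byte field at bytes 4-5 of VPD page 0xB1 (SBC-3); the code above inspects the low byte once the high byte is known to be zero. The three-way mapping from the function header, decoded in isolation (a userland sketch with hypothetical names, not driver code):

#include <stdint.h>
#include <stdbool.h>

/*
 * Decode the MEDIUM ROTATION RATE (bytes 4-5 of VPD page 0xB1,
 * big-endian) per the mapping in the function header above:
 *   0  -> non-rotational, not solid state
 *   1  -> non-rotational, solid state
 *   >1 -> rotational, nominal rate in RPM
 * Hypothetical standalone helper, not driver code.
 */
static void
decode_rotation_rate(const uint8_t *vpd_b1, bool *rotational,
    bool *solid_state)
{
	uint16_t rate = (uint16_t)((vpd_b1[4] << 8) | vpd_b1[5]);

	*rotational = (rate > 1);
	*solid_state = (rate == 1);
}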
31729 31890
31730 31891 /*
31731 31892 * Function: sd_check_emulation_mode
31732 31893 *
31733 31894  * Description: Check whether the SSD is in emulation mode
31734 31895  *		by issuing READ_CAPACITY_16 to see whether
31735 31896  *		we can get the physical block size of the drive.
31736 31897 *
31737 31898 * Context: Kernel thread or interrupt context.
31738 31899 */
31739 31900
31740 31901 static void
31741 31902 sd_check_emulation_mode(sd_ssc_t *ssc)
31742 31903 {
31743 31904 int rval = 0;
31744 31905 uint64_t capacity;
31745 31906 uint_t lbasize;
31746 31907 uint_t pbsize;
31747 31908 int i;
31748 31909 int devid_len;
31749 31910 struct sd_lun *un;
31750 31911
31751 31912 ASSERT(ssc != NULL);
31752 31913 un = ssc->ssc_un;
31753 31914 ASSERT(un != NULL);
31754 31915 ASSERT(!mutex_owned(SD_MUTEX(un)));
31755 31916
31756 31917 mutex_enter(SD_MUTEX(un));
31757 31918 if (ISCD(un)) {
31758 31919 mutex_exit(SD_MUTEX(un));
31759 31920 return;
31760 31921 }
31761 31922
31762 31923 if (un->un_f_descr_format_supported) {
31763 31924 mutex_exit(SD_MUTEX(un));
31764 31925 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
31765 31926 &pbsize, SD_PATH_DIRECT);
31766 31927 mutex_enter(SD_MUTEX(un));
31767 31928
31768 31929 if (rval != 0) {
31769 31930 un->un_phy_blocksize = DEV_BSIZE;
31770 31931 } else {
31771 31932 if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
31772 31933 un->un_phy_blocksize = DEV_BSIZE;
31773 - } else if (pbsize > un->un_phy_blocksize) {
31934 + } else if (pbsize > un->un_phy_blocksize &&
31935 + !un->un_f_sdconf_phy_blocksize) {
31774 31936 /*
31775 - * Don't reset the physical blocksize
31776 - * unless we've detected a larger value.
31937 + * Reset the physical block size
31938 + * if we've detected a larger value and
31939 + * we didn't already set the physical
31940 + * block size in sd.conf
31777 31941 */
31778 31942 un->un_phy_blocksize = pbsize;
31779 31943 }
31780 31944 }
31781 31945 }
31782 31946
31783 31947 for (i = 0; i < sd_flash_dev_table_size; i++) {
31784 31948 devid_len = (int)strlen(sd_flash_dev_table[i]);
31785 31949 if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
31786 31950 == SD_SUCCESS) {
31787 31951 un->un_phy_blocksize = SSD_SECSIZE;
31788 31952 if (un->un_f_is_solid_state &&
31789 31953 un->un_phy_blocksize != un->un_tgt_blocksize)
31790 31954 un->un_f_enable_rmw = TRUE;
31791 31955 }
31792 31956 }
31793 31957
31794 31958 mutex_exit(SD_MUTEX(un));
31795 31959 }
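
The flash-device override above forces un_phy_blocksize to SSD_SECSIZE and then enables read-modify-write whenever the physical and target (logical) block sizes differ, since the smallest host write no longer matches the smallest media write. Reduced to its core comparison (a sketch with hypothetical names):

#include <stdint.h>
#include <stdbool.h>

/*
 * A drive whose exposed logical block differs from its physical
 * block (e.g. 512-byte logical over a 4096-byte physical block)
 * needs read-modify-write emulation for sub-physical-block
 * writes. Mirrors the un_f_enable_rmw decision above with
 * hypothetical standalone names.
 */
static bool
needs_rmw(uint32_t logical_bsize, uint32_t physical_bsize)
{
	return (physical_bsize != logical_bsize);
}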