Print this page
re #7364 rb2201 "hddisco" hangs after unplugging both cables from JBOD (and NMS too)
re #8346 rb2639 KT disk failures
re #8346 rb2639 KT disk failures
re #10443 rb3479 3.1.3 crash: BAD TRAP: type=e (#pf Page fault)
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2000 to 2010, LSI Corporation.
29 29 * All rights reserved.
30 30 *
31 31 * Redistribution and use in source and binary forms of all code within
32 32 * this file that is exclusively owned by LSI, with or without
33 33 * modification, is permitted provided that, in addition to the CDDL 1.0
34 34 * License requirements, the following conditions are met:
35 35 *
36 36 * Neither the name of the author nor the names of its contributors may be
37 37 * used to endorse or promote products derived from this software without
38 38 * specific prior written permission.
39 39 *
40 40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43 43 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44 44 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 45 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
46 46 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
47 47 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48 48 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49 49 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
51 51 * DAMAGE.
52 52 */
53 53
54 54 /*
55 55 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
56 56 *
57 57 */
58 58
59 59 #if defined(lint) || defined(DEBUG)
60 60 #define MPTSAS_DEBUG
61 61 #endif
62 62
63 63 /*
64 64 * standard header files.
65 65 */
66 66 #include <sys/note.h>
67 67 #include <sys/scsi/scsi.h>
68 68 #include <sys/pci.h>
69 69 #include <sys/file.h>
70 70 #include <sys/policy.h>
71 71 #include <sys/sysevent.h>
72 72 #include <sys/sysevent/eventdefs.h>
73 73 #include <sys/sysevent/dr.h>
74 74 #include <sys/sata/sata_defs.h>
75 75 #include <sys/scsi/generic/sas.h>
76 76 #include <sys/scsi/impl/scsi_sas.h>
77 77
78 78 #pragma pack(1)
79 79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
80 80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
81 81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
82 82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
83 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
84 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
85 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
86 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
87 87 #pragma pack()
88 88
89 89 /*
90 90 * private header files.
91 91 *
92 92 */
93 93 #include <sys/scsi/impl/scsi_reset_notify.h>
94 94 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
95 95 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
96 96 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
97 97 #include <sys/raidioctl.h>
98 98
99 99 #include <sys/fs/dv_node.h> /* devfs_clean */
100 100
101 101 /*
102 102 * FMA header files
103 103 */
104 104 #include <sys/ddifm.h>
105 105 #include <sys/fm/protocol.h>
106 106 #include <sys/fm/util.h>
107 107 #include <sys/fm/io/ddi.h>
108 108
109 109 /*
110 110 * autoconfiguration data and routines.
111 111 */
112 112 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
113 113 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
114 114 static int mptsas_power(dev_info_t *dip, int component, int level);
115 115
116 116 /*
117 117 * cb_ops function
118 118 */
119 119 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
120 120 cred_t *credp, int *rval);
121 121 #ifdef __sparc
122 122 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
123 123 #else /* __sparc */
124 124 static int mptsas_quiesce(dev_info_t *devi);
125 125 #endif /* __sparc */
126 126
127 127 /*
128 128 * Resource initialization for hardware
129 129 */
130 130 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
131 131 static void mptsas_disable_bus_master(mptsas_t *mpt);
132 132 static void mptsas_hba_fini(mptsas_t *mpt);
133 133 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
134 134 static int mptsas_hba_setup(mptsas_t *mpt);
135 135 static void mptsas_hba_teardown(mptsas_t *mpt);
136 136 static int mptsas_config_space_init(mptsas_t *mpt);
137 137 static void mptsas_config_space_fini(mptsas_t *mpt);
138 138 static void mptsas_iport_register(mptsas_t *mpt);
139 139 static int mptsas_smp_setup(mptsas_t *mpt);
140 140 static void mptsas_smp_teardown(mptsas_t *mpt);
141 141 static int mptsas_cache_create(mptsas_t *mpt);
142 142 static void mptsas_cache_destroy(mptsas_t *mpt);
143 143 static int mptsas_alloc_request_frames(mptsas_t *mpt);
144 144 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
145 145 static int mptsas_alloc_free_queue(mptsas_t *mpt);
146 146 static int mptsas_alloc_post_queue(mptsas_t *mpt);
147 147 static void mptsas_alloc_reply_args(mptsas_t *mpt);
148 148 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
149 149 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
150 150 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
151 151
152 152 /*
153 153 * SCSA function prototypes
154 154 */
155 155 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
156 156 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
157 157 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
158 158 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
159 159 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
160 160 int tgtonly);
161 161 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
162 162 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
163 163 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
164 164 int tgtlen, int flags, int (*callback)(), caddr_t arg);
165 165 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
166 166 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
167 167 struct scsi_pkt *pkt);
168 168 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
169 169 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
170 170 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
171 171 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
172 172 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
173 173 void (*callback)(caddr_t), caddr_t arg);
174 174 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
175 175 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
176 176 static int mptsas_scsi_quiesce(dev_info_t *dip);
177 177 static int mptsas_scsi_unquiesce(dev_info_t *dip);
178 178 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
179 179 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
180 180
181 181 /*
182 182 * SMP functions
183 183 */
184 184 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
185 185
186 186 /*
187 187 * internal function prototypes.
188 188 */
189 189 static void mptsas_list_add(mptsas_t *mpt);
190 190 static void mptsas_list_del(mptsas_t *mpt);
191 191
192 192 static int mptsas_quiesce_bus(mptsas_t *mpt);
193 193 static int mptsas_unquiesce_bus(mptsas_t *mpt);
194 194
195 195 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
196 196 static void mptsas_free_handshake_msg(mptsas_t *mpt);
197 197
198 198 static void mptsas_ncmds_checkdrain(void *arg);
199 199
200 200 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
201 201 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
202 202 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
203 203 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
204 204
205 205 static int mptsas_do_detach(dev_info_t *dev);
206 206 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
207 207 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
208 208 struct scsi_pkt *pkt);
209 209 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
210 210
211 211 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
212 212 static void mptsas_handle_event(void *args);
213 213 static int mptsas_handle_event_sync(void *args);
214 214 static void mptsas_handle_dr(void *args);
215 215 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
216 216 dev_info_t *pdip);
217 217
218 218 static void mptsas_restart_cmd(void *);
219 219
220 220 static void mptsas_flush_hba(mptsas_t *mpt);
221 221 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
222 222 uint8_t tasktype);
223 223 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
224 224 uchar_t reason, uint_t stat);
225 225
226 226 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
227 227 static void mptsas_process_intr(mptsas_t *mpt,
228 228 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
229 229 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
|
↓ open down ↓ |
229 lines elided |
↑ open up ↑ |
230 230 pMpi2ReplyDescriptorsUnion_t reply_desc);
231 231 static void mptsas_handle_address_reply(mptsas_t *mpt,
232 232 pMpi2ReplyDescriptorsUnion_t reply_desc);
233 233 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
234 234 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
235 235 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
236 236
237 237 static void mptsas_watch(void *arg);
238 238 static void mptsas_watchsubr(mptsas_t *mpt);
239 239 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
240 +static void mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt);
240 241
241 242 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
242 243 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
243 244 uint8_t *data, uint32_t request_size, uint32_t reply_size,
244 245 uint32_t data_size, uint32_t direction, uint8_t *dataout,
245 246 uint32_t dataout_size, short timeout, int mode);
246 247 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
247 248
248 249 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
249 250 uint32_t unique_id);
250 251 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
251 252 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
252 253 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
253 254 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
254 255 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
255 256 uint32_t diag_type);
256 257 static int mptsas_diag_register(mptsas_t *mpt,
257 258 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
258 259 static int mptsas_diag_unregister(mptsas_t *mpt,
259 260 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
260 261 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
261 262 uint32_t *return_code);
262 263 static int mptsas_diag_read_buffer(mptsas_t *mpt,
263 264 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
264 265 uint32_t *return_code, int ioctl_mode);
265 266 static int mptsas_diag_release(mptsas_t *mpt,
266 267 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
267 268 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
268 269 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
269 270 int ioctl_mode);
270 271 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
271 272 int mode);
272 273
273 274 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
274 275 int cmdlen, int tgtlen, int statuslen, int kf);
275 276 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
276 277
277 278 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
278 279 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
279 280
280 281 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
281 282 int kmflags);
282 283 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
283 284
284 285 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
285 286 mptsas_cmd_t *cmd);
286 287 static void mptsas_check_task_mgt(mptsas_t *mpt,
287 288 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
288 289 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
289 290 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
290 291 int *resid);
291 292
292 293 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
293 294 static void mptsas_free_active_slots(mptsas_t *mpt);
294 295 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
295 296
296 297 static void mptsas_restart_hba(mptsas_t *mpt);
297 298 static void mptsas_restart_waitq(mptsas_t *mpt);
298 299
299 300 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
300 301 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
301 302 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
302 303
303 304 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
304 305 static void mptsas_doneq_empty(mptsas_t *mpt);
305 306 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
306 307
307 308 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
308 309 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
309 310 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
310 311 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
311 312
312 313
313 314 static void mptsas_start_watch_reset_delay();
314 315 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
315 316 static void mptsas_watch_reset_delay(void *arg);
316 317 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
317 318
318 319 /*
319 320 * helper functions
320 321 */
321 322 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
322 323
323 324 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
324 325 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
325 326 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
326 327 int lun);
327 328 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
328 329 int lun);
329 330 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
330 331 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
331 332
332 333 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
333 334 int *lun);
334 335 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
335 336
336 337 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
337 338 uint8_t phy);
338 339 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
339 340 uint64_t wwid);
|
↓ open down ↓ |
90 lines elided |
↑ open up ↑ |
340 341 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
341 342 uint64_t wwid);
342 343
343 344 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
344 345 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
345 346
346 347 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
347 348 uint16_t *handle, mptsas_target_t **pptgt);
348 349 static void mptsas_update_phymask(mptsas_t *mpt);
349 350
350 -static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
351 - uint32_t *status, uint8_t cmd);
352 351 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
353 352 mptsas_phymask_t *phymask);
354 353 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
355 354 mptsas_phymask_t phymask);
356 -static int mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
357 - uint32_t slotstatus);
358 355
359 356
360 357 /*
361 358 * Enumeration / DR functions
362 359 */
363 360 static void mptsas_config_all(dev_info_t *pdip);
364 361 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
365 362 dev_info_t **lundip);
366 363 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
367 364 dev_info_t **lundip);
368 365
369 366 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
370 367 static int mptsas_offline_target(dev_info_t *pdip, char *name);
371 368
372 369 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
373 370 dev_info_t **dip);
374 371
375 372 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
376 373 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
377 374 dev_info_t **dip, mptsas_target_t *ptgt);
378 375
379 376 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
380 377 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
381 378
382 379 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
383 380 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
384 381 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
385 382 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
386 383 int lun);
387 384
388 385 static void mptsas_offline_missed_luns(dev_info_t *pdip,
389 386 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
390 387 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
391 388 mdi_pathinfo_t *rpip, uint_t flags);
392 389
393 390 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
394 391 dev_info_t **smp_dip);
395 392 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
396 393 uint_t flags);
397 394
398 395 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
399 396 int mode, int *rval);
400 397 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
401 398 int mode, int *rval);
402 399 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
403 400 int mode, int *rval);
404 401 static void mptsas_record_event(void *args);
405 402 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
406 403 int mode);
407 404
408 405 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
409 406 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
410 407 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
411 408 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
412 409 mptsas_phymask_t key2);
413 410 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
414 411 mptsas_phymask_t key2);
415 412 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
416 413
417 414 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
418 415 uint32_t, mptsas_phymask_t, uint8_t);
419 416 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
420 417 mptsas_smp_t *data);
421 418 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
422 419 mptsas_phymask_t phymask);
423 420 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
424 421 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
425 422 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
426 423 dev_info_t **smp_dip);
427 424
428 425 /*
429 426 * Power management functions
430 427 */
431 428 static int mptsas_get_pci_cap(mptsas_t *mpt);
432 429 static int mptsas_init_pm(mptsas_t *mpt);
433 430
434 431 /*
435 432 * MPT MSI tunable:
436 433 *
437 434 * By default MSI is enabled on all supported platforms.
438 435 */
439 436 boolean_t mptsas_enable_msi = B_TRUE;
440 437 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
441 438
442 439 static int mptsas_register_intrs(mptsas_t *);
443 440 static void mptsas_unregister_intrs(mptsas_t *);
444 441 static int mptsas_add_intrs(mptsas_t *, int);
445 442 static void mptsas_rem_intrs(mptsas_t *);
446 443
447 444 /*
448 445 * FMA Prototypes
449 446 */
450 447 static void mptsas_fm_init(mptsas_t *mpt);
451 448 static void mptsas_fm_fini(mptsas_t *mpt);
452 449 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
453 450
454 451 extern pri_t minclsyspri, maxclsyspri;
455 452
456 453 /*
457 454 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
|
↓ open down ↓ |
90 lines elided |
↑ open up ↑ |
458 455 * under this device that the paths to a physical device are created when
459 456 * MPxIO is used.
460 457 */
461 458 extern dev_info_t *scsi_vhci_dip;
462 459
463 460 /*
464 461 * Tunable timeout value for Inquiry VPD page 0x83
465 462 * By default the value is 30 seconds.
466 463 */
467 464 int mptsas_inq83_retry_timeout = 30;
465 +/*
466 + * Maximum number of command timeouts (0 - 255) considered acceptable.
467 + */
468 +int mptsas_timeout_threshold = 2;
469 +/*
470 + * Timeouts exceeding threshold within this period are considered excessive.
471 + */
472 +int mptsas_timeout_interval = 30;
468 473
469 474 /*
470 475 * This is used to allocate memory for message frame storage, not for
471 476 * data I/O DMA. All message frames must be stored in the first 4G of
472 477 * physical memory.
473 478 */
474 479 ddi_dma_attr_t mptsas_dma_attrs = {
475 480 DMA_ATTR_V0, /* attribute layout version */
476 481 0x0ull, /* address low - should be 0 (longlong) */
477 482 0xffffffffull, /* address high - 32-bit max range */
478 483 0x00ffffffull, /* count max - max DMA object size */
479 484 4, /* allocation alignment requirements */
480 485 0x78, /* burstsizes - binary encoded values */
481 486 1, /* minxfer - gran. of DMA engine */
482 487 0x00ffffffull, /* maxxfer - gran. of DMA engine */
483 488 0xffffffffull, /* max segment size (DMA boundary) */
484 489 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
485 490 512, /* granularity - device transfer size */
486 491 0 /* flags, set to 0 */
487 492 };
488 493
489 494 /*
490 495 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
491 496 * physical addresses are supported.)
492 497 */
493 498 ddi_dma_attr_t mptsas_dma_attrs64 = {
494 499 DMA_ATTR_V0, /* attribute layout version */
495 500 0x0ull, /* address low - should be 0 (longlong) */
496 501 0xffffffffffffffffull, /* address high - 64-bit max */
497 502 0x00ffffffull, /* count max - max DMA object size */
498 503 4, /* allocation alignment requirements */
499 504 0x78, /* burstsizes - binary encoded values */
500 505 1, /* minxfer - gran. of DMA engine */
501 506 0x00ffffffull, /* maxxfer - gran. of DMA engine */
502 507 0xffffffffull, /* max segment size (DMA boundary) */
503 508 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
504 509 512, /* granularity - device transfer size */
505 510 DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */
506 511 };
507 512
508 513 ddi_device_acc_attr_t mptsas_dev_attr = {
509 514 DDI_DEVICE_ATTR_V1,
510 515 DDI_STRUCTURE_LE_ACC,
511 516 DDI_STRICTORDER_ACC,
512 517 DDI_DEFAULT_ACC
513 518 };
514 519
515 520 static struct cb_ops mptsas_cb_ops = {
516 521 scsi_hba_open, /* open */
517 522 scsi_hba_close, /* close */
518 523 nodev, /* strategy */
519 524 nodev, /* print */
520 525 nodev, /* dump */
521 526 nodev, /* read */
522 527 nodev, /* write */
523 528 mptsas_ioctl, /* ioctl */
524 529 nodev, /* devmap */
525 530 nodev, /* mmap */
526 531 nodev, /* segmap */
527 532 nochpoll, /* chpoll */
528 533 ddi_prop_op, /* cb_prop_op */
529 534 NULL, /* streamtab */
530 535 D_MP, /* cb_flag */
531 536 CB_REV, /* rev */
532 537 nodev, /* aread */
533 538 nodev /* awrite */
534 539 };
535 540
536 541 static struct dev_ops mptsas_ops = {
537 542 DEVO_REV, /* devo_rev, */
538 543 0, /* refcnt */
539 544 ddi_no_info, /* info */
540 545 nulldev, /* identify */
541 546 nulldev, /* probe */
542 547 mptsas_attach, /* attach */
543 548 mptsas_detach, /* detach */
544 549 #ifdef __sparc
545 550 mptsas_reset,
546 551 #else
547 552 nodev, /* reset */
548 553 #endif /* __sparc */
549 554 &mptsas_cb_ops, /* driver operations */
550 555 NULL, /* bus operations */
551 556 mptsas_power, /* power management */
552 557 #ifdef __sparc
553 558 ddi_quiesce_not_needed
554 559 #else
555 560 mptsas_quiesce /* quiesce */
556 561 #endif /* __sparc */
557 562 };
558 563
559 564
560 565 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
561 566
562 567 static struct modldrv modldrv = {
563 568 &mod_driverops, /* Type of module. This one is a driver */
564 569 MPTSAS_MOD_STRING, /* Name of the module. */
565 570 &mptsas_ops, /* driver ops */
566 571 };
567 572
568 573 static struct modlinkage modlinkage = {
569 574 MODREV_1, &modldrv, NULL
570 575 };
571 576 #define TARGET_PROP "target"
572 577 #define LUN_PROP "lun"
573 578 #define LUN64_PROP "lun64"
574 579 #define SAS_PROP "sas-mpt"
575 580 #define MDI_GUID "wwn"
576 581 #define NDI_GUID "guid"
577 582 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
578 583
579 584 /*
580 585 * Local static data
581 586 */
582 587 #if defined(MPTSAS_DEBUG)
583 588 uint32_t mptsas_debug_flags = 0;
584 589 #endif /* defined(MPTSAS_DEBUG) */
585 590 uint32_t mptsas_debug_resets = 0;
586 591
587 592 static kmutex_t mptsas_global_mutex;
588 593 static void *mptsas_state; /* soft state ptr */
589 594 static krwlock_t mptsas_global_rwlock;
590 595
591 596 static kmutex_t mptsas_log_mutex;
592 597 static char mptsas_log_buf[256];
593 598 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
594 599
595 600 static mptsas_t *mptsas_head, *mptsas_tail;
596 601 static clock_t mptsas_scsi_watchdog_tick;
597 602 static clock_t mptsas_tick;
598 603 static timeout_id_t mptsas_reset_watch;
599 604 static timeout_id_t mptsas_timeout_id;
600 605 static int mptsas_timeouts_enabled = 0;
601 606 /*
602 607 * warlock directives
603 608 */
604 609 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
605 610 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
606 611 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
607 612 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
608 613 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
609 614 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
610 615
611 616 /*
612 617 * SM - HBA statics
613 618 */
614 619 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
615 620
616 621 #ifdef MPTSAS_DEBUG
617 622 void debug_enter(char *);
618 623 #endif
619 624
620 625 /*
621 626 * Notes:
622 627 * - scsi_hba_init(9F) initializes SCSI HBA modules
623 628 * - must call scsi_hba_fini(9F) if modload() fails
624 629 */
625 630 int
626 631 _init(void)
627 632 {
628 633 int status;
629 634 /* CONSTCOND */
630 635 ASSERT(NO_COMPETING_THREADS);
631 636
632 637 NDBG0(("_init"));
633 638
634 639 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
635 640 MPTSAS_INITIAL_SOFT_SPACE);
636 641 if (status != 0) {
637 642 return (status);
638 643 }
639 644
640 645 if ((status = scsi_hba_init(&modlinkage)) != 0) {
641 646 ddi_soft_state_fini(&mptsas_state);
642 647 return (status);
643 648 }
644 649
645 650 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
646 651 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
647 652 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
648 653
649 654 if ((status = mod_install(&modlinkage)) != 0) {
650 655 mutex_destroy(&mptsas_log_mutex);
651 656 rw_destroy(&mptsas_global_rwlock);
652 657 mutex_destroy(&mptsas_global_mutex);
653 658 ddi_soft_state_fini(&mptsas_state);
654 659 scsi_hba_fini(&modlinkage);
655 660 }
656 661
657 662 return (status);
658 663 }
659 664
660 665 /*
661 666 * Notes:
662 667 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
663 668 */
664 669 int
665 670 _fini(void)
666 671 {
667 672 int status;
668 673 /* CONSTCOND */
669 674 ASSERT(NO_COMPETING_THREADS);
670 675
671 676 NDBG0(("_fini"));
672 677
673 678 if ((status = mod_remove(&modlinkage)) == 0) {
674 679 ddi_soft_state_fini(&mptsas_state);
675 680 scsi_hba_fini(&modlinkage);
676 681 mutex_destroy(&mptsas_global_mutex);
677 682 rw_destroy(&mptsas_global_rwlock);
678 683 mutex_destroy(&mptsas_log_mutex);
679 684 }
680 685 return (status);
681 686 }
682 687
683 688 /*
684 689 * The loadable-module _info(9E) entry point
685 690 */
686 691 int
687 692 _info(struct modinfo *modinfop)
688 693 {
689 694 /* CONSTCOND */
690 695 ASSERT(NO_COMPETING_THREADS);
691 696 NDBG0(("mptsas _info"));
692 697
693 698 return (mod_info(&modlinkage, modinfop));
694 699 }
695 700
696 701
697 702 static int
698 703 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
699 704 {
700 705 dev_info_t *pdip;
701 706 mptsas_t *mpt;
702 707 scsi_hba_tran_t *hba_tran;
703 708 char *iport = NULL;
704 709 char phymask[MPTSAS_MAX_PHYS];
705 710 mptsas_phymask_t phy_mask = 0;
706 711 int dynamic_port = 0;
707 712 uint32_t page_address;
708 713 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
709 714 int rval = DDI_FAILURE;
710 715 int i = 0;
711 716 uint8_t numphys = 0;
712 717 uint8_t phy_id;
713 718 uint8_t phy_port = 0;
714 719 uint16_t attached_devhdl = 0;
715 720 uint32_t dev_info;
716 721 uint64_t attached_sas_wwn;
717 722 uint16_t dev_hdl;
718 723 uint16_t pdev_hdl;
719 724 uint16_t bay_num, enclosure;
720 725 char attached_wwnstr[MPTSAS_WWN_STRLEN];
721 726
722 727 /* CONSTCOND */
723 728 ASSERT(NO_COMPETING_THREADS);
724 729
725 730 switch (cmd) {
726 731 case DDI_ATTACH:
727 732 break;
728 733
729 734 case DDI_RESUME:
730 735 /*
731 736 * If this a scsi-iport node, nothing to do here.
732 737 */
733 738 return (DDI_SUCCESS);
734 739
735 740 default:
736 741 return (DDI_FAILURE);
737 742 }
738 743
739 744 pdip = ddi_get_parent(dip);
740 745
741 746 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
742 747 NULL) {
743 748 cmn_err(CE_WARN, "Failed attach iport because fail to "
744 749 "get tran vector for the HBA node");
745 750 return (DDI_FAILURE);
746 751 }
747 752
748 753 mpt = TRAN2MPT(hba_tran);
749 754 ASSERT(mpt != NULL);
750 755 if (mpt == NULL)
751 756 return (DDI_FAILURE);
752 757
753 758 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
754 759 NULL) {
755 760 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
756 761 "get tran vector for the iport node");
757 762 return (DDI_FAILURE);
758 763 }
759 764
760 765 /*
761 766 * Overwrite parent's tran_hba_private to iport's tran vector
762 767 */
763 768 hba_tran->tran_hba_private = mpt;
764 769
765 770 ddi_report_dev(dip);
766 771
767 772 /*
768 773 * Get SAS address for initiator port according dev_handle
769 774 */
770 775 iport = ddi_get_name_addr(dip);
771 776 if (iport && strncmp(iport, "v0", 2) == 0) {
772 777 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
773 778 MPTSAS_VIRTUAL_PORT, 1) !=
774 779 DDI_PROP_SUCCESS) {
775 780 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
776 781 MPTSAS_VIRTUAL_PORT);
777 782 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
778 783 "prop update failed");
779 784 return (DDI_FAILURE);
780 785 }
781 786 return (DDI_SUCCESS);
782 787 }
783 788
784 789 mutex_enter(&mpt->m_mutex);
785 790 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
786 791 bzero(phymask, sizeof (phymask));
787 792 (void) sprintf(phymask,
788 793 "%x", mpt->m_phy_info[i].phy_mask);
789 794 if (strcmp(phymask, iport) == 0) {
790 795 break;
791 796 }
792 797 }
793 798
794 799 if (i == MPTSAS_MAX_PHYS) {
795 800 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
796 801 "seems not exist", iport);
797 802 mutex_exit(&mpt->m_mutex);
798 803 return (DDI_FAILURE);
799 804 }
800 805
801 806 phy_mask = mpt->m_phy_info[i].phy_mask;
802 807
803 808 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
804 809 dynamic_port = 1;
805 810 else
806 811 dynamic_port = 0;
807 812
808 813 /*
809 814 * Update PHY info for smhba
810 815 */
811 816 if (mptsas_smhba_phy_init(mpt)) {
812 817 mutex_exit(&mpt->m_mutex);
813 818 mptsas_log(mpt, CE_WARN, "mptsas phy update "
814 819 "failed");
815 820 return (DDI_FAILURE);
816 821 }
817 822
818 823 mutex_exit(&mpt->m_mutex);
819 824
820 825 numphys = 0;
821 826 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
822 827 if ((phy_mask >> i) & 0x01) {
823 828 numphys++;
824 829 }
825 830 }
826 831
827 832 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
828 833 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
829 834 mpt->un.m_base_wwid);
830 835
831 836 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
832 837 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
833 838 DDI_PROP_SUCCESS) {
834 839 (void) ddi_prop_remove(DDI_DEV_T_NONE,
835 840 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
836 841 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
837 842 "prop update failed");
838 843 return (DDI_FAILURE);
839 844 }
840 845 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
841 846 MPTSAS_NUM_PHYS, numphys) !=
842 847 DDI_PROP_SUCCESS) {
843 848 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
844 849 return (DDI_FAILURE);
845 850 }
846 851
847 852 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
848 853 "phymask", phy_mask) !=
849 854 DDI_PROP_SUCCESS) {
850 855 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
851 856 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
852 857 "prop update failed");
853 858 return (DDI_FAILURE);
854 859 }
855 860
856 861 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
857 862 "dynamic-port", dynamic_port) !=
858 863 DDI_PROP_SUCCESS) {
859 864 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
860 865 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
861 866 "prop update failed");
862 867 return (DDI_FAILURE);
863 868 }
864 869 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
865 870 MPTSAS_VIRTUAL_PORT, 0) !=
866 871 DDI_PROP_SUCCESS) {
867 872 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
868 873 MPTSAS_VIRTUAL_PORT);
869 874 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
870 875 "prop update failed");
871 876 return (DDI_FAILURE);
872 877 }
873 878 mptsas_smhba_set_phy_props(mpt,
874 879 iport, dip, numphys, &attached_devhdl);
875 880
876 881 mutex_enter(&mpt->m_mutex);
877 882 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
878 883 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
879 884 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
880 885 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
881 886 &pdev_hdl, &bay_num, &enclosure);
882 887 if (rval != DDI_SUCCESS) {
883 888 mptsas_log(mpt, CE_WARN,
884 889 "Failed to get device page0 for handle:%d",
885 890 attached_devhdl);
886 891 mutex_exit(&mpt->m_mutex);
887 892 return (DDI_FAILURE);
888 893 }
889 894
890 895 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
891 896 bzero(phymask, sizeof (phymask));
892 897 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
893 898 if (strcmp(phymask, iport) == 0) {
894 899 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
895 900 "%x",
896 901 mpt->m_phy_info[i].phy_mask);
897 902 }
898 903 }
899 904 mutex_exit(&mpt->m_mutex);
900 905
901 906 bzero(attached_wwnstr, sizeof (attached_wwnstr));
902 907 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
903 908 attached_sas_wwn);
904 909 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
905 910 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
906 911 DDI_PROP_SUCCESS) {
907 912 (void) ddi_prop_remove(DDI_DEV_T_NONE,
908 913 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
909 914 return (DDI_FAILURE);
910 915 }
911 916
912 917 /* Create kstats for each phy on this iport */
913 918
914 919 mptsas_create_phy_stats(mpt, iport, dip);
915 920
916 921 /*
917 922 * register sas hba iport with mdi (MPxIO/vhci)
918 923 */
919 924 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
920 925 dip, 0) == MDI_SUCCESS) {
921 926 mpt->m_mpxio_enable = TRUE;
922 927 }
923 928 return (DDI_SUCCESS);
924 929 }
925 930
926 931 /*
927 932 * Notes:
928 933 * Set up all device state and allocate data structures,
929 934 * mutexes, condition variables, etc. for device operation.
930 935 * Add interrupts needed.
931 936 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
932 937 */
933 938 static int
934 939 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
935 940 {
936 941 mptsas_t *mpt = NULL;
937 942 int instance, i, j;
938 943 int doneq_thread_num;
939 944 char intr_added = 0;
940 945 char map_setup = 0;
941 946 char config_setup = 0;
942 947 char hba_attach_setup = 0;
943 948 char smp_attach_setup = 0;
944 949 char mutex_init_done = 0;
945 950 char event_taskq_create = 0;
946 951 char dr_taskq_create = 0;
947 952 char doneq_thread_create = 0;
948 953 scsi_hba_tran_t *hba_tran;
949 954 uint_t mem_bar = MEM_SPACE;
950 955 int rval = DDI_FAILURE;
951 956
952 957 /* CONSTCOND */
953 958 ASSERT(NO_COMPETING_THREADS);
954 959
955 960 if (scsi_hba_iport_unit_address(dip)) {
956 961 return (mptsas_iport_attach(dip, cmd));
957 962 }
958 963
959 964 switch (cmd) {
960 965 case DDI_ATTACH:
961 966 break;
962 967
963 968 case DDI_RESUME:
964 969 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
965 970 return (DDI_FAILURE);
966 971
967 972 mpt = TRAN2MPT(hba_tran);
968 973
969 974 if (!mpt) {
970 975 return (DDI_FAILURE);
971 976 }
972 977
973 978 /*
974 979 * Reset hardware and softc to "no outstanding commands"
975 980 * Note that a check condition can result on first command
976 981 * to a target.
977 982 */
978 983 mutex_enter(&mpt->m_mutex);
979 984
980 985 /*
981 986 * raise power.
982 987 */
983 988 if (mpt->m_options & MPTSAS_OPT_PM) {
984 989 mutex_exit(&mpt->m_mutex);
985 990 (void) pm_busy_component(dip, 0);
986 991 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
987 992 if (rval == DDI_SUCCESS) {
988 993 mutex_enter(&mpt->m_mutex);
989 994 } else {
990 995 /*
991 996 * The pm_raise_power() call above failed,
992 997 * and that can only occur if we were unable
993 998 * to reset the hardware. This is probably
994 999 * due to unhealty hardware, and because
995 1000 * important filesystems(such as the root
996 1001 * filesystem) could be on the attached disks,
997 1002 * it would not be a good idea to continue,
998 1003 * as we won't be entirely certain we are
999 1004 * writing correct data. So we panic() here
1000 1005 * to not only prevent possible data corruption,
1001 1006 * but to give developers or end users a hope
1002 1007 * of identifying and correcting any problems.
1003 1008 */
1004 1009 fm_panic("mptsas could not reset hardware "
1005 1010 "during resume");
1006 1011 }
1007 1012 }
1008 1013
1009 1014 mpt->m_suspended = 0;
1010 1015
1011 1016 /*
1012 1017 * Reinitialize ioc
1013 1018 */
1014 1019 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1015 1020 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1016 1021 mutex_exit(&mpt->m_mutex);
1017 1022 if (mpt->m_options & MPTSAS_OPT_PM) {
1018 1023 (void) pm_idle_component(dip, 0);
1019 1024 }
1020 1025 fm_panic("mptsas init chip fail during resume");
1021 1026 }
1022 1027 /*
1023 1028 * mptsas_update_driver_data needs interrupts so enable them
1024 1029 * first.
1025 1030 */
1026 1031 MPTSAS_ENABLE_INTR(mpt);
1027 1032 mptsas_update_driver_data(mpt);
1028 1033
1029 1034 /* start requests, if possible */
1030 1035 mptsas_restart_hba(mpt);
1031 1036
1032 1037 mutex_exit(&mpt->m_mutex);
1033 1038
1034 1039 /*
1035 1040 * Restart watch thread
1036 1041 */
1037 1042 mutex_enter(&mptsas_global_mutex);
1038 1043 if (mptsas_timeout_id == 0) {
1039 1044 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1040 1045 mptsas_tick);
1041 1046 mptsas_timeouts_enabled = 1;
1042 1047 }
1043 1048 mutex_exit(&mptsas_global_mutex);
1044 1049
1045 1050 /* report idle status to pm framework */
1046 1051 if (mpt->m_options & MPTSAS_OPT_PM) {
1047 1052 (void) pm_idle_component(dip, 0);
1048 1053 }
1049 1054
1050 1055 return (DDI_SUCCESS);
1051 1056
1052 1057 default:
1053 1058 return (DDI_FAILURE);
1054 1059
1055 1060 }
1056 1061
1057 1062 instance = ddi_get_instance(dip);
1058 1063
1059 1064 /*
1060 1065 * Allocate softc information.
1061 1066 */
1062 1067 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1063 1068 mptsas_log(NULL, CE_WARN,
1064 1069 "mptsas%d: cannot allocate soft state", instance);
1065 1070 goto fail;
1066 1071 }
1067 1072
1068 1073 mpt = ddi_get_soft_state(mptsas_state, instance);
1069 1074
1070 1075 if (mpt == NULL) {
1071 1076 mptsas_log(NULL, CE_WARN,
1072 1077 "mptsas%d: cannot get soft state", instance);
1073 1078 goto fail;
1074 1079 }
1075 1080
1076 1081 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1077 1082 scsi_size_clean(dip);
1078 1083
1079 1084 mpt->m_dip = dip;
1080 1085 mpt->m_instance = instance;
1081 1086
1082 1087 /* Make a per-instance copy of the structures */
1083 1088 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1084 1089 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1085 1090 mpt->m_reg_acc_attr = mptsas_dev_attr;
1086 1091 mpt->m_dev_acc_attr = mptsas_dev_attr;
1087 1092
1088 1093 /*
1089 1094 * Initialize FMA
1090 1095 */
1091 1096 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1092 1097 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1093 1098 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1094 1099 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1095 1100
1096 1101 mptsas_fm_init(mpt);
1097 1102
1098 1103 if (mptsas_alloc_handshake_msg(mpt,
1099 1104 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1100 1105 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1101 1106 goto fail;
1102 1107 }
1103 1108
1104 1109 /*
1105 1110 * Setup configuration space
1106 1111 */
1107 1112 if (mptsas_config_space_init(mpt) == FALSE) {
1108 1113 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1109 1114 goto fail;
1110 1115 }
1111 1116 config_setup++;
1112 1117
1113 1118 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1114 1119 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1115 1120 mptsas_log(mpt, CE_WARN, "map setup failed");
1116 1121 goto fail;
1117 1122 }
1118 1123 map_setup++;
1119 1124
1120 1125 /*
1121 1126 * A taskq is created for dealing with the event handler
1122 1127 */
1123 1128 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1124 1129 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1125 1130 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1126 1131 goto fail;
1127 1132 }
1128 1133 event_taskq_create++;
1129 1134
1130 1135 /*
1131 1136 * A taskq is created for dealing with dr events
1132 1137 */
1133 1138 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1134 1139 "mptsas_dr_taskq",
1135 1140 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1136 1141 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1137 1142 "failed");
1138 1143 goto fail;
1139 1144 }
1140 1145 dr_taskq_create++;
1141 1146
1142 1147 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1143 1148 0, "mptsas_doneq_thread_threshold_prop", 10);
1144 1149 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1145 1150 0, "mptsas_doneq_length_threshold_prop", 8);
1146 1151 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1147 1152 0, "mptsas_doneq_thread_n_prop", 8);
1148 1153
1149 1154 if (mpt->m_doneq_thread_n) {
1150 1155 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1151 1156 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1152 1157
1153 1158 mutex_enter(&mpt->m_doneq_mutex);
1154 1159 mpt->m_doneq_thread_id =
1155 1160 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1156 1161 * mpt->m_doneq_thread_n, KM_SLEEP);
1157 1162
1158 1163 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1159 1164 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1160 1165 CV_DRIVER, NULL);
1161 1166 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1162 1167 MUTEX_DRIVER, NULL);
1163 1168 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1164 1169 mpt->m_doneq_thread_id[j].flag |=
1165 1170 MPTSAS_DONEQ_THREAD_ACTIVE;
1166 1171 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1167 1172 mpt->m_doneq_thread_id[j].arg.t = j;
1168 1173 mpt->m_doneq_thread_id[j].threadp =
1169 1174 thread_create(NULL, 0, mptsas_doneq_thread,
1170 1175 &mpt->m_doneq_thread_id[j].arg,
1171 1176 0, &p0, TS_RUN, minclsyspri);
1172 1177 mpt->m_doneq_thread_id[j].donetail =
1173 1178 &mpt->m_doneq_thread_id[j].doneq;
1174 1179 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1175 1180 }
1176 1181 mutex_exit(&mpt->m_doneq_mutex);
1177 1182 doneq_thread_create++;
1178 1183 }
1179 1184
1180 1185 /* Initialize mutex used in interrupt handler */
1181 1186 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1182 1187 DDI_INTR_PRI(mpt->m_intr_pri));
1183 1188 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1184 1189 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1185 1190 DDI_INTR_PRI(mpt->m_intr_pri));
1186 1191 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1187 1192 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1188 1193 NULL, MUTEX_DRIVER,
1189 1194 DDI_INTR_PRI(mpt->m_intr_pri));
1190 1195 }
1191 1196
1192 1197 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1193 1198 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1194 1199 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1195 1200 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1196 1201 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1197 1202 mutex_init_done++;
1198 1203
1199 1204 /*
1200 1205 * Disable hardware interrupt since we're not ready to
1201 1206 * handle it yet.
1202 1207 */
1203 1208 MPTSAS_DISABLE_INTR(mpt);
1204 1209 if (mptsas_register_intrs(mpt) == FALSE)
1205 1210 goto fail;
1206 1211 intr_added++;
1207 1212
1208 1213 mutex_enter(&mpt->m_mutex);
1209 1214 /*
1210 1215 * Initialize power management component
1211 1216 */
1212 1217 if (mpt->m_options & MPTSAS_OPT_PM) {
1213 1218 if (mptsas_init_pm(mpt)) {
1214 1219 mutex_exit(&mpt->m_mutex);
1215 1220 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1216 1221 "failed");
1217 1222 goto fail;
1218 1223 }
1219 1224 }
1220 1225
1221 1226 /*
1222 1227 * Initialize chip using Message Unit Reset, if allowed
1223 1228 */
1224 1229 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1225 1230 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1226 1231 mutex_exit(&mpt->m_mutex);
1227 1232 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1228 1233 goto fail;
1229 1234 }
1230 1235
1231 1236 /*
1232 1237 * Fill in the phy_info structure and get the base WWID
1233 1238 */
1234 1239 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1235 1240 mptsas_log(mpt, CE_WARN,
1236 1241 "mptsas_get_manufacture_page5 failed!");
1237 1242 goto fail;
1238 1243 }
1239 1244
1240 1245 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1241 1246 mptsas_log(mpt, CE_WARN,
1242 1247 "mptsas_get_sas_io_unit_page_hndshk failed!");
1243 1248 goto fail;
1244 1249 }
1245 1250
1246 1251 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1247 1252 mptsas_log(mpt, CE_WARN,
1248 1253 "mptsas_get_manufacture_page0 failed!");
1249 1254 goto fail;
1250 1255 }
1251 1256
1252 1257 mutex_exit(&mpt->m_mutex);
1253 1258
1254 1259 /*
1255 1260 * Register the iport for multiple port HBA
1256 1261 */
1257 1262 mptsas_iport_register(mpt);
1258 1263
1259 1264 /*
1260 1265 * initialize SCSI HBA transport structure
1261 1266 */
1262 1267 if (mptsas_hba_setup(mpt) == FALSE)
1263 1268 goto fail;
1264 1269 hba_attach_setup++;
1265 1270
1266 1271 if (mptsas_smp_setup(mpt) == FALSE)
1267 1272 goto fail;
1268 1273 smp_attach_setup++;
1269 1274
1270 1275 if (mptsas_cache_create(mpt) == FALSE)
1271 1276 goto fail;
1272 1277
1273 1278 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1274 1279 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1275 1280 if (mpt->m_scsi_reset_delay == 0) {
1276 1281 mptsas_log(mpt, CE_NOTE,
1277 1282 "scsi_reset_delay of 0 is not recommended,"
1278 1283 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1279 1284 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1280 1285 }
1281 1286
1282 1287 /*
1283 1288 * Initialize the wait and done FIFO queue
1284 1289 */
1285 1290 mpt->m_donetail = &mpt->m_doneq;
1286 1291 mpt->m_waitqtail = &mpt->m_waitq;
1287 1292 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1288 1293 mpt->m_tx_draining = 0;
1289 1294
1290 1295 /*
1291 1296 * ioc cmd queue initialize
1292 1297 */
1293 1298 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1294 1299 mpt->m_dev_handle = 0xFFFF;
1295 1300
1296 1301 MPTSAS_ENABLE_INTR(mpt);
1297 1302
1298 1303 /*
1299 1304 * enable event notification
1300 1305 */
1301 1306 mutex_enter(&mpt->m_mutex);
1302 1307 if (mptsas_ioc_enable_event_notification(mpt)) {
1303 1308 mutex_exit(&mpt->m_mutex);
1304 1309 goto fail;
1305 1310 }
1306 1311 mutex_exit(&mpt->m_mutex);
1307 1312
1308 1313 /*
1309 1314 * Initialize PHY info for smhba
1310 1315 */
1311 1316 if (mptsas_smhba_setup(mpt)) {
1312 1317 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1313 1318 "failed");
1314 1319 goto fail;
1315 1320 }
1316 1321
1317 1322 /* Check all dma handles allocated in attach */
1318 1323 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1319 1324 != DDI_SUCCESS) ||
1320 1325 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1321 1326 != DDI_SUCCESS) ||
1322 1327 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1323 1328 != DDI_SUCCESS) ||
1324 1329 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1325 1330 != DDI_SUCCESS) ||
1326 1331 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1327 1332 != DDI_SUCCESS)) {
1328 1333 goto fail;
1329 1334 }
1330 1335
1331 1336 /* Check all acc handles allocated in attach */
1332 1337 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1333 1338 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1334 1339 != DDI_SUCCESS) ||
1335 1340 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1336 1341 != DDI_SUCCESS) ||
1337 1342 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1338 1343 != DDI_SUCCESS) ||
1339 1344 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1340 1345 != DDI_SUCCESS) ||
1341 1346 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1342 1347 != DDI_SUCCESS) ||
1343 1348 (mptsas_check_acc_handle(mpt->m_config_handle)
1344 1349 != DDI_SUCCESS)) {
1345 1350 goto fail;
1346 1351 }
1347 1352
1348 1353 /*
1349 1354 * After this point, we are not going to fail the attach.
1350 1355 */
1351 1356 /*
1352 1357 * used for mptsas_watch
1353 1358 */
1354 1359 mptsas_list_add(mpt);
1355 1360
1356 1361 mutex_enter(&mptsas_global_mutex);
1357 1362 if (mptsas_timeouts_enabled == 0) {
1358 1363 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1359 1364 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1360 1365
1361 1366 mptsas_tick = mptsas_scsi_watchdog_tick *
1362 1367 drv_usectohz((clock_t)1000000);
1363 1368
1364 1369 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1365 1370 mptsas_timeouts_enabled = 1;
1366 1371 }
1367 1372 mutex_exit(&mptsas_global_mutex);
1368 1373
1369 1374 /* Print message of HBA present */
1370 1375 ddi_report_dev(dip);
1371 1376
1372 1377 /* report idle status to pm framework */
1373 1378 if (mpt->m_options & MPTSAS_OPT_PM) {
1374 1379 (void) pm_idle_component(dip, 0);
1375 1380 }
1376 1381
1377 1382 return (DDI_SUCCESS);
1378 1383
1379 1384 fail:
1380 1385 mptsas_log(mpt, CE_WARN, "attach failed");
1381 1386 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1382 1387 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1383 1388 if (mpt) {
1384 1389 mutex_enter(&mptsas_global_mutex);
1385 1390
1386 1391 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1387 1392 timeout_id_t tid = mptsas_timeout_id;
1388 1393 mptsas_timeouts_enabled = 0;
1389 1394 mptsas_timeout_id = 0;
1390 1395 mutex_exit(&mptsas_global_mutex);
1391 1396 (void) untimeout(tid);
1392 1397 mutex_enter(&mptsas_global_mutex);
1393 1398 }
1394 1399 mutex_exit(&mptsas_global_mutex);
1395 1400 /* deallocate in reverse order */
1396 1401 mptsas_cache_destroy(mpt);
1397 1402
1398 1403 if (smp_attach_setup) {
1399 1404 mptsas_smp_teardown(mpt);
1400 1405 }
1401 1406 if (hba_attach_setup) {
1402 1407 mptsas_hba_teardown(mpt);
1403 1408 }
1404 1409
1405 1410 if (mpt->m_active) {
1406 1411 mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1407 1412 sizeof (mptsas_smp_t));
1408 1413 mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1409 1414 sizeof (mptsas_target_t));
1410 1415 mptsas_free_active_slots(mpt);
1411 1416 }
1412 1417 if (intr_added) {
1413 1418 mptsas_unregister_intrs(mpt);
1414 1419 }
1415 1420
1416 1421 if (doneq_thread_create) {
1417 1422 mutex_enter(&mpt->m_doneq_mutex);
1418 1423 doneq_thread_num = mpt->m_doneq_thread_n;
1419 1424 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1420 1425 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1421 1426 mpt->m_doneq_thread_id[j].flag &=
1422 1427 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1423 1428 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1424 1429 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1425 1430 }
1426 1431 while (mpt->m_doneq_thread_n) {
1427 1432 cv_wait(&mpt->m_doneq_thread_cv,
1428 1433 &mpt->m_doneq_mutex);
1429 1434 }
1430 1435 for (j = 0; j < doneq_thread_num; j++) {
1431 1436 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1432 1437 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1433 1438 }
1434 1439 kmem_free(mpt->m_doneq_thread_id,
1435 1440 sizeof (mptsas_doneq_thread_list_t)
1436 1441 * doneq_thread_num);
1437 1442 mutex_exit(&mpt->m_doneq_mutex);
1438 1443 cv_destroy(&mpt->m_doneq_thread_cv);
1439 1444 mutex_destroy(&mpt->m_doneq_mutex);
1440 1445 }
1441 1446 if (event_taskq_create) {
1442 1447 ddi_taskq_destroy(mpt->m_event_taskq);
1443 1448 }
1444 1449 if (dr_taskq_create) {
1445 1450 ddi_taskq_destroy(mpt->m_dr_taskq);
1446 1451 }
1447 1452 if (mutex_init_done) {
1448 1453 mutex_destroy(&mpt->m_tx_waitq_mutex);
1449 1454 mutex_destroy(&mpt->m_passthru_mutex);
1450 1455 mutex_destroy(&mpt->m_mutex);
1451 1456 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1452 1457 mutex_destroy(
1453 1458 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1454 1459 }
1455 1460 cv_destroy(&mpt->m_cv);
1456 1461 cv_destroy(&mpt->m_passthru_cv);
1457 1462 cv_destroy(&mpt->m_fw_cv);
1458 1463 cv_destroy(&mpt->m_config_cv);
1459 1464 cv_destroy(&mpt->m_fw_diag_cv);
1460 1465 }
1461 1466
1462 1467 if (map_setup) {
1463 1468 mptsas_cfg_fini(mpt);
1464 1469 }
1465 1470 if (config_setup) {
1466 1471 mptsas_config_space_fini(mpt);
1467 1472 }
1468 1473 mptsas_free_handshake_msg(mpt);
1469 1474 mptsas_hba_fini(mpt);
1470 1475
1471 1476 mptsas_fm_fini(mpt);
1472 1477 ddi_soft_state_free(mptsas_state, instance);
1473 1478 ddi_prop_remove_all(dip);
1474 1479 }
1475 1480 return (DDI_FAILURE);
1476 1481 }
1477 1482
/*
 * DDI_SUSPEND handler for the HBA node.  Marks the instance suspended,
 * cancels per-instance timeout threads, tears down the global watch
 * timeouts once every registered mpt instance is suspended, syncs IR
 * via a RAID-action system shutdown, and drains the event/dr taskqs.
 * Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes carry no per-instance hardware state to suspend */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* m_suspended counts nested suspends; only the first does work */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * m_mutex is dropped across untimeout() so a handler that is
	 * already running (and may need m_mutex) can complete first.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	/* g == NULL after the loop means no instance is still active */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1584 1589
1585 1590 #ifdef __sparc
1586 1591 /*ARGSUSED*/
1587 1592 static int
1588 1593 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1589 1594 {
1590 1595 mptsas_t *mpt;
1591 1596 scsi_hba_tran_t *tran;
1592 1597
1593 1598 /*
1594 1599 * If this call is for iport, just return.
1595 1600 */
1596 1601 if (scsi_hba_iport_unit_address(devi))
1597 1602 return (DDI_SUCCESS);
1598 1603
1599 1604 if ((tran = ddi_get_driver_private(devi)) == NULL)
1600 1605 return (DDI_SUCCESS);
1601 1606
1602 1607 if ((mpt = TRAN2MPT(tran)) == NULL)
1603 1608 return (DDI_SUCCESS);
1604 1609
1605 1610 /*
1606 1611 * Send RAID action system shutdown to sync IR. Disable HBA
1607 1612 * interrupts in hardware first.
1608 1613 */
1609 1614 MPTSAS_DISABLE_INTR(mpt);
1610 1615 mptsas_raid_action_system_shutdown(mpt);
1611 1616
1612 1617 return (DDI_SUCCESS);
1613 1618 }
1614 1619 #else /* __sparc */
1615 1620 /*
1616 1621 * quiesce(9E) entry point.
1617 1622 *
1618 1623 * This function is called when the system is single-threaded at high
1619 1624 * PIL with preemption disabled. Therefore, this function must not be
1620 1625 * blocked.
1621 1626 *
1622 1627 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1623 1628 * DDI_FAILURE indicates an error condition and should almost never happen.
1624 1629 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	/* No tran vector or soft state means nothing to quiesce */
	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1650 1655 #endif /* __sparc */
1651 1656
1652 1657 /*
1653 1658 * detach(9E). Remove all device allocations and system resources;
1654 1659 * disable device interrupts.
1655 1660 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1656 1661 */
1657 1662 static int
1658 1663 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1659 1664 {
1660 1665 /* CONSTCOND */
1661 1666 ASSERT(NO_COMPETING_THREADS);
1662 1667 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1663 1668
1664 1669 switch (cmd) {
1665 1670 case DDI_DETACH:
1666 1671 return (mptsas_do_detach(devi));
1667 1672
1668 1673 case DDI_SUSPEND:
1669 1674 return (mptsas_suspend(devi));
1670 1675
1671 1676 default:
1672 1677 return (DDI_FAILURE);
1673 1678 }
1674 1679 /* NOTREACHED */
1675 1680 }
1676 1681
/*
 * mptsas_do_detach - detach work for either an iport node or the HBA node.
 *
 * For an iport dip (scsi_hba_iport_unit_address() != 0): free all MPxIO
 * pathinfo children (failing the detach if any cannot be freed),
 * unregister the pHCI and remove properties.
 *
 * For the HBA dip: raise power to D0, quiesce the IOC (RAID sync +
 * message unit reset), then tear down interrupts, taskqs, doneq worker
 * threads, timeouts, kstats, hash tables, caches, the SCSA/SMP
 * transports, config space, handshake DMA and soft state - roughly the
 * reverse of mptsas_attach().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* Free every client path attached to this pHCI. */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		/* iport teardown complete; HBA-level work happens later. */
		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR.  After action, send a
	 * Message Unit Reset.  Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Stop the doneq worker threads: clear ACTIVE, wake each thread,
	 * then wait for all of them to exit before freeing their state.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* Drop m_mutex around untimeout() to avoid deadlocking
		 * with a timeout handler that takes the same lock. */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
	mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1892 1897
1893 1898 static void
1894 1899 mptsas_list_add(mptsas_t *mpt)
1895 1900 {
1896 1901 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1897 1902
1898 1903 if (mptsas_head == NULL) {
1899 1904 mptsas_head = mpt;
1900 1905 } else {
1901 1906 mptsas_tail->m_next = mpt;
1902 1907 }
1903 1908 mptsas_tail = mpt;
1904 1909 rw_exit(&mptsas_global_rwlock);
1905 1910 }
1906 1911
1907 1912 static void
1908 1913 mptsas_list_del(mptsas_t *mpt)
1909 1914 {
1910 1915 mptsas_t *m;
1911 1916 /*
1912 1917 * Remove device instance from the global linked list
1913 1918 */
1914 1919 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1915 1920 if (mptsas_head == mpt) {
1916 1921 m = mptsas_head = mpt->m_next;
1917 1922 } else {
1918 1923 for (m = mptsas_head; m != NULL; m = m->m_next) {
1919 1924 if (m->m_next == mpt) {
1920 1925 m->m_next = mpt->m_next;
1921 1926 break;
1922 1927 }
1923 1928 }
1924 1929 if (m == NULL) {
1925 1930 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1926 1931 }
1927 1932 }
1928 1933
1929 1934 if (mptsas_tail == mpt) {
1930 1935 mptsas_tail = m;
1931 1936 }
1932 1937 rw_exit(&mptsas_global_rwlock);
1933 1938 }
1934 1939
1935 1940 static int
1936 1941 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1937 1942 {
1938 1943 ddi_dma_attr_t task_dma_attrs;
1939 1944
1940 1945 task_dma_attrs = mpt->m_msg_dma_attr;
1941 1946 task_dma_attrs.dma_attr_sgllen = 1;
1942 1947 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1943 1948
1944 1949 /* allocate Task Management ddi_dma resources */
1945 1950 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
1946 1951 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
1947 1952 alloc_size, NULL) == FALSE) {
1948 1953 return (DDI_FAILURE);
1949 1954 }
1950 1955 mpt->m_hshk_dma_size = alloc_size;
1951 1956
1952 1957 return (DDI_SUCCESS);
1953 1958 }
1954 1959
1955 1960 static void
1956 1961 mptsas_free_handshake_msg(mptsas_t *mpt)
1957 1962 {
1958 1963 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
1959 1964 mpt->m_hshk_dma_size = 0;
1960 1965 }
1961 1966
/*
 * mptsas_hba_setup - allocate the SCSA transport vector, fill in the
 * driver's tran_* entry points, and attach to the SCSA framework.
 * Returns TRUE on success, FALSE on failure (with the transport freed).
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	/* Event callbacks are not supported by this driver. */
	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports.  We need tran was cloned.
	 * So we pass the flags to SCSA.  SCSI_HBA_TRAN_CLONE will be
	 * inherited to iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2023 2028
2024 2029 static void
2025 2030 mptsas_hba_teardown(mptsas_t *mpt)
2026 2031 {
2027 2032 (void) scsi_hba_detach(mpt->m_dip);
2028 2033 if (mpt->m_tran != NULL) {
2029 2034 scsi_hba_tran_free(mpt->m_tran);
2030 2035 mpt->m_tran = NULL;
2031 2036 }
2032 2037 }
2033 2038
/*
 * mptsas_iport_register - group the controller phys by port number and
 * register one SCSA iport per group (named after the hex phy mask),
 * plus a virtual iport "v0" that always exists for RAID volumes.
 *
 * Phys with no attached device (attached_devhdl == 0) are skipped, and
 * each phy is visited only once thanks to the cumulative 'mask'.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Already covered by an earlier phy's port group. */
		if ((mask & (1 << i)) != 0)
			continue;

		/* Collect every attached phy on the same port. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the group's mask on each member phy. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * Drop m_mutex across the framework call; the loop state
		 * (i, mask) is on the stack and unaffected.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2088 2093
/*
 * mptsas_smp_setup - allocate and attach the SMP (SAS expander)
 * transport, initialize the SMP hash table, and mark the SMP device
 * handle invalid (0xFFFF) until discovery fills it in.
 * Returns TRUE on success, FALSE on failure (with the transport freed).
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table
	 */
	mptsas_hash_init(&mpt->m_active->m_smptbl);
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2110 2115
2111 2116 static void
2112 2117 mptsas_smp_teardown(mptsas_t *mpt)
2113 2118 {
2114 2119 (void) smp_hba_detach(mpt->m_dip);
2115 2120 if (mpt->m_smptran != NULL) {
2116 2121 smp_hba_tran_free(mpt->m_smptran);
2117 2122 mpt->m_smptran = NULL;
2118 2123 }
2119 2124 mpt->m_smp_devhdl = 0;
2120 2125 }
2121 2126
2122 2127 static int
2123 2128 mptsas_cache_create(mptsas_t *mpt)
2124 2129 {
2125 2130 int instance = mpt->m_instance;
2126 2131 char buf[64];
2127 2132
2128 2133 /*
2129 2134 * create kmem cache for packets
2130 2135 */
2131 2136 (void) sprintf(buf, "mptsas%d_cache", instance);
2132 2137 mpt->m_kmem_cache = kmem_cache_create(buf,
2133 2138 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2134 2139 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2135 2140 NULL, (void *)mpt, NULL, 0);
2136 2141
2137 2142 if (mpt->m_kmem_cache == NULL) {
2138 2143 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2139 2144 return (FALSE);
2140 2145 }
2141 2146
2142 2147 /*
2143 2148 * create kmem cache for extra SGL frames if SGL cannot
2144 2149 * be accomodated into main request frame.
2145 2150 */
2146 2151 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2147 2152 mpt->m_cache_frames = kmem_cache_create(buf,
2148 2153 sizeof (mptsas_cache_frames_t), 8,
2149 2154 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2150 2155 NULL, (void *)mpt, NULL, 0);
2151 2156
2152 2157 if (mpt->m_cache_frames == NULL) {
2153 2158 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2154 2159 return (FALSE);
2155 2160 }
2156 2161
2157 2162 return (TRUE);
2158 2163 }
2159 2164
2160 2165 static void
2161 2166 mptsas_cache_destroy(mptsas_t *mpt)
2162 2167 {
2163 2168 /* deallocate in reverse order */
2164 2169 if (mpt->m_cache_frames) {
2165 2170 kmem_cache_destroy(mpt->m_cache_frames);
2166 2171 mpt->m_cache_frames = NULL;
2167 2172 }
2168 2173 if (mpt->m_kmem_cache) {
2169 2174 kmem_cache_destroy(mpt->m_kmem_cache);
2170 2175 mpt->m_kmem_cache = NULL;
2171 2176 }
2172 2177 }
2173 2178
/*
 * mptsas_power - power(9E) entry point.  Raises the HBA to D0 (waiting
 * for the IOC to leave reset and hard-resetting it if it does not reach
 * the operational state) or lowers it to D3.  iport nodes are a no-op.
 * Refuses to lower power while the device is busy.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* Power management is handled on the HBA node only. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 * (3000 polls x 10ms delay.)
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2245 2250
/*
 * Initialize configuration space and figure out which
 * chip and revison of the chip the mpt driver is using.
 *
 * Maps PCI config space (idempotent - returns early if already mapped),
 * clears any latched parity error, programs the command register and
 * latency timer, and caches the device/revision/subsystem IDs in the
 * softc.  Returns TRUE on success, FALSE if config space cannot be
 * mapped.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Already set up from a previous attach/resume. */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		/* Write-one-to-clear the latched parity error bit. */
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2304 2309
2305 2310 static void
2306 2311 mptsas_config_space_fini(mptsas_t *mpt)
2307 2312 {
2308 2313 if (mpt->m_config_handle != NULL) {
2309 2314 mptsas_disable_bus_master(mpt);
2310 2315 pci_config_teardown(&mpt->m_config_handle);
2311 2316 mpt->m_config_handle = NULL;
2312 2317 }
2313 2318 }
2314 2319
2315 2320 static void
2316 2321 mptsas_setup_cmd_reg(mptsas_t *mpt)
2317 2322 {
2318 2323 ushort_t cmdreg;
2319 2324
2320 2325 /*
2321 2326 * Set the command register to the needed values.
2322 2327 */
2323 2328 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2324 2329 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2325 2330 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2326 2331 cmdreg &= ~PCI_COMM_IO;
2327 2332 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2328 2333 }
2329 2334
2330 2335 static void
2331 2336 mptsas_disable_bus_master(mptsas_t *mpt)
2332 2337 {
2333 2338 ushort_t cmdreg;
2334 2339
2335 2340 /*
2336 2341 * Clear the master enable bit in the PCI command register.
2337 2342 * This prevents any bus mastering activity like DMA.
2338 2343 */
2339 2344 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2340 2345 cmdreg &= ~PCI_COMM_ME;
2341 2346 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2342 2347 }
2343 2348
2344 2349 int
2345 2350 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2346 2351 {
2347 2352 ddi_dma_attr_t attrs;
2348 2353
2349 2354 attrs = mpt->m_io_dma_attr;
2350 2355 attrs.dma_attr_sgllen = 1;
2351 2356
2352 2357 ASSERT(dma_statep != NULL);
2353 2358
2354 2359 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2355 2360 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2356 2361 &dma_statep->cookie) == FALSE) {
2357 2362 return (DDI_FAILURE);
2358 2363 }
2359 2364
2360 2365 return (DDI_SUCCESS);
2361 2366 }
2362 2367
2363 2368 void
2364 2369 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2365 2370 {
2366 2371 ASSERT(dma_statep != NULL);
2367 2372 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2368 2373 dma_statep->size = 0;
2369 2374 }
2370 2375
/*
 * mptsas_do_dma - allocate a temporary single-cookie DMA buffer of
 * 'size' bytes, invoke 'callback' with the buffer's kernel address,
 * 'var' and the access handle, then verify both handles via FM checks
 * before freeing the buffer.  Called with m_mutex held.
 * Returns the callback's result, or DDI_FAILURE if a handle check fails.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t		attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t			memp;
	ddi_acc_handle_t	accessp;
	int			rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/*
	 * FM: check both handles after the callback ran; a failed check
	 * overrides the callback's own result.
	 */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2403 2408
/*
 * mptsas_alloc_request_frames - (re)allocate the system request frame
 * pool (m_max_requests frames of m_req_frame_size bytes, 16-byte
 * aligned, single DMA cookie).  Any previous allocation is destroyed
 * first, so this is safe to call again after an IOC re-init.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *	Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundry.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}
2456 2461
/*
 * mptsas_alloc_reply_frames - (re)allocate the system reply frame pool
 * (m_max_replies frames of m_reply_frame_size bytes, default 4-byte
 * alignment, single DMA cookie).  Any previous allocation is destroyed
 * first, so this is safe to call again after an IOC re-init.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	/*
	 * The size of the reply frame pool is:
	 *	Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.  System Reply Message Frames must be
	 * aligned on a 4-byte boundry.  This is the default.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}
2508 2513
/*
 * mptsas_alloc_free_queue - (re)allocate the reply free queue
 * (m_free_queue_depth entries of 4 bytes each - the low 32 bits of a
 * reply frame address - 16-byte aligned, single DMA cookie).  Any
 * previous allocation is destroyed first, so this is safe to call again
 * after an IOC re-init.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_free_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	/*
	 * The reply free queue size is:
	 *	Reply Free Queue Depth * 4
	 * The "4" is the size of one 32 bit address (low part of 64-bit
	 *	address)
	 */
	mem_size = mpt->m_free_queue_depth * 4;

	/*
	 * set the DMA attributes  The Reply Free Queue must be aligned on a
	 * 16-byte boundry.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply free queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply free queue memory address.  This chip uses this
	 * address to read from the reply free queue.  The second address
	 * is the address mpt uses to manage the queue.
	 */
	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_free_queue = memp;

	/*
	 * Clear the reply free queue memory.
	 */
	bzero(mpt->m_free_queue, mem_size);

	return (DDI_SUCCESS);
}
2563 2568
/*
 * mptsas_alloc_post_queue - (re)allocate the reply descriptor post
 * queue (m_post_queue_depth descriptors of 8 bytes each, 16-byte
 * aligned, single DMA cookie).  Any previous allocation is destroyed
 * first, so this is safe to call again after an IOC re-init.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_post_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/*
	 * The reply descriptor post queue size is:
	 *	Reply Descriptor Post Queue Depth * 8
	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
	 */
	mem_size = mpt->m_post_queue_depth * 8;

	/*
	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
	 * aligned on a 16-byte boundry.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply post queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply descriptor post queue memory address.  This chip
	 * uses this address to write to the reply descriptor post queue.  The
	 * second address is the address mpt uses to manage the queue.
	 */
	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_post_queue = memp;

	/*
	 * Clear the reply post queue memory.
	 */
	bzero(mpt->m_post_queue, mem_size);

	return (DDI_SUCCESS);
}
2617 2622
/*
 * mptsas_alloc_reply_args - ensure the reply-handler argument array
 * exists.  Allocated once (KM_SLEEP, zeroed) and then reused across
 * subsequent calls; it is freed in mptsas_hba_fini().
 *
 * NOTE(review): per the accompanying change, this used to free and
 * re-allocate the array on every call; it now allocates only when the
 * array is absent - presumably so outstanding references survive an
 * IOC re-init.  Confirm against the callers.
 */
static void
mptsas_alloc_reply_args(mptsas_t *mpt)
{
	if (mpt->m_replyh_args == NULL) {
		mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
		    mpt->m_max_replies, KM_SLEEP);
	}
}
2629 2631
2630 2632 static int
2631 2633 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2632 2634 {
2633 2635 mptsas_cache_frames_t *frames = NULL;
2634 2636 if (cmd->cmd_extra_frames == NULL) {
2635 2637 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2636 2638 if (frames == NULL) {
2637 2639 return (DDI_FAILURE);
2638 2640 }
2639 2641 cmd->cmd_extra_frames = frames;
2640 2642 }
2641 2643 return (DDI_SUCCESS);
2642 2644 }
2643 2645
2644 2646 static void
2645 2647 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2646 2648 {
2647 2649 if (cmd->cmd_extra_frames) {
2648 2650 kmem_cache_free(mpt->m_cache_frames,
2649 2651 (void *)cmd->cmd_extra_frames);
2650 2652 cmd->cmd_extra_frames = NULL;
2651 2653 }
2652 2654 }
2653 2655
2654 2656 static void
2655 2657 mptsas_cfg_fini(mptsas_t *mpt)
2656 2658 {
2657 2659 NDBG0(("mptsas_cfg_fini"));
2658 2660 ddi_regs_map_free(&mpt->m_datap);
2659 2661 }
2660 2662
/*
 * mptsas_hba_fini - free the DMA pools allocated for the IOC (request
 * frames, reply frames, reply free queue, reply post queue) and the
 * reply-handler argument array.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2686 2688
/*
 * mptsas_name_child - build the unit-address string for a child LUN
 * node into 'name' (at most 'len' bytes).  SATA devices behind a phy
 * get the form "pPHY,LUN"; SAS devices with a target-port WWN get
 * "wWWN,LUN" (the WWN property string already carries the 'w' prefix).
 * Returns DDI_SUCCESS, or DDI_FAILURE if neither addressing property
 * exists.
 *
 * NOTE(review): on truncation (reallen >= len) this only logs a warning
 * (and trips the DEBUG-only ASSERT) but still returns DDI_SUCCESS with
 * a truncated name - confirm callers always pass a sufficient buffer.
 */
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int lun = 0;
	char *sas_wwn = NULL;
	int phynum = -1;
	int reallen = 0;

	/* Get the target num */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		return (DDI_FAILURE);
	}

	ASSERT(reallen < len);
	if (reallen >= len) {
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}
2724 2726
/*
 * tran_tgt_init(9E) - target device instance initialization
 *
 * Looks up the mptsas_target_t for the child (by SAS WWN and phymask),
 * attaches per-target private data to hba_tran, and for SATA/ATAPI
 * devices overrides the SCSA "inquiry-*" properties using ATA IDENTIFY
 * data obtained through SATL VPD page 0x89.
 *
 * Returns DDI_SUCCESS on success (including a failed optional inquiry),
 * DDI_FAILURE when the node cannot be named or the target is gone, and
 * DDI_NOT_WELL_FORMED when the scsi_vhci pathinfo node is missing.
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	/*
	 * NOTE(review): hba_tran is referenced below via tran_tgt_private,
	 * so this ARGUNUSED annotation appears stale -- confirm.
	 */
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA. Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t	*mpt;
	int		lun = sd->sd_address.a_lun;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t	*ptgt = NULL;
	char		*psas_wwn = NULL;
	int		phymask = 0;
	uint64_t	sas_wwn = 0;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent (.conf-generated) nodes are merged into the
	 * matching persistent node and rejected here.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/*
		 * scsi_vhci client path: fetch LUN and target-port WWN
		 * from the pathinfo node properties.
		 */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			/* Unparsable WWN string degrades to 0. */
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/*
		 * Direct child: fetch the same properties from the
		 * devinfo node instead.
		 */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}
	ASSERT((sas_wwn != 0) || (phymask != 0));
	/* Look up the active target by WWN/phymask under m_mutex. */
	mutex_enter(&mpt->m_mutex);
	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* vhci clients need no inquiry-property fixups; done. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			/* The inquiry is best-effort; not fatal to init. */
			return (DDI_SUCCESS);
		}
		/* IDENTIFY data starts at byte 60 of the VPD 0x89 page. */
		sid = (void *)(&inq89[60]);

		/* ATA strings are byte-swapped; swab into host order. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into vid/pid at the first blank or tab
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
2914 2916 /*
2915 2917 * tran_tgt_free(9E) - target device instance deallocation
2916 2918 */
2917 2919 static void
2918 2920 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2919 2921 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2920 2922 {
2921 2923 #ifndef __lock_lint
2922 2924 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2923 2925 #endif
2924 2926
2925 2927 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2926 2928
2927 2929 if (tgt_private != NULL) {
2928 2930 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2929 2931 hba_tran->tran_tgt_private = NULL;
2930 2932 }
2931 2933 }
2932 2934
2933 2935 /*
2934 2936 * scsi_pkt handling
2935 2937 *
2936 2938 * Visible to the external world via the transport structure.
2937 2939 */
2938 2940
/*
 * tran_start(9E) entry point.
 *
 * Notes:
 *	- transport the command to the addressed SCSI target/lun device
 *	- normal operation is to schedule the command to be transported,
 *	  and return TRAN_ACCEPT if this is successful.
 *	- if NO_INTR, tran_start must poll device for command completion
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmd. An extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			/* Got the instance lock without blocking. */
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled commands must not be deferred; block. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/* Another thread is draining; append. */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3088 3090
/*
 * Accept all the queued cmds (if any) before accepting the current one.
 * Caller must hold m_mutex; m_tx_waitq_mutex is taken briefly to drain
 * the transmit wait queue.
 */
static int
mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	/*
	 * The call to mptsas_accept_tx_waitq() must always be performed
	 * because that is where mpt->m_tx_draining is cleared.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	mptsas_accept_tx_waitq(mpt);
	mutex_exit(&mpt->m_tx_waitq_mutex);
	/*
	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
	 * in this case, m_mutex is acquired.
	 */
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * The command should be allowed to retry by returning
			 * TRAN_BUSY to stall the I/O's which come from
			 * scsi_vhci since the device/path is in unstable state
			 * now.
			 */
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the command by
			 * returning TRAN_FATAL_ERROR.
			 */
			return (TRAN_FATAL_ERROR);
		}
	}
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3131 3133
/*
 * Accept one prepared command while holding m_mutex: check target and
 * controller state, then either start the command immediately or place
 * it on the wait queue.  Commands carrying CFLAG_TXQ that must fail are
 * completed via the done queue instead of returning an error, since
 * their originator was already given TRAN_ACCEPT.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Commands arriving via the tx_waitq may not be prepared yet. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* Complete via the done queue; caller saw ACCEPT. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If the device handle has already been invalidated, just fail the
	 * command.  In theory, a command from a scsi_vhci client should be
	 * impossible to send down with an invalid devhdl, since devhdl is
	 * set after path offline and the target driver is not supposed to
	 * select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free slot; defer to the wait queue. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3235 3237
/*
 * Reserve a request slot (SMID) for the command.  SMID 0 and the final
 * slot (reserved for task management requests) are never handed out, so
 * usable tags are 1..m_n_slots.  On success the command is recorded in
 * m_slot[], the instance and per-target command counts are bumped, the
 * active timeout is primed, and TRUE is returned; FALSE means every
 * slot is currently occupied.  Caller must hold m_mutex.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots;
	int		slot;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * m_tags is equivalent to the SMID when sending requests.  Since the
	 * SMID cannot be 0, start out at one if rolling over past the size
	 * of the request queue depth.  Also, don't use the last SMID, which is
	 * reserved for TM requests.
	 */
	slot = (slots->m_tags)++;
	if (slots->m_tags > slots->m_n_slots) {
		slots->m_tags = 1;
	}

alloc_tag:
	/* Validate tag, should never fail. */
	if (slots->m_slot[slot] == NULL) {
		/*
		 * Make sure SMID is not using reserved value of 0
		 * and the TM request slot.
		 */
		ASSERT((slot > 0) && (slot <= slots->m_n_slots));
		cmd->cmd_slot = slot;
		slots->m_slot[slot] = cmd;
		mpt->m_ncmds++;

		/*
		 * only increment per target ncmds if this is not a
		 * command that has no target associated with it (i.e. an
		 * event acknowledgment)
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds++;
		}
		cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

		/*
		 * If initial timeout is less than or equal to one tick, bump
		 * the timeout by a tick so that command doesn't timeout before
		 * its allotted time.
		 */
		if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
			cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
		}
		return (TRUE);
	} else {
		int i;

		/*
		 * If slot in use, scan until a free one is found. Don't use 0
		 * or final slot, which is reserved for TM requests.
		 */
		for (i = 0; i < slots->m_n_slots; i++) {
			slot = slots->m_tags;
			if (++(slots->m_tags) > slots->m_n_slots) {
				slots->m_tags = 1;
			}
			if (slots->m_slot[slot] == NULL) {
				NDBG22(("found free slot %d", slot));
				goto alloc_tag;
			}
		}
	}
	return (FALSE);
}
3313 3315
3314 3316 /*
3315 3317 * prepare the pkt:
3316 3318 * the pkt may have been resubmitted or just reused so
3317 3319 * initialize some fields and do some checks.
3318 3320 */
3319 3321 static int
3320 3322 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3321 3323 {
3322 3324 struct scsi_pkt *pkt = CMD2PKT(cmd);
3323 3325
3324 3326 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3325 3327
3326 3328 /*
3327 3329 * Reinitialize some fields that need it; the packet may
3328 3330 * have been resubmitted
3329 3331 */
3330 3332 pkt->pkt_reason = CMD_CMPLT;
3331 3333 pkt->pkt_state = 0;
3332 3334 pkt->pkt_statistics = 0;
3333 3335 pkt->pkt_resid = 0;
3334 3336 cmd->cmd_age = 0;
3335 3337 cmd->cmd_pkt_flags = pkt->pkt_flags;
3336 3338
3337 3339 /*
3338 3340 * zero status byte.
3339 3341 */
3340 3342 *(pkt->pkt_scbp) = 0;
3341 3343
3342 3344 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3343 3345 pkt->pkt_resid = cmd->cmd_dmacount;
3344 3346
3345 3347 /*
3346 3348 * consistent packets need to be sync'ed first
3347 3349 * (only for data going out)
3348 3350 */
3349 3351 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3350 3352 (cmd->cmd_flags & CFLAG_DMASEND)) {
3351 3353 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3352 3354 DDI_DMA_SYNC_FORDEV);
3353 3355 }
3354 3356 }
3355 3357
3356 3358 cmd->cmd_flags =
3357 3359 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3358 3360 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3359 3361
3360 3362 return (TRAN_ACCEPT);
3361 3363 }
3362 3364
3363 3365 /*
3364 3366 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3365 3367 *
3366 3368 * One of three possibilities:
3367 3369 * - allocate scsi_pkt
3368 3370 * - allocate scsi_pkt and DMA resources
3369 3371 * - allocate DMA resources to an already-allocated pkt
3370 3372 */
3371 3373 static struct scsi_pkt *
3372 3374 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3373 3375 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3374 3376 int (*callback)(), caddr_t arg)
3375 3377 {
3376 3378 mptsas_cmd_t *cmd, *new_cmd;
3377 3379 mptsas_t *mpt = ADDR2MPT(ap);
3378 3380 int failure = 1;
3379 3381 uint_t oldcookiec;
3380 3382 mptsas_target_t *ptgt = NULL;
3381 3383 int rval;
3382 3384 mptsas_tgt_private_t *tgt_private;
3383 3385 int kf;
3384 3386
3385 3387 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3386 3388
3387 3389 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3388 3390 tran_tgt_private;
3389 3391 ASSERT(tgt_private != NULL);
3390 3392 if (tgt_private == NULL) {
3391 3393 return (NULL);
3392 3394 }
3393 3395 ptgt = tgt_private->t_private;
3394 3396 ASSERT(ptgt != NULL);
3395 3397 if (ptgt == NULL)
3396 3398 return (NULL);
3397 3399 ap->a_target = ptgt->m_devhdl;
3398 3400 ap->a_lun = tgt_private->t_lun;
3399 3401
3400 3402 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3401 3403 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3402 3404 statuslen *= 100; tgtlen *= 4;
3403 3405 #endif
3404 3406 NDBG3(("mptsas_scsi_init_pkt:\n"
3405 3407 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3406 3408 ap->a_target, (void *)pkt, (void *)bp,
3407 3409 cmdlen, statuslen, tgtlen, flags));
3408 3410
3409 3411 /*
3410 3412 * Allocate the new packet.
3411 3413 */
3412 3414 if (pkt == NULL) {
3413 3415 ddi_dma_handle_t save_dma_handle;
3414 3416 ddi_dma_handle_t save_arq_dma_handle;
3415 3417 struct buf *save_arq_bp;
3416 3418 ddi_dma_cookie_t save_arqcookie;
3417 3419
3418 3420 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3419 3421
3420 3422 if (cmd) {
3421 3423 save_dma_handle = cmd->cmd_dmahandle;
3422 3424 save_arq_dma_handle = cmd->cmd_arqhandle;
3423 3425 save_arq_bp = cmd->cmd_arq_buf;
3424 3426 save_arqcookie = cmd->cmd_arqcookie;
3425 3427 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3426 3428 cmd->cmd_dmahandle = save_dma_handle;
3427 3429 cmd->cmd_arqhandle = save_arq_dma_handle;
3428 3430 cmd->cmd_arq_buf = save_arq_bp;
3429 3431 cmd->cmd_arqcookie = save_arqcookie;
3430 3432
3431 3433 pkt = (void *)((uchar_t *)cmd +
3432 3434 sizeof (struct mptsas_cmd));
3433 3435 pkt->pkt_ha_private = (opaque_t)cmd;
3434 3436 pkt->pkt_address = *ap;
3435 3437 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3436 3438 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3437 3439 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3438 3440 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3439 3441 cmd->cmd_cdblen = (uchar_t)cmdlen;
3440 3442 cmd->cmd_scblen = statuslen;
3441 3443 cmd->cmd_rqslen = SENSE_LENGTH;
3442 3444 cmd->cmd_tgt_addr = ptgt;
3443 3445 failure = 0;
3444 3446 }
3445 3447
3446 3448 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3447 3449 (tgtlen > PKT_PRIV_LEN) ||
3448 3450 (statuslen > EXTCMDS_STATUS_SIZE)) {
3449 3451 if (failure == 0) {
3450 3452 /*
3451 3453 * if extern alloc fails, all will be
3452 3454 * deallocated, including cmd
3453 3455 */
3454 3456 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3455 3457 cmdlen, tgtlen, statuslen, kf);
3456 3458 }
3457 3459 if (failure) {
3458 3460 /*
3459 3461 * if extern allocation fails, it will
3460 3462 * deallocate the new pkt as well
3461 3463 */
3462 3464 return (NULL);
3463 3465 }
3464 3466 }
3465 3467 new_cmd = cmd;
3466 3468
3467 3469 } else {
3468 3470 cmd = PKT2CMD(pkt);
3469 3471 new_cmd = NULL;
3470 3472 }
3471 3473
3472 3474
3473 3475 /* grab cmd->cmd_cookiec here as oldcookiec */
3474 3476
3475 3477 oldcookiec = cmd->cmd_cookiec;
3476 3478
3477 3479 /*
3478 3480 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3479 3481 * greater than 0 and we'll need to grab the next dma window
3480 3482 */
3481 3483 /*
3482 3484 * SLM-not doing extra command frame right now; may add later
3483 3485 */
3484 3486
3485 3487 if (cmd->cmd_nwin > 0) {
3486 3488
3487 3489 /*
3488 3490 * Make sure we havn't gone past the the total number
3489 3491 * of windows
3490 3492 */
3491 3493 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3492 3494 return (NULL);
3493 3495 }
3494 3496 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3495 3497 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3496 3498 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3497 3499 return (NULL);
3498 3500 }
3499 3501 goto get_dma_cookies;
3500 3502 }
3501 3503
3502 3504
3503 3505 if (flags & PKT_XARQ) {
3504 3506 cmd->cmd_flags |= CFLAG_XARQ;
3505 3507 }
3506 3508
3507 3509 /*
3508 3510 * DMA resource allocation. This version assumes your
3509 3511 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3510 3512 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3511 3513 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3512 3514 */
3513 3515 if (bp && (bp->b_bcount != 0) &&
3514 3516 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3515 3517
3516 3518 int cnt, dma_flags;
3517 3519 mptti_t *dmap; /* ptr to the S/G list */
3518 3520
3519 3521 /*
3520 3522 * Set up DMA memory and position to the next DMA segment.
3521 3523 */
3522 3524 ASSERT(cmd->cmd_dmahandle != NULL);
3523 3525
3524 3526 if (bp->b_flags & B_READ) {
3525 3527 dma_flags = DDI_DMA_READ;
3526 3528 cmd->cmd_flags &= ~CFLAG_DMASEND;
3527 3529 } else {
3528 3530 dma_flags = DDI_DMA_WRITE;
3529 3531 cmd->cmd_flags |= CFLAG_DMASEND;
3530 3532 }
3531 3533 if (flags & PKT_CONSISTENT) {
3532 3534 cmd->cmd_flags |= CFLAG_CMDIOPB;
3533 3535 dma_flags |= DDI_DMA_CONSISTENT;
3534 3536 }
3535 3537
3536 3538 if (flags & PKT_DMA_PARTIAL) {
3537 3539 dma_flags |= DDI_DMA_PARTIAL;
3538 3540 }
3539 3541
3540 3542 /*
3541 3543 * workaround for byte hole issue on psycho and
3542 3544 * schizo pre 2.1
3543 3545 */
3544 3546 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3545 3547 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3546 3548 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3547 3549 dma_flags |= DDI_DMA_CONSISTENT;
3548 3550 }
3549 3551
3550 3552 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3551 3553 dma_flags, callback, arg,
3552 3554 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3553 3555 if (rval == DDI_DMA_PARTIAL_MAP) {
3554 3556 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3555 3557 &cmd->cmd_nwin);
3556 3558 cmd->cmd_winindex = 0;
3557 3559 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3558 3560 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3559 3561 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3560 3562 &cmd->cmd_cookiec);
3561 3563 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3562 3564 switch (rval) {
3563 3565 case DDI_DMA_NORESOURCES:
3564 3566 bioerror(bp, 0);
3565 3567 break;
3566 3568 case DDI_DMA_BADATTR:
3567 3569 case DDI_DMA_NOMAPPING:
3568 3570 bioerror(bp, EFAULT);
3569 3571 break;
3570 3572 case DDI_DMA_TOOBIG:
3571 3573 default:
3572 3574 bioerror(bp, EINVAL);
3573 3575 break;
3574 3576 }
3575 3577 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3576 3578 if (new_cmd) {
3577 3579 mptsas_scsi_destroy_pkt(ap, pkt);
3578 3580 }
3579 3581 return ((struct scsi_pkt *)NULL);
3580 3582 }
3581 3583
3582 3584 get_dma_cookies:
3583 3585 cmd->cmd_flags |= CFLAG_DMAVALID;
3584 3586 ASSERT(cmd->cmd_cookiec > 0);
3585 3587
3586 3588 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3587 3589 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3588 3590 cmd->cmd_cookiec);
3589 3591 bioerror(bp, EINVAL);
3590 3592 if (new_cmd) {
3591 3593 mptsas_scsi_destroy_pkt(ap, pkt);
3592 3594 }
3593 3595 return ((struct scsi_pkt *)NULL);
3594 3596 }
3595 3597
3596 3598 /*
3597 3599 * Allocate extra SGL buffer if needed.
3598 3600 */
3599 3601 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3600 3602 (cmd->cmd_extra_frames == NULL)) {
3601 3603 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3602 3604 DDI_FAILURE) {
3603 3605 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3604 3606 "failed");
3605 3607 bioerror(bp, ENOMEM);
3606 3608 if (new_cmd) {
3607 3609 mptsas_scsi_destroy_pkt(ap, pkt);
3608 3610 }
3609 3611 return ((struct scsi_pkt *)NULL);
3610 3612 }
3611 3613 }
3612 3614
3613 3615 /*
3614 3616 * Always use scatter-gather transfer
3615 3617 * Use the loop below to store physical addresses of
3616 3618 * DMA segments, from the DMA cookies, into your HBA's
3617 3619 * scatter-gather list.
3618 3620 * We need to ensure we have enough kmem alloc'd
3619 3621 * for the sg entries since we are no longer using an
3620 3622 * array inside mptsas_cmd_t.
3621 3623 *
3622 3624 * We check cmd->cmd_cookiec against oldcookiec so
3623 3625 * the scatter-gather list is correctly allocated
3624 3626 */
3625 3627
3626 3628 if (oldcookiec != cmd->cmd_cookiec) {
3627 3629 if (cmd->cmd_sg != (mptti_t *)NULL) {
3628 3630 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3629 3631 oldcookiec);
3630 3632 cmd->cmd_sg = NULL;
3631 3633 }
3632 3634 }
3633 3635
3634 3636 if (cmd->cmd_sg == (mptti_t *)NULL) {
3635 3637 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3636 3638 cmd->cmd_cookiec), kf);
3637 3639
3638 3640 if (cmd->cmd_sg == (mptti_t *)NULL) {
3639 3641 mptsas_log(mpt, CE_WARN,
3640 3642 "unable to kmem_alloc enough memory "
3641 3643 "for scatter/gather list");
3642 3644 /*
3643 3645 * if we have an ENOMEM condition we need to behave
3644 3646 * the same way as the rest of this routine
3645 3647 */
3646 3648
3647 3649 bioerror(bp, ENOMEM);
3648 3650 if (new_cmd) {
3649 3651 mptsas_scsi_destroy_pkt(ap, pkt);
3650 3652 }
3651 3653 return ((struct scsi_pkt *)NULL);
3652 3654 }
3653 3655 }
3654 3656
3655 3657 dmap = cmd->cmd_sg;
3656 3658
3657 3659 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3658 3660
3659 3661 /*
3660 3662 * store the first segment into the S/G list
3661 3663 */
3662 3664 dmap->count = cmd->cmd_cookie.dmac_size;
3663 3665 dmap->addr.address64.Low = (uint32_t)
3664 3666 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3665 3667 dmap->addr.address64.High = (uint32_t)
3666 3668 (cmd->cmd_cookie.dmac_laddress >> 32);
3667 3669
3668 3670 /*
3669 3671 * dmacount counts the size of the dma for this window
3670 3672 * (if partial dma is being used). totaldmacount
3671 3673 * keeps track of the total amount of dma we have
3672 3674 * transferred for all the windows (needed to calculate
3673 3675 * the resid value below).
3674 3676 */
3675 3677 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3676 3678 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3677 3679
3678 3680 /*
3679 3681 * We already stored the first DMA scatter gather segment,
3680 3682 * start at 1 if we need to store more.
3681 3683 */
3682 3684 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3683 3685 /*
3684 3686 * Get next DMA cookie
3685 3687 */
3686 3688 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3687 3689 &cmd->cmd_cookie);
3688 3690 dmap++;
3689 3691
3690 3692 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3691 3693 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3692 3694
3693 3695 /*
3694 3696 * store the segment parms into the S/G list
3695 3697 */
3696 3698 dmap->count = cmd->cmd_cookie.dmac_size;
3697 3699 dmap->addr.address64.Low = (uint32_t)
3698 3700 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3699 3701 dmap->addr.address64.High = (uint32_t)
3700 3702 (cmd->cmd_cookie.dmac_laddress >> 32);
3701 3703 }
3702 3704
3703 3705 /*
3704 3706 * If this was partially allocated we set the resid
3705 3707 * the amount of data NOT transferred in this window
3706 3708 * If there is only one window, the resid will be 0
3707 3709 */
3708 3710 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3709 3711 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3710 3712 }
3711 3713 return (pkt);
3712 3714 }
3713 3715
3714 3716 /*
3715 3717 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3716 3718 *
3717 3719 * Notes:
3718 3720 * - also frees DMA resources if allocated
3719 3721 * - implicit DMA synchonization
3720 3722 */
3721 3723 static void
3722 3724 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3723 3725 {
3724 3726 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3725 3727 mptsas_t *mpt = ADDR2MPT(ap);
3726 3728
3727 3729 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3728 3730 ap->a_target, (void *)pkt));
3729 3731
3730 3732 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3731 3733 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3732 3734 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3733 3735 }
3734 3736
3735 3737 if (cmd->cmd_sg) {
3736 3738 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3737 3739 cmd->cmd_sg = NULL;
3738 3740 }
3739 3741
3740 3742 mptsas_free_extra_sgl_frame(mpt, cmd);
3741 3743
3742 3744 if ((cmd->cmd_flags &
3743 3745 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3744 3746 CFLAG_SCBEXTERN)) == 0) {
3745 3747 cmd->cmd_flags = CFLAG_FREE;
3746 3748 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3747 3749 } else {
3748 3750 mptsas_pkt_destroy_extern(mpt, cmd);
3749 3751 }
3750 3752 }
3751 3753
3752 3754 /*
3753 3755 * kmem cache constructor and destructor:
3754 3756 * When constructing, we bzero the cmd and allocate the dma handle
3755 3757 * When destructing, just free the dma handle
3756 3758 */
3757 3759 static int
3758 3760 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3759 3761 {
3760 3762 mptsas_cmd_t *cmd = buf;
3761 3763 mptsas_t *mpt = cdrarg;
3762 3764 struct scsi_address ap;
3763 3765 uint_t cookiec;
3764 3766 ddi_dma_attr_t arq_dma_attr;
3765 3767 int (*callback)(caddr_t);
3766 3768
3767 3769 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3768 3770
3769 3771 NDBG4(("mptsas_kmem_cache_constructor"));
3770 3772
3771 3773 ap.a_hba_tran = mpt->m_tran;
3772 3774 ap.a_target = 0;
3773 3775 ap.a_lun = 0;
3774 3776
3775 3777 /*
3776 3778 * allocate a dma handle
3777 3779 */
3778 3780 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3779 3781 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3780 3782 cmd->cmd_dmahandle = NULL;
3781 3783 return (-1);
3782 3784 }
3783 3785
3784 3786 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3785 3787 SENSE_LENGTH, B_READ, callback, NULL);
3786 3788 if (cmd->cmd_arq_buf == NULL) {
3787 3789 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3788 3790 cmd->cmd_dmahandle = NULL;
3789 3791 return (-1);
3790 3792 }
3791 3793
3792 3794 /*
3793 3795 * allocate a arq handle
3794 3796 */
3795 3797 arq_dma_attr = mpt->m_msg_dma_attr;
3796 3798 arq_dma_attr.dma_attr_sgllen = 1;
3797 3799 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3798 3800 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3799 3801 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3800 3802 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3801 3803 cmd->cmd_dmahandle = NULL;
3802 3804 cmd->cmd_arqhandle = NULL;
3803 3805 return (-1);
3804 3806 }
3805 3807
3806 3808 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3807 3809 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3808 3810 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3809 3811 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3810 3812 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3811 3813 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3812 3814 cmd->cmd_dmahandle = NULL;
3813 3815 cmd->cmd_arqhandle = NULL;
3814 3816 cmd->cmd_arq_buf = NULL;
3815 3817 return (-1);
3816 3818 }
3817 3819
3818 3820 return (0);
3819 3821 }
3820 3822
3821 3823 static void
3822 3824 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3823 3825 {
3824 3826 #ifndef __lock_lint
3825 3827 _NOTE(ARGUNUSED(cdrarg))
3826 3828 #endif
3827 3829 mptsas_cmd_t *cmd = buf;
3828 3830
3829 3831 NDBG4(("mptsas_kmem_cache_destructor"));
3830 3832
3831 3833 if (cmd->cmd_arqhandle) {
3832 3834 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3833 3835 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3834 3836 cmd->cmd_arqhandle = NULL;
3835 3837 }
3836 3838 if (cmd->cmd_arq_buf) {
3837 3839 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3838 3840 cmd->cmd_arq_buf = NULL;
3839 3841 }
3840 3842 if (cmd->cmd_dmahandle) {
3841 3843 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3842 3844 cmd->cmd_dmahandle = NULL;
3843 3845 }
3844 3846 }
3845 3847
3846 3848 static int
3847 3849 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
3848 3850 {
3849 3851 mptsas_cache_frames_t *p = buf;
3850 3852 mptsas_t *mpt = cdrarg;
3851 3853 ddi_dma_attr_t frame_dma_attr;
3852 3854 size_t mem_size, alloc_len;
3853 3855 ddi_dma_cookie_t cookie;
3854 3856 uint_t ncookie;
3855 3857 int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
3856 3858 ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3857 3859
3858 3860 frame_dma_attr = mpt->m_msg_dma_attr;
3859 3861 frame_dma_attr.dma_attr_align = 0x10;
3860 3862 frame_dma_attr.dma_attr_sgllen = 1;
3861 3863
3862 3864 if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
3863 3865 &p->m_dma_hdl) != DDI_SUCCESS) {
3864 3866 mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
3865 3867 " extra SGL.");
3866 3868 return (DDI_FAILURE);
3867 3869 }
3868 3870
3869 3871 mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
3870 3872
3871 3873 if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
3872 3874 DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
3873 3875 &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
3874 3876 ddi_dma_free_handle(&p->m_dma_hdl);
3875 3877 p->m_dma_hdl = NULL;
3876 3878 mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
3877 3879 " extra SGL.");
3878 3880 return (DDI_FAILURE);
3879 3881 }
3880 3882
3881 3883 if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
3882 3884 alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3883 3885 &cookie, &ncookie) != DDI_DMA_MAPPED) {
3884 3886 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3885 3887 ddi_dma_free_handle(&p->m_dma_hdl);
3886 3888 p->m_dma_hdl = NULL;
3887 3889 mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
3888 3890 " extra SGL");
3889 3891 return (DDI_FAILURE);
3890 3892 }
3891 3893
3892 3894 /*
3893 3895 * Store the SGL memory address. This chip uses this
3894 3896 * address to dma to and from the driver. The second
3895 3897 * address is the address mpt uses to fill in the SGL.
3896 3898 */
3897 3899 p->m_phys_addr = cookie.dmac_address;
3898 3900
3899 3901 return (DDI_SUCCESS);
3900 3902 }
3901 3903
3902 3904 static void
3903 3905 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3904 3906 {
3905 3907 #ifndef __lock_lint
3906 3908 _NOTE(ARGUNUSED(cdrarg))
3907 3909 #endif
3908 3910 mptsas_cache_frames_t *p = buf;
3909 3911 if (p->m_dma_hdl != NULL) {
3910 3912 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3911 3913 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3912 3914 ddi_dma_free_handle(&p->m_dma_hdl);
3913 3915 p->m_phys_addr = NULL;
3914 3916 p->m_frames_addr = NULL;
3915 3917 p->m_dma_hdl = NULL;
3916 3918 p->m_acc_hdl = NULL;
3917 3919 }
3918 3920
3919 3921 }
3920 3922
3921 3923 /*
3922 3924 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3923 3925 * for non-standard length cdb, pkt_private, status areas
3924 3926 * if allocation fails, then deallocate all external space and the pkt
3925 3927 */
3926 3928 /* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t			cdbp, scbp, tgt;
	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	struct scsi_address	ap;
	size_t			senselength;
	ddi_dma_attr_t		ext_arq_dma_attr;
	uint_t			cookiec;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	/*
	 * Record the requested lengths up front: the failure path calls
	 * mptsas_pkt_destroy_extern(), which reads cmd_scblen/cmd_privlen
	 * (guarded by the CFLAG_*EXTERN flags set below) to size its
	 * kmem_free() calls.
	 */
	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen		= statuslen;
	cmd->cmd_privlen	= (uchar_t)tgtlen;

	/* Oversized CDB: allocate it outside the embedded cmd_cdb array. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		/* flag set only after success so the fail path stays safe */
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* Oversized target-private area. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/*
	 * Oversized status block: needs both an external scb and a
	 * DMA-able sense buffer bound through its own single-segment
	 * handle for auto request sense.
	 */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */

		/* sense bytes = status area past the sts_sensedata offset */
		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
		    struct scsi_arq_status, sts_sensedata);
		cmd->cmd_rqslen = (uchar_t)senselength;

		/*
		 * Minimal scsi_address for scsi_alloc_consistent_buf();
		 * only the hba_tran pointer is meaningful here.
		 */
		ap.a_hba_tran = mpt->m_tran;
		ap.a_target = 0;
		ap.a_lun = 0;

		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, senselength, B_READ,
		    callback, NULL);

		if (cmd->cmd_ext_arq_buf == NULL) {
			goto fail;
		}
		/*
		 * allocate a extern arq handle and bind the buf
		 */
		ext_arq_dma_attr = mpt->m_msg_dma_attr;
		ext_arq_dma_attr.dma_attr_sgllen = 1;
		if ((ddi_dma_alloc_handle(mpt->m_dip,
		    &ext_arq_dma_attr, callback,
		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
		    callback, NULL, &cmd->cmd_ext_arqcookie,
		    &cookiec)
		    != DDI_SUCCESS) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
	}
	return (0);
fail:
	/* Releases everything allocated so far and frees the cmd itself. */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
4010 4012
4011 4013 /*
4012 4014 * deallocate external pkt space and deallocate the pkt
4013 4015 */
4014 4016 static void
4015 4017 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4016 4018 {
4017 4019 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4018 4020
4019 4021 if (cmd->cmd_flags & CFLAG_FREE) {
4020 4022 mptsas_log(mpt, CE_PANIC,
4021 4023 "mptsas_pkt_destroy_extern: freeing free packet");
4022 4024 _NOTE(NOT_REACHED)
4023 4025 /* NOTREACHED */
4024 4026 }
4025 4027 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4026 4028 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4027 4029 }
4028 4030 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4029 4031 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4030 4032 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4031 4033 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4032 4034 }
4033 4035 if (cmd->cmd_ext_arqhandle) {
4034 4036 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4035 4037 cmd->cmd_ext_arqhandle = NULL;
4036 4038 }
4037 4039 if (cmd->cmd_ext_arq_buf)
4038 4040 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4039 4041 }
4040 4042 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4041 4043 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4042 4044 }
4043 4045 cmd->cmd_flags = CFLAG_FREE;
4044 4046 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4045 4047 }
4046 4048
4047 4049 /*
4048 4050 * tran_sync_pkt(9E) - explicit DMA synchronization
4049 4051 */
4050 4052 /*ARGSUSED*/
4051 4053 static void
4052 4054 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4053 4055 {
4054 4056 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4055 4057
4056 4058 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4057 4059 ap->a_target, (void *)pkt));
4058 4060
4059 4061 if (cmd->cmd_dmahandle) {
4060 4062 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4061 4063 (cmd->cmd_flags & CFLAG_DMASEND) ?
4062 4064 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4063 4065 }
4064 4066 }
4065 4067
4066 4068 /*
4067 4069 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4068 4070 */
4069 4071 /*ARGSUSED*/
4070 4072 static void
4071 4073 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4072 4074 {
4073 4075 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4074 4076 mptsas_t *mpt = ADDR2MPT(ap);
4075 4077
4076 4078 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4077 4079 ap->a_target, (void *)pkt));
4078 4080
4079 4081 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4080 4082 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4081 4083 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4082 4084 }
4083 4085
4084 4086 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4085 4087 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4086 4088 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4087 4089 }
4088 4090
4089 4091 mptsas_free_extra_sgl_frame(mpt, cmd);
4090 4092 }
4091 4093
4092 4094 static void
4093 4095 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4094 4096 {
4095 4097 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4096 4098 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4097 4099 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4098 4100 DDI_DMA_SYNC_FORCPU);
4099 4101 }
4100 4102 (*pkt->pkt_comp)(pkt);
4101 4103 }
4102 4104
/*
 * Build the MPI2 scatter/gather list for a command: sets the read/write
 * direction bit in *control, the frame's DataLength, and writes one SGE
 * per DMA cookie either entirely into the main request frame or, when
 * there are more cookies than the main frame can hold, into the chain
 * of extra SGL frames pre-allocated for this command
 * (cmd->cmd_extra_frames -- assumed non-NULL in the multi-frame branch;
 * presumably guaranteed by the allocation in the init_pkt path -- TODO
 * confirm).
 */
static void
mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));

	/*
	 * Set read/write bit in control.
	 */
	if (cmd->cmd_flags & CFLAG_DMASEND) {
		*control |= MPI2_SCSIIO_CONTROL_WRITE;
	} else {
		*control |= MPI2_SCSIIO_CONTROL_READ;
	}

	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);

	/*
	 * We have 2 cases here.  First where we can fit all the
	 * SG elements into the main frame, and the case
	 * where we can't.
	 * If we have more cookies than we can attach to a frame
	 * we will need to use a chain element to point
	 * a location of memory where the rest of the S/G
	 * elements reside.
	 */
	if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
		/* Simple case: every SGE fits in the main frame. */
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);
		while (cookiec--) {
			ddi_put32(acc_hdl,
			    &sge->Address.Low, dmap->addr.address64.Low);
			ddi_put32(acc_hdl,
			    &sge->Address.High, dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength,
			    dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)
			    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last cookie, we set the flags
			 * to indicate so
			 */
			if (cookiec == 0) {
				flags |=
				    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
				    | MPI2_SGE_FLAGS_END_OF_BUFFER
				    | MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			/* direction flag per SGE, as the MPI2 spec requires */
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	} else {
		/*
		 * Hereby we start to deal with multiple frames.
		 * The process is as follows:
		 * 1. Determine how many frames are needed for SGL element
		 *    storage; Note that all frames are stored in contiguous
		 *    memory space and in 64-bit DMA mode each element is
		 *    3 double-words (12 bytes) long.
		 * 2. Fill up the main frame.  We need to do this separately
		 *    since it contains the SCSI IO request header and needs
		 *    dedicated processing.  Note that the last 4 double-words
		 *    of the SCSI IO header is for SGL element storage
		 *    (MPI2_SGE_IO_UNION).
		 * 3. Fill the chain element in the main frame, so the DMA
		 *    engine can use the following frames.
		 * 4. Enter a loop to fill the remaining frames.  Note that the
		 *    last frame contains no chain element.  The remaining
		 *    frames go into the mpt SGL buffer allocated on the fly,
		 *    not immediately following the main message frame, as in
		 *    Gen1.
		 * Some restrictions:
		 * 1. For 64-bit DMA, the simple element and chain element
		 *    are both of 3 double-words (12 bytes) in size, even
		 *    though all frames are stored in the first 4G of mem
		 *    range and the higher 32-bits of the address are always 0.
		 * 2. On some controllers (like the 1064/1068), a frame can
		 *    hold SGL elements with the last 1 or 2 double-words
		 *    (4 or 8 bytes) un-used. On these controllers, we should
		 *    recognize that there's not enough room for another SGL
		 *    element and move the sge pointer to the next frame.
		 */
		int		i, j, k, l, frames, sgemax;
		int		temp;
		uint8_t		chainflags;
		uint16_t	chainlength;
		mptsas_cache_frames_t *p;

		/*
		 * Sgemax is the number of SGE's that will fit
		 * each extra frame and frames is total
		 * number of frames we'll need.  1 sge entry per
		 * frame is reseverd for the chain element thus the -1 below.
		 */
		sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
		    - 1);
		temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

		/*
		 * A little check to see if we need to round up the number
		 * of frames we need
		 * (NOTE(review): "> 1" rather than "> 0" looks intentional:
		 * the final frame carries no chain element so it can hold
		 * one extra SGE -- confirm against the fill loop below.)
		 */
		if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
		    sgemax) > 1) {
			frames = (temp + 1);
		} else {
			frames = temp;
		}
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);

		/*
		 * First fill in the main frame
		 */
		for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
			ddi_put32(acc_hdl, &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(acc_hdl, &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last SGE of this frame
			 * we set the end of list flag
			 */
			if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}

		/*
		 * Fill in the chain element in the main frame.
		 * About calculation on ChainOffset:
		 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
		 *    in the end reserved for SGL element storage
		 *    (MPI2_SGE_IO_UNION); we should count it in our
		 *    calculation.  See its definition in the header file.
		 * 2. Constant j is the counter of the current SGL element
		 *    that will be processed, and (j - 1) is the number of
		 *    SGL elements that have been processed (stored in the
		 *    main frame).
		 * 3. ChainOffset value should be in units of double-words (4
		 *    bytes) so the last value should be divided by 4.
		 */
		ddi_put8(acc_hdl, &frame->ChainOffset,
		    (sizeof (MPI2_SCSI_IO_REQUEST) -
		    sizeof (MPI2_SGE_IO_UNION) +
		    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		sgechain = (pMpi2SGEChain64_t)sge;
		chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
		ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

		/*
		 * The size of the next frame is the accurate size of space
		 * (in bytes) used to store the SGL elements. j is the counter
		 * of SGL elements. (j - 1) is the number of SGL elements that
		 * have been processed (stored in frames).
		 */
		if (frames >= 2) {
			chainlength = mpt->m_req_frame_size /
			    sizeof (MPI2_SGE_SIMPLE64) *
			    sizeof (MPI2_SGE_SIMPLE64);
		} else {
			chainlength = ((cookiec - (j - 1)) *
			    sizeof (MPI2_SGE_SIMPLE64));
		}

		p = cmd->cmd_extra_frames;

		ddi_put16(acc_hdl, &sgechain->Length, chainlength);
		ddi_put32(acc_hdl, &sgechain->Address.Low,
		    p->m_phys_addr);
		/* SGL is allocated in the first 4G mem range */
		ddi_put32(acc_hdl, &sgechain->Address.High, 0);

		/*
		 * If there are more than 2 frames left we have to
		 * fill in the next chain offset to the location of
		 * the chain element in the next frame.
		 * sgemax is the number of simple elements in an extra
		 * frame.  Note that the value NextChainOffset should be
		 * in double-words (4 bytes).
		 */
		if (frames >= 2) {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset,
			    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		} else {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
		}

		/*
		 * Jump to next frame;
		 * Starting here, chain buffers go into the per command SGL.
		 * This buffer is allocated when chain buffers are needed.
		 */
		sge = (pMpi2SGESimple64_t)p->m_frames_addr;
		i = cookiec;

		/*
		 * Start filling in frames with SGE's.  If we
		 * reach the end of frame and still have SGE's
		 * to fill we need to add a chain element and
		 * use another frame.  j will be our counter
		 * for what cookie we are at and i will be
		 * the total cookiec. k is the current frame
		 */
		for (k = 1; k <= frames; k++) {
			for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

				/*
				 * If we have reached the end of frame
				 * and we have more SGE's to fill in
				 * we have to fill the final entry
				 * with a chain element and then
				 * continue to the next frame
				 */
				if ((l == (sgemax + 1)) && (k != frames)) {
					sgechain = (pMpi2SGEChain64_t)sge;
					/*
					 * j-- because the chain element
					 * consumes this slot, not the
					 * current cookie; the cookie is
					 * written in the next frame.
					 */
					j--;
					chainflags = (
					    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
					    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
					    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
					ddi_put8(p->m_acc_hdl,
					    &sgechain->Flags, chainflags);
					/*
					 * k is the frame counter and (k + 1)
					 * is the number of the next frame.
					 * Note that frames are in contiguous
					 * memory space.
					 */
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.Low,
					    (p->m_phys_addr +
					    (mpt->m_req_frame_size * k)));
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.High, 0);

					/*
					 * If there are more than 2 frames left
					 * we have to next chain offset to
					 * the location of the chain element
					 * in the next frame and fill in the
					 * length of the next chain
					 */
					if ((frames - k) >= 2) {
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    (sgemax *
						    sizeof (MPI2_SGE_SIMPLE64))
						    >> 2);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    mpt->m_req_frame_size /
						    sizeof (MPI2_SGE_SIMPLE64) *
						    sizeof (MPI2_SGE_SIMPLE64));
					} else {
						/*
						 * This is the last frame. Set
						 * the NextChainOffset to 0 and
						 * Length is the total size of
						 * all remaining simple elements
						 */
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    0);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    (cookiec - j) *
						    sizeof (MPI2_SGE_SIMPLE64));
					}

					/* Jump to the next frame */
					sge = (pMpi2SGESimple64_t)
					    ((char *)p->m_frames_addr +
					    (int)mpt->m_req_frame_size * k);

					continue;
				}

				ddi_put32(p->m_acc_hdl,
				    &sge->Address.Low,
				    dmap->addr.address64.Low);
				ddi_put32(p->m_acc_hdl,
				    &sge->Address.High,
				    dmap->addr.address64.High);
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, dmap->count);
				flags = ddi_get32(p->m_acc_hdl,
				    &sge->FlagsLength);
				flags |= ((uint32_t)(
				    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
				    MPI2_SGE_FLAGS_SHIFT);

				/*
				 * If we are at the end of the frame and
				 * there is another frame to fill in
				 * we set the last simple element as last
				 * element
				 */
				if ((l == sgemax) && (k != frames)) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}

				/*
				 * If this is the final cookie we
				 * indicate it by setting the flags
				 */
				if (j == i) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT |
					    MPI2_SGE_FLAGS_END_OF_BUFFER |
					    MPI2_SGE_FLAGS_END_OF_LIST) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				if (cmd->cmd_flags & CFLAG_DMASEND) {
					flags |=
					    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
					    MPI2_SGE_FLAGS_SHIFT);
				} else {
					flags |=
					    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, flags);
				dmap++;
				sge++;
			}
		}

		/*
		 * Sync DMA with the chain buffers that were just created
		 */
		(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	}
}
4492 4494
4493 4495 /*
4494 4496 * Interrupt handling
4495 4497 * Utility routine. Poll for status of a command sent to HBA
4496 4498 * without interrupts (a FLAG_NOINTR command).
4497 4499 */
4498 4500 int
4499 4501 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4500 4502 {
4501 4503 int rval = TRUE;
4502 4504
4503 4505 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4504 4506
4505 4507 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4506 4508 mptsas_restart_hba(mpt);
4507 4509 }
4508 4510
4509 4511 /*
4510 4512 * Wait, using drv_usecwait(), long enough for the command to
4511 4513 * reasonably return from the target if the target isn't
4512 4514 * "dead". A polled command may well be sent from scsi_poll, and
4513 4515 * there are retries built in to scsi_poll if the transport
4514 4516 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4515 4517 * and retries the transport up to scsi_poll_busycnt times
4516 4518 * (currently 60) if
4517 4519 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4518 4520 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4519 4521 *
4520 4522 * limit the waiting to avoid a hang in the event that the
4521 4523 * cmd never gets started but we are still receiving interrupts
4522 4524 */
4523 4525 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4524 4526 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4525 4527 NDBG5(("mptsas_poll: command incomplete"));
4526 4528 rval = FALSE;
4527 4529 break;
4528 4530 }
4529 4531 }
4530 4532
4531 4533 if (rval == FALSE) {
4532 4534
4533 4535 /*
4534 4536 * this isn't supposed to happen, the hba must be wedged
4535 4537 * Mark this cmd as a timeout.
4536 4538 */
4537 4539 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4538 4540 (STAT_TIMEOUT|STAT_ABORTED));
4539 4541
4540 4542 if (poll_cmd->cmd_queued == FALSE) {
4541 4543
4542 4544 NDBG5(("mptsas_poll: not on waitq"));
4543 4545
4544 4546 poll_cmd->cmd_pkt->pkt_state |=
4545 4547 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4546 4548 } else {
4547 4549
4548 4550 /* find and remove it from the waitq */
4549 4551 NDBG5(("mptsas_poll: delete from waitq"));
4550 4552 mptsas_waitq_delete(mpt, poll_cmd);
4551 4553 }
4552 4554
4553 4555 }
4554 4556 mptsas_fma_check(mpt, poll_cmd);
4555 4557 NDBG5(("mptsas_poll: done"));
4556 4558 return (rval);
4557 4559 }
4558 4560
/*
 * Used for polling cmds and TM function.
 *
 * Drains at most one valid reply descriptor from the post queue with
 * chip interrupts masked, processing it via mptsas_process_intr().
 * Returns TRUE if a reply was found and processed within the allotted
 * time, FALSE otherwise.  Interrupts are restored to their saved mask
 * on every exit path.
 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
    int cnt;
    pMpi2ReplyDescriptorsUnion_t reply_desc_union;
    uint32_t int_mask;

    NDBG5(("mptsas_wait_intr"));

    /*
     * Flag that replies are being consumed by polling, so that
     * mptsas_intr() leaves any concurrently shared interrupt
     * unclaimed instead of racing us for the post queue.
     */
    mpt->m_polled_intr = 1;

    /*
     * Get the current interrupt mask and disable interrupts. When
     * re-enabling ints, set mask to saved value.
     */
    int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
    MPTSAS_DISABLE_INTR(mpt);

    /*
     * Poll for up to roughly polltime milliseconds: each pass that
     * finds no reply waits 1000us (drv_usecwait below) before the
     * next attempt.
     */
    for (cnt = 0; cnt < polltime; cnt++) {
        (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
            DDI_DMA_SYNC_FORCPU);

        reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
            MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

        /* An all-ones descriptor means the slot is still unused. */
        if (ddi_get32(mpt->m_acc_post_queue_hdl,
            &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
            ddi_get32(mpt->m_acc_post_queue_hdl,
            &reply_desc_union->Words.High) == 0xFFFFFFFF) {
            drv_usecwait(1000);
            continue;
        }

        /*
         * The reply is valid, process it according to its
         * type.
         */
        mptsas_process_intr(mpt, reply_desc_union);

        /* Advance the post index, wrapping at queue depth. */
        if (++mpt->m_post_index == mpt->m_post_queue_depth) {
            mpt->m_post_index = 0;
        }

        /*
         * Update the global reply index
         */
        ddi_put32(mpt->m_datap,
            &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
        mpt->m_polled_intr = 0;

        /*
         * Re-enable interrupts and quit.
         */
        ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
            int_mask);
        return (TRUE);

    }

    /*
     * Clear polling flag, re-enable interrupts and quit.
     */
    mpt->m_polled_intr = 0;
    ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
    return (FALSE);
}
4631 4633
4632 4634 static void
4633 4635 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4634 4636 pMpi2ReplyDescriptorsUnion_t reply_desc)
4635 4637 {
4636 4638 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4637 4639 uint16_t SMID;
4638 4640 mptsas_slots_t *slots = mpt->m_active;
4639 4641 mptsas_cmd_t *cmd = NULL;
4640 4642 struct scsi_pkt *pkt;
4641 4643
4642 4644 ASSERT(mutex_owned(&mpt->m_mutex));
4643 4645
4644 4646 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4645 4647 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4646 4648
4647 4649 /*
4648 4650 * This is a success reply so just complete the IO. First, do a sanity
4649 4651 * check on the SMID. The final slot is used for TM requests, which
4650 4652 * would not come into this reply handler.
4651 4653 */
4652 4654 if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4653 4655 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4654 4656 SMID);
4655 4657 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4656 4658 return;
4657 4659 }
4658 4660
4659 4661 cmd = slots->m_slot[SMID];
4660 4662
4661 4663 /*
4662 4664 * print warning and return if the slot is empty
4663 4665 */
4664 4666 if (cmd == NULL) {
4665 4667 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4666 4668 "in slot %d", SMID);
4667 4669 return;
4668 4670 }
4669 4671
4670 4672 pkt = CMD2PKT(cmd);
4671 4673 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4672 4674 STATE_GOT_STATUS);
4673 4675 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4674 4676 pkt->pkt_state |= STATE_XFERRED_DATA;
4675 4677 }
4676 4678 pkt->pkt_resid = 0;
4677 4679
4678 4680 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4679 4681 cmd->cmd_flags |= CFLAG_FINISHED;
4680 4682 cv_broadcast(&mpt->m_passthru_cv);
4681 4683 return;
4682 4684 } else {
4683 4685 mptsas_remove_cmd(mpt, cmd);
4684 4686 }
4685 4687
4686 4688 if (cmd->cmd_flags & CFLAG_RETRY) {
4687 4689 /*
4688 4690 * The target returned QFULL or busy, do not add tihs
4689 4691 * pkt to the doneq since the hba will retry
4690 4692 * this cmd.
4691 4693 *
4692 4694 * The pkt has already been resubmitted in
4693 4695 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4694 4696 * Remove this cmd_flag here.
4695 4697 */
4696 4698 cmd->cmd_flags &= ~CFLAG_RETRY;
4697 4699 } else {
4698 4700 mptsas_doneq_add(mpt, cmd);
4699 4701 }
4700 4702 }
4701 4703
/*
 * Handle an ADDRESS_REPLY descriptor: the full reply frame lives in
 * host memory at the address carried in the descriptor.  Validate the
 * frame address and SMID, look up the owning command (when one exists),
 * then dispatch on the MPI function that produced the reply.  Unless a
 * branch returns early, the reply frame is returned to the free queue
 * and the command is completed (or left pending retry).
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
    pMpi2AddressReplyDescriptor_t address_reply;
    pMPI2DefaultReply_t reply;
    mptsas_fw_diagnostic_buffer_t *pBuffer;
    uint32_t reply_addr;
    uint16_t SMID, iocstatus;
    mptsas_slots_t *slots = mpt->m_active;
    mptsas_cmd_t *cmd = NULL;
    uint8_t function, buffer_type;
    m_replyh_arg_t *args;
    int reply_frame_no;

    ASSERT(mutex_owned(&mpt->m_mutex));

    address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
    reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
        &address_reply->ReplyFrameAddress);
    SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

    /*
     * If reply frame is not in the proper range (within the reply
     * frame region and aligned on a frame boundary) we should ignore
     * this message and exit the interrupt handler.
     */
    if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
        (reply_addr >= (mpt->m_reply_frame_dma_addr +
        (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
        ((reply_addr - mpt->m_reply_frame_dma_addr) %
        mpt->m_reply_frame_size != 0)) {
        mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
            "address 0x%x\n", reply_addr);
        ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
        return;
    }

    (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
        DDI_DMA_SYNC_FORCPU);
    reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
        mpt->m_reply_frame_dma_addr));
    function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

    /*
     * don't get slot information and command for events since these values
     * don't exist
     */
    if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
        (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
        /*
         * This could be a TM reply, which use the last allocated SMID,
         * so allow for that (hence "m_n_slots + 1").
         */
        if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
            mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
                "%d\n", SMID);
            ddi_fm_service_impact(mpt->m_dip,
                DDI_SERVICE_UNAFFECTED);
            return;
        }

        cmd = slots->m_slot[SMID];

        /*
         * print warning and return if the slot is empty
         */
        if (cmd == NULL) {
            mptsas_log(mpt, CE_WARN, "?NULL command for address "
                "reply in slot %d", SMID);
            return;
        }
        /*
         * Passthrough, config and FW-diag commands are completed by
         * waking their waiters; the waiter consumes cmd_rfm and is
         * responsible for freeing the reply frame.
         */
        if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
            (cmd->cmd_flags & CFLAG_CONFIG) ||
            (cmd->cmd_flags & CFLAG_FW_DIAG)) {
            cmd->cmd_rfm = reply_addr;
            cmd->cmd_flags |= CFLAG_FINISHED;
            cv_broadcast(&mpt->m_passthru_cv);
            cv_broadcast(&mpt->m_config_cv);
            cv_broadcast(&mpt->m_fw_diag_cv);
            return;
        } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
            /* Ordinary command: release its request slot now. */
            mptsas_remove_cmd(mpt, cmd);
        }
        NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
    }
    /*
     * Depending on the function, we need to handle
     * the reply frame (and cmd) differently.
     */
    switch (function) {
    case MPI2_FUNCTION_SCSI_IO_REQUEST:
        mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
        break;
    case MPI2_FUNCTION_SCSI_TASK_MGMT:
        cmd->cmd_rfm = reply_addr;
        mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
            cmd);
        break;
    case MPI2_FUNCTION_FW_DOWNLOAD:
        /* Wake the single thread waiting on a firmware download. */
        cmd->cmd_flags |= CFLAG_FINISHED;
        cv_signal(&mpt->m_fw_cv);
        break;
    case MPI2_FUNCTION_EVENT_NOTIFICATION:
        reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
            mpt->m_reply_frame_size;
        args = &mpt->m_replyh_args[reply_frame_no];
        args->mpt = (void *)mpt;
        args->rfm = reply_addr;

        /*
         * Record the event if its type is enabled in
         * this mpt instance by ioctl.
         */
        mptsas_record_event(args);

        /*
         * Handle time critical events
         * NOT_RESPONDING/ADDED only now
         */
        if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
            /*
             * Would not return main process,
             * just let taskq resolve ack action
             * and ack would be sent in taskq thread
             */
            NDBG20(("send mptsas_handle_event_sync success"));
        }

        /*
         * Drop the event if the HBA is mid-reset.
         * NOTE(review): the reply frame is not returned to the free
         * queue on this path -- presumably safe because the queues
         * are reinitialized as part of reset; confirm.
         */
        if (mpt->m_in_reset) {
            NDBG20(("dropping event received during reset"));
            return;
        }

        if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
            (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
            mptsas_log(mpt, CE_WARN, "No memory available"
                "for dispatch taskq");
            /*
             * Return the reply frame to the free queue.
             */
            ddi_put32(mpt->m_acc_free_queue_hdl,
                &((uint32_t *)(void *)
                mpt->m_free_queue)[mpt->m_free_index], reply_addr);
            (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
                DDI_DMA_SYNC_FORDEV);
            if (++mpt->m_free_index == mpt->m_free_queue_depth) {
                mpt->m_free_index = 0;
            }

            ddi_put32(mpt->m_datap,
                &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
        }
        return;
    case MPI2_FUNCTION_DIAG_BUFFER_POST:
        /*
         * If SMID is 0, this implies that the reply is due to a
         * release function with a status that the buffer has been
         * released. Set the buffer flags accordingly.
         */
        if (SMID == 0) {
            iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
                &reply->IOCStatus);
            buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
                &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
            if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
                pBuffer =
                    &mpt->m_fw_diag_buffer_list[buffer_type];
                pBuffer->valid_data = TRUE;
                pBuffer->owned_by_firmware = FALSE;
                pBuffer->immediate = FALSE;
            }
        } else {
            /*
             * Normal handling of diag post reply with SMID.
             */
            cmd = slots->m_slot[SMID];

            /*
             * print warning and return if the slot is empty
             */
            if (cmd == NULL) {
                mptsas_log(mpt, CE_WARN, "?NULL command for "
                    "address reply in slot %d", SMID);
                return;
            }
            cmd->cmd_rfm = reply_addr;
            cmd->cmd_flags |= CFLAG_FINISHED;
            cv_broadcast(&mpt->m_fw_diag_cv);
        }
        return;
    default:
        mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
        break;
    }

    /*
     * Return the reply frame to the free queue.  (cmd is non-NULL
     * here: the functions that skip slot lookup all return above.)
     */
    ddi_put32(mpt->m_acc_free_queue_hdl,
        &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
        reply_addr);
    (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
        DDI_DMA_SYNC_FORDEV);
    if (++mpt->m_free_index == mpt->m_free_queue_depth) {
        mpt->m_free_index = 0;
    }
    ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
        mpt->m_free_index);

    /* FW commands are completed by their own waiters. */
    if (cmd->cmd_flags & CFLAG_FW_CMD)
        return;

    if (cmd->cmd_flags & CFLAG_RETRY) {
        /*
         * The target returned QFULL or busy, do not add this
         * pkt to the doneq since the hba will retry
         * this cmd.
         *
         * The pkt has already been resubmitted in
         * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
         * Remove this cmd_flag here.
         */
        cmd->cmd_flags &= ~CFLAG_RETRY;
    } else {
        mptsas_doneq_add(mpt, cmd);
    }
}
4923 4931
/*
 * Examine a full SCSI IO reply frame for a completed command and
 * translate the MPI status fields (SCSIStatus/IOCStatus/SCSIState)
 * into scsi_pkt reason/state/resid.  Handles check-condition sense
 * propagation, device-gone, over/underrun, terminations and
 * resource-busy retries.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
    uint8_t scsi_status, scsi_state;
    uint16_t ioc_status;
    uint32_t xferred, sensecount, responsedata, loginfo = 0;
    struct scsi_pkt *pkt;
    struct scsi_arq_status *arqstat;
    struct buf *bp;
    mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
    uint8_t *sensedata = NULL;

    /* Pick whichever auto-request-sense buffer this command used. */
    if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
        (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
        bp = cmd->cmd_ext_arq_buf;
    } else {
        bp = cmd->cmd_arq_buf;
    }

    /* Extract all status fields from the reply frame up front. */
    scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
    ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
    scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
    xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
    sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
    responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
        &reply->ResponseInfo);

    if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
        loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
            &reply->IOCLogInfo);
        mptsas_log(mpt, CE_NOTE,
            "?Log info 0x%x received for target %d.\n"
            "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
            loginfo, Tgt(cmd), scsi_status, ioc_status,
            scsi_state);
    }

    NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
        scsi_status, ioc_status, scsi_state));

    pkt = CMD2PKT(cmd);
    *(pkt->pkt_scbp) = scsi_status;

    if (loginfo == 0x31170000) {
        /*
         * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
         * 0x31170000 comes, that means the device missing delay
         * is in progressing, the command need retry later.
         * Report STATUS_BUSY so scsi_poll()/target driver retries.
         */
        *(pkt->pkt_scbp) = STATUS_BUSY;
        return;
    }

    /* No SCSI status at all plus DEVICE_NOT_THERE: device vanished. */
    if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
        ((ioc_status & MPI2_IOCSTATUS_MASK) ==
        MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
        pkt->pkt_reason = CMD_INCOMPLETE;
        pkt->pkt_state |= STATE_GOT_BUS;
        if (ptgt->m_reset_delay == 0) {
            mptsas_set_throttle(mpt, ptgt,
                DRAIN_THROTTLE);
        }
        return;
    }

    if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
        /* Only the low byte of ResponseInfo carries the code. */
        responsedata &= 0x000000FF;
        if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
            mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
            pkt->pkt_reason = CMD_TLR_OFF;
            return;
        }
    }


    switch (scsi_status) {
    case MPI2_SCSI_STATUS_CHECK_CONDITION:
        /*
         * Build the auto-request-sense status in pkt_scbp: copy
         * the sense bytes from the ARQ buffer and mirror the pkt
         * state into the embedded request-sense packet fields.
         */
        pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
        arqstat = (void*)(pkt->pkt_scbp);
        arqstat->sts_rqpkt_status = *((struct scsi_status *)
            (pkt->pkt_scbp));
        pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
            STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
        if (cmd->cmd_flags & CFLAG_XARQ) {
            pkt->pkt_state |= STATE_XARQ_DONE;
        }
        if (pkt->pkt_resid != cmd->cmd_dmacount) {
            pkt->pkt_state |= STATE_XFERRED_DATA;
        }
        arqstat->sts_rqpkt_reason = pkt->pkt_reason;
        arqstat->sts_rqpkt_state = pkt->pkt_state;
        arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
        arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
        sensedata = (uint8_t *)&arqstat->sts_sensedata;

        /* Copy no more sense bytes than the caller's buffer holds. */
        bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
            ((cmd->cmd_rqslen >= sensecount) ? sensecount :
            cmd->cmd_rqslen));
        arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
        cmd->cmd_flags |= CFLAG_CMDARQ;
        /*
         * Set proper status for pkt if autosense was valid
         */
        if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
            struct scsi_status zero_status = { 0 };
            arqstat->sts_rqpkt_status = zero_status;
        }

        /*
         * ASC=0x47 is parity error
         * ASC=0x48 is initiator detected error received
         */
        if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
            ((scsi_sense_asc(sensedata) == 0x47) ||
            (scsi_sense_asc(sensedata) == 0x48))) {
            mptsas_log(mpt, CE_NOTE, "Aborted_command!");
        }

        /*
         * ASC/ASCQ=0x3F/0x0E means report_luns data changed
         * ASC/ASCQ=0x25/0x00 means invalid lun
         * Either one triggers a dynamic-reconfiguration pass for
         * the target via the DR taskq.
         */
        if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
            (scsi_sense_asc(sensedata) == 0x3F) &&
            (scsi_sense_ascq(sensedata) == 0x0E)) ||
            ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
            (scsi_sense_asc(sensedata) == 0x25) &&
            (scsi_sense_ascq(sensedata) == 0x00))) {
            mptsas_topo_change_list_t *topo_node = NULL;

            topo_node = kmem_zalloc(
                sizeof (mptsas_topo_change_list_t),
                KM_NOSLEEP);
            if (topo_node == NULL) {
                mptsas_log(mpt, CE_NOTE, "No memory"
                    "resource for handle SAS dynamic"
                    "reconfigure.\n");
                break;
            }
            topo_node->mpt = mpt;
            topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
            topo_node->un.phymask = ptgt->m_phymask;
            topo_node->devhdl = ptgt->m_devhdl;
            topo_node->object = (void *)ptgt;
            topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

            if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
                mptsas_handle_dr,
                (void *)topo_node,
                DDI_NOSLEEP)) != DDI_SUCCESS) {
                mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
                    "for handle SAS dynamic reconfigure"
                    "failed. \n");
            }
        }
        break;
    case MPI2_SCSI_STATUS_GOOD:
        switch (ioc_status & MPI2_IOCSTATUS_MASK) {
        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
            pkt->pkt_reason = CMD_DEV_GONE;
            pkt->pkt_state |= STATE_GOT_BUS;
            if (ptgt->m_reset_delay == 0) {
                mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
            }
            NDBG31(("lost disk for target%d, command:%x",
                Tgt(cmd), pkt->pkt_cdbp[0]));
            break;
        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
            NDBG31(("data overrun: xferred=%d", xferred));
            NDBG31(("dmacount=%d", cmd->cmd_dmacount));
            pkt->pkt_reason = CMD_DATA_OVR;
            pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
                | STATE_SENT_CMD | STATE_GOT_STATUS
                | STATE_XFERRED_DATA);
            pkt->pkt_resid = 0;
            break;
        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
            NDBG31(("data underrun: xferred=%d", xferred));
            NDBG31(("dmacount=%d", cmd->cmd_dmacount));
            pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
                | STATE_SENT_CMD | STATE_GOT_STATUS);
            pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
            if (pkt->pkt_resid != cmd->cmd_dmacount) {
                pkt->pkt_state |= STATE_XFERRED_DATA;
            }
            break;
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
            mptsas_set_pkt_reason(mpt,
                cmd, CMD_RESET, STAT_BUS_RESET);
            break;
        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
            mptsas_set_pkt_reason(mpt,
                cmd, CMD_RESET, STAT_DEV_RESET);
            break;
        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
            pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
            mptsas_set_pkt_reason(mpt,
                cmd, CMD_TERMINATED, STAT_TERMINATED);
            break;
        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
        case MPI2_IOCSTATUS_BUSY:
            /*
             * set throttles to drain for ALL targets, since
             * the IOC itself is short on resources
             */
            ptgt = (mptsas_target_t *)mptsas_hash_traverse(
                &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
            while (ptgt != NULL) {
                mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

                ptgt = (mptsas_target_t *)mptsas_hash_traverse(
                    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
            }

            /*
             * retry command (at the head of the wait queue)
             */
            cmd->cmd_flags |= CFLAG_RETRY;
            cmd->cmd_pkt_flags |= FLAG_HEAD;

            (void) mptsas_accept_pkt(mpt, cmd);
            break;
        default:
            mptsas_log(mpt, CE_WARN,
                "unknown ioc_status = %x\n", ioc_status);
            mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
                "count = %x, scsi_status = %x", scsi_state,
                xferred, scsi_status);
            break;
        }
        break;
    case MPI2_SCSI_STATUS_TASK_SET_FULL:
        mptsas_handle_qfull(mpt, cmd);
        break;
    case MPI2_SCSI_STATUS_BUSY:
        NDBG31(("scsi_status busy received"));
        break;
    case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
        NDBG31(("scsi_status reservation conflict received"));
        break;
    default:
        mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
            scsi_status, ioc_status);
        mptsas_log(mpt, CE_WARN,
            "mptsas_process_intr: invalid scsi status\n");
        break;
    }
}
5175 5183
5176 5184 static void
5177 5185 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5178 5186 mptsas_cmd_t *cmd)
5179 5187 {
5180 5188 uint8_t task_type;
5181 5189 uint16_t ioc_status;
5182 5190 uint32_t log_info;
5183 5191 uint16_t dev_handle;
5184 5192 struct scsi_pkt *pkt = CMD2PKT(cmd);
5185 5193
5186 5194 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5187 5195 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5188 5196 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5189 5197 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5190 5198
5191 5199 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5192 5200 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5193 5201 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5194 5202 task_type, ioc_status, log_info, dev_handle);
5195 5203 pkt->pkt_reason = CMD_INCOMPLETE;
5196 5204 return;
5197 5205 }
5198 5206
5199 5207 switch (task_type) {
5200 5208 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5201 5209 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5202 5210 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5203 5211 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5204 5212 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5205 5213 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5206 5214 break;
5207 5215 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5208 5216 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5209 5217 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5210 5218 /*
5211 5219 * Check for invalid DevHandle of 0 in case application
5212 5220 * sends bad command. DevHandle of 0 could cause problems.
5213 5221 */
5214 5222 if (dev_handle == 0) {
5215 5223 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5216 5224 " DevHandle of 0.");
5217 5225 } else {
5218 5226 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5219 5227 task_type);
5220 5228 }
5221 5229 break;
5222 5230 default:
5223 5231 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5224 5232 task_type);
5225 5233 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5226 5234 break;
5227 5235 }
5228 5236 }
5229 5237
/*
 * Body of one done-queue helper thread (thread index arg->t).  Pulls
 * completed commands off its per-thread done queue and runs their
 * completion callbacks, then decrements the live-thread count on exit.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
    mptsas_t *mpt = arg->mpt;
    uint64_t t = arg->t;
    mptsas_cmd_t *cmd;
    struct scsi_pkt *pkt;
    mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];

    mutex_enter(&item->mutex);
    while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
        /* Sleep until work arrives (or the active flag changes). */
        if (!item->doneq) {
            cv_wait(&item->cv, &item->mutex);
        }
        pkt = NULL;
        if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
            cmd->cmd_flags |= CFLAG_COMPLETED;
            pkt = CMD2PKT(cmd);
        }
        /*
         * Drop the list lock before calling the completion
         * routine -- pkt_comp may block or re-enter the driver --
         * then retake it before re-checking the active flag.
         */
        mutex_exit(&item->mutex);
        if (pkt) {
            mptsas_pkt_comp(pkt, cmd);
        }
        mutex_enter(&item->mutex);
    }
    mutex_exit(&item->mutex);
    /*
     * Thread is exiting: update the helper-thread count and wake
     * anyone waiting for all helpers to drain (e.g. detach).
     */
    mutex_enter(&mpt->m_doneq_mutex);
    mpt->m_doneq_thread_n--;
    cv_broadcast(&mpt->m_doneq_thread_cv);
    mutex_exit(&mpt->m_doneq_mutex);
}
5261 5269
5262 5270
/*
 * mpt interrupt handler.
 *
 * Drains every valid reply descriptor from the post queue, updates the
 * chip's reply-post index, completes finished commands (inline or via
 * helper threads) and restarts any queued commands.  Returns
 * DDI_INTR_UNCLAIMED when the interrupt was not for this instance.
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
    mptsas_t *mpt = (void *)arg1;
    pMpi2ReplyDescriptorsUnion_t reply_desc_union;
    uchar_t did_reply = FALSE;

    NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

    mutex_enter(&mpt->m_mutex);

    /*
     * If interrupts are shared by two channels then check whether this
     * interrupt is genuinely for this channel by making sure first the
     * chip is in high power state.
     */
    if ((mpt->m_options & MPTSAS_OPT_PM) &&
        (mpt->m_power_level != PM_LEVEL_D0)) {
        mutex_exit(&mpt->m_mutex);
        return (DDI_INTR_UNCLAIMED);
    }

    /*
     * If polling, interrupt was triggered by some shared interrupt because
     * IOC interrupts are disabled during polling, so polling routine will
     * handle any replies. Considering this, if polling is happening,
     * return with interrupt unclaimed.
     */
    if (mpt->m_polled_intr) {
        mutex_exit(&mpt->m_mutex);
        mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
        return (DDI_INTR_UNCLAIMED);
    }

    /*
     * Read the istat register.
     */
    if ((INTPENDING(mpt)) != 0) {
        /*
         * read fifo until empty.
         */
#ifndef __lock_lint
        _NOTE(CONSTCOND)
#endif
        while (TRUE) {
            (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
                DDI_DMA_SYNC_FORCPU);
            reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
                MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

            /* All-ones descriptor => queue drained; stop. */
            if (ddi_get32(mpt->m_acc_post_queue_hdl,
                &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
                ddi_get32(mpt->m_acc_post_queue_hdl,
                &reply_desc_union->Words.High) == 0xFFFFFFFF) {
                break;
            }

            /*
             * The reply is valid, process it according to its
             * type. Also, set a flag for updating the reply index
             * after they've all been processed.
             */
            did_reply = TRUE;

            mptsas_process_intr(mpt, reply_desc_union);

            /*
             * Increment post index and roll over if needed.
             */
            if (++mpt->m_post_index == mpt->m_post_queue_depth) {
                mpt->m_post_index = 0;
            }
        }

        /*
         * Update the global reply index if at least one reply was
         * processed.
         */
        if (did_reply) {
            ddi_put32(mpt->m_datap,
                &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
        }
    } else {
        mutex_exit(&mpt->m_mutex);
        return (DDI_INTR_UNCLAIMED);
    }
    NDBG1(("mptsas_intr complete"));

    /*
     * If no helper threads are created, process the doneq in ISR. If
     * helpers are created, use the doneq length as a metric to measure the
     * load on the interrupt CPU. If it is long enough, which indicates the
     * load is heavy, then we deliver the IO completions to the helpers.
     * This measurement has some limitations, although it is simple and
     * straightforward and works well for most of the cases at present.
     */
    if (!mpt->m_doneq_thread_n ||
        (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
        mptsas_doneq_empty(mpt);
    } else {
        mptsas_deliver_doneq_thread(mpt);
    }

    /*
     * If there are queued cmd, start them now.
     */
    if (mpt->m_waitq != NULL) {
        mptsas_restart_waitq(mpt);
    }

    mutex_exit(&mpt->m_mutex);
    return (DDI_INTR_CLAIMED);
}
5379 5387
/*
 * Process one valid reply descriptor that the interrupt path pulled off
 * the post (reply descriptor) queue, then invalidate the slot so the
 * IOC can reuse it.
 *
 * Dispatch is on the descriptor's ReplyFlags type field: fast-path
 * SCSI IO success descriptors and full address-reply descriptors each
 * get their own handler; any other type is logged and reported to FMA
 * as service-unaffected.
 *
 * Called with mpt->m_mutex held.  The caller writes the accumulated
 * m_post_index back to the chip's ReplyPostHostIndex register after the
 * batch of replies has been processed.
 */
static void
mptsas_process_intr(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc_union)
{
	uint8_t reply_type;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * The reply is valid, process it according to its
	 * type.  Also, set a flag for updating the reply index
	 * after they've all been processed.
	 */
	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
	    &reply_desc_union->Default.ReplyFlags);
	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
		mptsas_handle_address_reply(mpt, reply_desc_union);
	} else {
		/* Unknown descriptor type: warn and tell FMA, keep going. */
		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}

	/*
	 * Clear the reply descriptor for re-use (all-ones marks the slot
	 * unused) and sync the change out to the device before the host
	 * index is advanced past it.
	 */
	ddi_put64(mpt->m_acc_post_queue_hdl,
	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
	    0xFFFFFFFFFFFFFFFF);
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
5415 5423
/*
 * Handle a QFULL (queue full) status returned by the target for cmd.
 *
 * If the per-target qfull-retries budget is exhausted, or set to 0
 * (meaning the target driver wants to see QFULL itself), drain the
 * throttle so the status is passed up.  Otherwise throttle the target
 * down and re-queue the command at the head of the wait queue for
 * another attempt; if nothing is outstanding, switch to QFULL_THROTTLE
 * and arm the restart timeout.
 */
static void
mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
	    (ptgt->m_qfull_retries == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0. In either case
		 * we want the target driver's QFULL handling
		 * to kick in. We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	} else {
		/* Back off to two fewer than currently outstanding. */
		if (ptgt->m_reset_delay == 0) {
			ptgt->m_t_throttle =
			    max((ptgt->m_t_ncmds - 2), 0);
		}

		/* Retry: requeue this command at the head of the waitq. */
		cmd->cmd_pkt_flags |= FLAG_HEAD;
		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
		cmd->cmd_flags |= CFLAG_RETRY;

		(void) mptsas_accept_pkt(mpt, cmd);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (m_t_ncmds == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling starts
		 * (see psarc/1994/313); if there are commands outstanding,
		 * throttle is set to (m_t_ncmds - 2)
		 */
		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * mptsas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
			if (mpt->m_restart_cmd_timeid == 0) {
				mpt->m_restart_cmd_timeid =
				    timeout(mptsas_restart_cmd, mpt,
				    ptgt->m_qfull_retry_interval);
			}
		}
	}
}
5471 5479
5472 5480 mptsas_phymask_t
5473 5481 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5474 5482 {
5475 5483 mptsas_phymask_t phy_mask = 0;
5476 5484 uint8_t i = 0;
5477 5485
5478 5486 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5479 5487
5480 5488 ASSERT(mutex_owned(&mpt->m_mutex));
5481 5489
5482 5490 /*
5483 5491 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5484 5492 */
5485 5493 if (physport == 0xFF) {
5486 5494 return (0);
5487 5495 }
5488 5496
5489 5497 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5490 5498 if (mpt->m_phy_info[i].attached_devhdl &&
5491 5499 (mpt->m_phy_info[i].phy_mask != 0) &&
5492 5500 (mpt->m_phy_info[i].port_num == physport)) {
5493 5501 phy_mask = mpt->m_phy_info[i].phy_mask;
5494 5502 break;
5495 5503 }
5496 5504 }
5497 5505 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5498 5506 mpt->m_instance, physport, phy_mask));
5499 5507 return (phy_mask);
5500 5508 }
5501 5509
5502 5510 /*
5503 5511 * mpt free device handle after device gone, by use of passthrough
5504 5512 */
5505 5513 static int
5506 5514 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5507 5515 {
5508 5516 Mpi2SasIoUnitControlRequest_t req;
5509 5517 Mpi2SasIoUnitControlReply_t rep;
5510 5518 int ret;
5511 5519
5512 5520 ASSERT(mutex_owned(&mpt->m_mutex));
5513 5521
5514 5522 /*
5515 5523 * Need to compose a SAS IO Unit Control request message
5516 5524 * and call mptsas_do_passthru() function
5517 5525 */
5518 5526 bzero(&req, sizeof (req));
5519 5527 bzero(&rep, sizeof (rep));
5520 5528
5521 5529 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5522 5530 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5523 5531 req.DevHandle = LE_16(devhdl);
5524 5532
5525 5533 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5526 5534 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5527 5535 if (ret != 0) {
5528 5536 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5529 5537 "Control error %d", ret);
5530 5538 return (DDI_FAILURE);
5531 5539 }
5532 5540
5533 5541 /* do passthrough success, check the ioc status */
5534 5542 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5535 5543 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5536 5544 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5537 5545 return (DDI_FAILURE);
5538 5546 }
5539 5547
5540 5548 return (DDI_SUCCESS);
5541 5549 }
5542 5550
5543 5551 static void
5544 5552 mptsas_update_phymask(mptsas_t *mpt)
5545 5553 {
5546 5554 mptsas_phymask_t mask = 0, phy_mask;
5547 5555 char *phy_mask_name;
5548 5556 uint8_t current_port;
5549 5557 int i, j;
5550 5558
5551 5559 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5552 5560
5553 5561 ASSERT(mutex_owned(&mpt->m_mutex));
5554 5562
5555 5563 (void) mptsas_get_sas_io_unit_page(mpt);
5556 5564
5557 5565 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5558 5566
5559 5567 for (i = 0; i < mpt->m_num_phys; i++) {
5560 5568 phy_mask = 0x00;
5561 5569
5562 5570 if (mpt->m_phy_info[i].attached_devhdl == 0)
5563 5571 continue;
5564 5572
5565 5573 bzero(phy_mask_name, sizeof (phy_mask_name));
5566 5574
5567 5575 current_port = mpt->m_phy_info[i].port_num;
5568 5576
5569 5577 if ((mask & (1 << i)) != 0)
5570 5578 continue;
5571 5579
5572 5580 for (j = 0; j < mpt->m_num_phys; j++) {
5573 5581 if (mpt->m_phy_info[j].attached_devhdl &&
5574 5582 (mpt->m_phy_info[j].port_num == current_port)) {
5575 5583 phy_mask |= (1 << j);
5576 5584 }
5577 5585 }
5578 5586 mask = mask | phy_mask;
5579 5587
5580 5588 for (j = 0; j < mpt->m_num_phys; j++) {
5581 5589 if ((phy_mask >> j) & 0x01) {
5582 5590 mpt->m_phy_info[j].phy_mask = phy_mask;
5583 5591 }
5584 5592 }
5585 5593
5586 5594 (void) sprintf(phy_mask_name, "%x", phy_mask);
5587 5595
5588 5596 mutex_exit(&mpt->m_mutex);
5589 5597 /*
5590 5598 * register a iport, if the port has already been existed
5591 5599 * SCSA will do nothing and just return.
5592 5600 */
5593 5601 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5594 5602 mutex_enter(&mpt->m_mutex);
5595 5603 }
5596 5604 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5597 5605 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5598 5606 }
5599 5607
/*
 * mptsas_handle_dr is a task handler for DR (dynamic reconfiguration).
 * The DR actions include:
 * 1. Directly attached device added/removed.
 * 2. Expander device added/removed.
 * 3. Indirectly attached device added/removed.
 * 4. LUNs of an existing device status change.
 * 5. RAID volume created/deleted.
 * 6. Member of RAID volume is released because of RAID deletion.
 * 7. Physical disks are removed because of RAID creation.
 *
 * Walks the mptsas_topo_change_list_t chain handed in via args, finds
 * the iport dip for each node, delegates to mptsas_handle_topo_change()
 * and frees each node as it goes.  The list is always freed, even when
 * the HBA is in reset and topology processing itself is skipped.
 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char	*phy_mask_name;
	uint8_t	flags = 0, physport = 0xff;
	uint8_t	port_update = 0;
	uint_t	event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system; it must be the head
		 * of the topo_change_list_t.
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases with port_update == 1 may cause the initiator port
	 * formation to change.
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of the initiator
		 * port have changed to online.  So when an expander-added or
		 * directly-attached-device-online event comes in, force an
		 * update of port information by issuing a SAS IO Unit Page
		 * request and updating the PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known.
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			/*
			 * NOTE(review): this tests 'event' against a
			 * MPTSAS_TOPO_FLAG_ constant; appears intentional
			 * since REMOVE_HANDLE is delivered via the event
			 * field — confirm against the event producers.
			 */
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no field in the IR_CONFIG_CHANGE
				 * event that indicates physport/phynum, so
				 * get the parent after the SAS Device Page0
				 * request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If a direct attached device was added or a
				 * phys disk is being unhidden, the argument
				 * physport actually is PHY#, so we have to
				 * derive the phymask from the PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * for the parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For a RAID topology change node, write the iport
			 * name as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed.  If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC. We must free the topo list, however.
		 */
		if (!mpt->m_in_reset)
			mptsas_handle_topo_change(topo_node, parent);
		else
			NDBG20(("skipping topo change received during reset"));
		/* Consume the node regardless of whether it was handled. */
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If a direct attached device is associated, make
			 * sure the parent is reset before starting the next
			 * node.  All devices associated with an expander
			 * share the parent.  Also reset the parent if this
			 * is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
5789 5803
/*
 * Apply a single topology change node: online/offline a target or SMP
 * (expander), free IOC device handles, and keep SMHBA iport properties
 * in sync.
 *
 * Called with mpt->m_mutex held; the mutex is dropped around NDI
 * (devinfo tree) operations and re-acquired afterwards.  'parent' is
 * the iport dip located by mptsas_handle_dr(), except for the
 * RAID_PHYSDRV_ASSOCIATED online case, which looks it up here.
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t	phy, flags;
	char	*addr = NULL;
	dev_info_t	*lundip;
	int	circ = 0, circ1 = 0;
	char	attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = mptsas_search_by_devhdl(
			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get SAS device page 0 by DevHandle to check whether
			 * an SSP/SATA end device exists.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK then there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/* Unhidden phys disk: locate its iport by phymask. */
			phymask = ptgt->m_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_sas_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					/*
					 * NOTE(review): message is missing a
					 * space ("Failed to" + "attached-…");
					 * left as-is, runtime string.
					 */
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);
				mptsas_smhba_set_phy_props(mpt,
				    ddi_get_name_addr(parent), parent,
				    1, &attached_devhdl);
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
		devhdl = topo_node->devhdl;
		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_sas_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* WWN-addressed targets use "w<wwn>", phy-addressed "p<n>". */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding commands on the device.
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
			    rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded: drop the target entry. */
			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
			    ptgt->m_sas_wwn, ptgt->m_phymask);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 to validate/describe the new SMP. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(smptbl, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			mptsas_smp_free(smptbl, psmp->m_sasaddr,
			    psmp->m_phymask);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6222 6232
/*
 * Record the event if its type is enabled in mpt instance by ioctl.
 *
 * Taskq handler: copies event reply data into the instance's circular
 * event log (mpt->m_events) when the event type bit is set in
 * m_event_mask, and posts a DDI sysevent ("MPT_SAS"/"SAS") so userland
 * listeners learn that something happened.  MPI2_EVENT_LOG_ENTRY_ADDED
 * always generates the sysevent regardless of the mask.
 */
static void
mptsas_record_event(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	int				i, j;
	uint16_t			event_data_len;
	boolean_t			sendAEN = FALSE;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	/* Locate the event reply frame within the reply frame DMA area. */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);


	/*
	 * Generate a system event to let anyone who cares know that a
	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
	 * event mask is set to.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
		sendAEN = TRUE;
	}

	/*
	 * Record the event only if it is not masked.  Determine which dword
	 * and bit of event mask to test.
	 */
	i = (uint8_t)(event / 32);
	j = (uint8_t)(event % 32);
	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
		i = mpt->m_event_index;
		mpt->m_events[i].Type = event;
		mpt->m_events[i].Number = ++mpt->m_event_number;
		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &eventreply->EventDataLength);

		if (event_data_len > 0) {
			/*
			 * Limit data to the size of an m_events entry.
			 */
			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
			}
			for (j = 0; j < event_data_len; j++) {
				mpt->m_events[i].Data[j] =
				    ddi_get32(mpt->m_acc_reply_frame_hdl,
				    &(eventreply->EventData[j]));
			}

			/*
			 * check for index wrap-around
			 *
			 * NOTE(review): m_event_index only advances (and
			 * sendAEN is only set) when the event carried data;
			 * zero-length events overwrite the same slot silently
			 * — confirm this is intended.
			 */
			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
				i = 0;
			}
			mpt->m_event_index = (uint8_t)i;

			/*
			 * Set flag to send the event.
			 */
			sendAEN = TRUE;
		}
	}

	/*
	 * Generate a system event if flag is set to let anyone who cares know
	 * that an event has occurred.
	 */
	if (sendAEN) {
		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
		    "SAS", NULL, NULL, DDI_NOSLEEP);
	}
}
6306 6316
6307 6317 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6308 6318 /*
6309 6319 * handle sync events from ioc in interrupt
6310 6320 * return value:
6311 6321 * DDI_SUCCESS: The event is handled by this func
6312 6322 * DDI_FAILURE: Event is not handled
6313 6323 */
6314 6324 static int
6315 6325 mptsas_handle_event_sync(void *args)
6316 6326 {
6317 6327 m_replyh_arg_t *replyh_arg;
6318 6328 pMpi2EventNotificationReply_t eventreply;
6319 6329 uint32_t event, rfm;
6320 6330 mptsas_t *mpt;
6321 6331 uint_t iocstatus;
6322 6332
6323 6333 replyh_arg = (m_replyh_arg_t *)args;
6324 6334 rfm = replyh_arg->rfm;
6325 6335 mpt = replyh_arg->mpt;
6326 6336
6327 6337 ASSERT(mutex_owned(&mpt->m_mutex));
6328 6338
6329 6339 eventreply = (pMpi2EventNotificationReply_t)
6330 6340 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6331 6341 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6332 6342
6333 6343 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6334 6344 &eventreply->IOCStatus)) {
6335 6345 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6336 6346 mptsas_log(mpt, CE_WARN,
6337 6347 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6338 6348 "IOCLogInfo=0x%x", iocstatus,
6339 6349 ddi_get32(mpt->m_acc_reply_frame_hdl,
6340 6350 &eventreply->IOCLogInfo));
6341 6351 } else {
6342 6352 mptsas_log(mpt, CE_WARN,
6343 6353 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6344 6354 "IOCLogInfo=0x%x", iocstatus,
6345 6355 ddi_get32(mpt->m_acc_reply_frame_hdl,
6346 6356 &eventreply->IOCLogInfo));
6347 6357 }
6348 6358 }
6349 6359
6350 6360 /*
6351 6361 * figure out what kind of event we got and handle accordingly
6352 6362 */
6353 6363 switch (event) {
6354 6364 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6355 6365 {
6356 6366 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6357 6367 uint8_t num_entries, expstatus, phy;
6358 6368 uint8_t phystatus, physport, state, i;
6359 6369 uint8_t start_phy_num, link_rate;
6360 6370 uint16_t dev_handle, reason_code;
6361 6371 uint16_t enc_handle, expd_handle;
6362 6372 char string[80], curr[80], prev[80];
6363 6373 mptsas_topo_change_list_t *topo_head = NULL;
6364 6374 mptsas_topo_change_list_t *topo_tail = NULL;
6365 6375 mptsas_topo_change_list_t *topo_node = NULL;
6366 6376 mptsas_target_t *ptgt;
6367 6377 mptsas_smp_t *psmp;
6368 6378 mptsas_hash_table_t *tgttbl, *smptbl;
6369 6379 uint8_t flags = 0, exp_flag;
6370 6380 smhba_info_t *pSmhba = NULL;
6371 6381
6372 6382 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6373 6383
6374 6384 tgttbl = &mpt->m_active->m_tgttbl;
6375 6385 smptbl = &mpt->m_active->m_smptbl;
6376 6386
6377 6387 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6378 6388 eventreply->EventData;
6379 6389
6380 6390 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6381 6391 &sas_topo_change_list->EnclosureHandle);
6382 6392 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6383 6393 &sas_topo_change_list->ExpanderDevHandle);
6384 6394 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6385 6395 &sas_topo_change_list->NumEntries);
6386 6396 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6387 6397 &sas_topo_change_list->StartPhyNum);
6388 6398 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6389 6399 &sas_topo_change_list->ExpStatus);
6390 6400 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6391 6401 &sas_topo_change_list->PhysicalPort);
6392 6402
6393 6403 string[0] = 0;
6394 6404 if (expd_handle) {
6395 6405 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6396 6406 switch (expstatus) {
6397 6407 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6398 6408 (void) sprintf(string, " added");
6399 6409 /*
6400 6410 * New expander device added
6401 6411 */
6402 6412 mpt->m_port_chng = 1;
6403 6413 topo_node = kmem_zalloc(
6404 6414 sizeof (mptsas_topo_change_list_t),
6405 6415 KM_SLEEP);
6406 6416 topo_node->mpt = mpt;
6407 6417 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6408 6418 topo_node->un.physport = physport;
6409 6419 topo_node->devhdl = expd_handle;
6410 6420 topo_node->flags = flags;
6411 6421 topo_node->object = NULL;
6412 6422 if (topo_head == NULL) {
6413 6423 topo_head = topo_tail = topo_node;
6414 6424 } else {
6415 6425 topo_tail->next = topo_node;
6416 6426 topo_tail = topo_node;
6417 6427 }
6418 6428 break;
6419 6429 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6420 6430 (void) sprintf(string, " not responding, "
6421 6431 "removed");
6422 6432 psmp = mptsas_search_by_devhdl(smptbl,
6423 6433 expd_handle);
6424 6434 if (psmp == NULL)
6425 6435 break;
6426 6436
6427 6437 topo_node = kmem_zalloc(
6428 6438 sizeof (mptsas_topo_change_list_t),
6429 6439 KM_SLEEP);
6430 6440 topo_node->mpt = mpt;
6431 6441 topo_node->un.phymask = psmp->m_phymask;
6432 6442 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6433 6443 topo_node->devhdl = expd_handle;
6434 6444 topo_node->flags = flags;
6435 6445 topo_node->object = NULL;
6436 6446 if (topo_head == NULL) {
6437 6447 topo_head = topo_tail = topo_node;
6438 6448 } else {
6439 6449 topo_tail->next = topo_node;
6440 6450 topo_tail = topo_node;
6441 6451 }
6442 6452 break;
6443 6453 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6444 6454 break;
6445 6455 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6446 6456 (void) sprintf(string, " not responding, "
6447 6457 "delaying removal");
6448 6458 break;
6449 6459 default:
6450 6460 break;
6451 6461 }
6452 6462 } else {
6453 6463 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6454 6464 }
6455 6465
6456 6466 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6457 6467 enc_handle, expd_handle, string));
6458 6468 for (i = 0; i < num_entries; i++) {
6459 6469 phy = i + start_phy_num;
6460 6470 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6461 6471 &sas_topo_change_list->PHY[i].PhyStatus);
6462 6472 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6463 6473 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6464 6474 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6465 6475 /*
6466 6476 * Filter out processing of Phy Vacant Status unless
6467 6477 * the reason code is "Not Responding". Process all
6468 6478 * other combinations of Phy Status and Reason Codes.
6469 6479 */
6470 6480 if ((phystatus &
6471 6481 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6472 6482 (reason_code !=
6473 6483 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6474 6484 continue;
6475 6485 }
6476 6486 curr[0] = 0;
6477 6487 prev[0] = 0;
6478 6488 string[0] = 0;
6479 6489 switch (reason_code) {
6480 6490 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6481 6491 {
6482 6492 NDBG20(("mptsas%d phy %d physical_port %d "
6483 6493 "dev_handle %d added", mpt->m_instance, phy,
6484 6494 physport, dev_handle));
6485 6495 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6486 6496 &sas_topo_change_list->PHY[i].LinkRate);
6487 6497 state = (link_rate &
6488 6498 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6489 6499 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6490 6500 switch (state) {
6491 6501 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6492 6502 (void) sprintf(curr, "is disabled");
6493 6503 break;
6494 6504 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6495 6505 (void) sprintf(curr, "is offline, "
6496 6506 "failed speed negotiation");
6497 6507 break;
6498 6508 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6499 6509 (void) sprintf(curr, "SATA OOB "
6500 6510 "complete");
6501 6511 break;
6502 6512 case SMP_RESET_IN_PROGRESS:
6503 6513 (void) sprintf(curr, "SMP reset in "
6504 6514 "progress");
6505 6515 break;
6506 6516 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6507 6517 (void) sprintf(curr, "is online at "
6508 6518 "1.5 Gbps");
6509 6519 break;
6510 6520 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6511 6521 (void) sprintf(curr, "is online at 3.0 "
6512 6522 "Gbps");
6513 6523 break;
6514 6524 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6515 6525 (void) sprintf(curr, "is online at 6.0 "
6516 6526 "Gbps");
6517 6527 break;
6518 6528 default:
6519 6529 (void) sprintf(curr, "state is "
6520 6530 "unknown");
6521 6531 break;
6522 6532 }
6523 6533 /*
6524 6534 * New target device added into the system.
6525 6535 * Set association flag according to if an
6526 6536 * expander is used or not.
6527 6537 */
6528 6538 exp_flag =
6529 6539 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6530 6540 if (flags ==
6531 6541 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6532 6542 flags = exp_flag;
6533 6543 }
6534 6544 topo_node = kmem_zalloc(
6535 6545 sizeof (mptsas_topo_change_list_t),
6536 6546 KM_SLEEP);
6537 6547 topo_node->mpt = mpt;
6538 6548 topo_node->event =
6539 6549 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6540 6550 if (expd_handle == 0) {
6541 6551 /*
6542 6552 * Per MPI 2, if expander dev handle
6543 6553 * is 0, it's a directly attached
6544 6554 * device. So driver use PHY to decide
6545 6555 * which iport is associated
6546 6556 */
6547 6557 physport = phy;
6548 6558 mpt->m_port_chng = 1;
6549 6559 }
6550 6560 topo_node->un.physport = physport;
6551 6561 topo_node->devhdl = dev_handle;
6552 6562 topo_node->flags = flags;
6553 6563 topo_node->object = NULL;
6554 6564 if (topo_head == NULL) {
6555 6565 topo_head = topo_tail = topo_node;
6556 6566 } else {
6557 6567 topo_tail->next = topo_node;
6558 6568 topo_tail = topo_node;
6559 6569 }
6560 6570 break;
6561 6571 }
6562 6572 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6563 6573 {
6564 6574 NDBG20(("mptsas%d phy %d physical_port %d "
6565 6575 "dev_handle %d removed", mpt->m_instance,
6566 6576 phy, physport, dev_handle));
6567 6577 /*
6568 6578 * Set association flag according to if an
6569 6579 * expander is used or not.
6570 6580 */
6571 6581 exp_flag =
6572 6582 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6573 6583 if (flags ==
6574 6584 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6575 6585 flags = exp_flag;
6576 6586 }
6577 6587 /*
6578 6588 * Target device is removed from the system
6579 6589 * Before the device is really offline from
6580 6590 * from system.
6581 6591 */
6582 6592 ptgt = mptsas_search_by_devhdl(tgttbl,
6583 6593 dev_handle);
6584 6594 /*
6585 6595 * If ptgt is NULL here, it means that the
6586 6596 * DevHandle is not in the hash table. This is
6587 6597 * reasonable sometimes. For example, if a
6588 6598 * disk was pulled, then added, then pulled
6589 6599 * again, the disk will not have been put into
6590 6600 * the hash table because the add event will
6591 6601 * have an invalid phymask. BUT, this does not
6592 6602 * mean that the DevHandle is invalid. The
6593 6603 * controller will still have a valid DevHandle
6594 6604 * that must be removed. To do this, use the
6595 6605 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6596 6606 */
6597 6607 if (ptgt == NULL) {
6598 6608 topo_node = kmem_zalloc(
6599 6609 sizeof (mptsas_topo_change_list_t),
6600 6610 KM_SLEEP);
6601 6611 topo_node->mpt = mpt;
6602 6612 topo_node->un.phymask = 0;
6603 6613 topo_node->event =
6604 6614 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
6605 6615 topo_node->devhdl = dev_handle;
6606 6616 topo_node->flags = flags;
6607 6617 topo_node->object = NULL;
6608 6618 if (topo_head == NULL) {
6609 6619 topo_head = topo_tail =
6610 6620 topo_node;
6611 6621 } else {
6612 6622 topo_tail->next = topo_node;
6613 6623 topo_tail = topo_node;
6614 6624 }
6615 6625 break;
6616 6626 }
6617 6627
6618 6628 /*
6619 6629 * Update DR flag immediately avoid I/O failure
6620 6630 * before failover finish. Pay attention to the
6621 6631 * mutex protect, we need grab m_tx_waitq_mutex
6622 6632 * during set m_dr_flag because we won't add
6623 6633 * the following command into waitq, instead,
6624 6634 * we need return TRAN_BUSY in the tran_start
6625 6635 * context.
6626 6636 */
6627 6637 mutex_enter(&mpt->m_tx_waitq_mutex);
6628 6638 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6629 6639 mutex_exit(&mpt->m_tx_waitq_mutex);
6630 6640
6631 6641 topo_node = kmem_zalloc(
6632 6642 sizeof (mptsas_topo_change_list_t),
6633 6643 KM_SLEEP);
6634 6644 topo_node->mpt = mpt;
6635 6645 topo_node->un.phymask = ptgt->m_phymask;
6636 6646 topo_node->event =
6637 6647 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6638 6648 topo_node->devhdl = dev_handle;
6639 6649 topo_node->flags = flags;
6640 6650 topo_node->object = NULL;
6641 6651 if (topo_head == NULL) {
6642 6652 topo_head = topo_tail = topo_node;
6643 6653 } else {
6644 6654 topo_tail->next = topo_node;
6645 6655 topo_tail = topo_node;
6646 6656 }
6647 6657 break;
6648 6658 }
6649 6659 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6650 6660 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6651 6661 &sas_topo_change_list->PHY[i].LinkRate);
6652 6662 state = (link_rate &
6653 6663 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6654 6664 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6655 6665 pSmhba = &mpt->m_phy_info[i].smhba_info;
6656 6666 pSmhba->negotiated_link_rate = state;
6657 6667 switch (state) {
6658 6668 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6659 6669 (void) sprintf(curr, "is disabled");
6660 6670 mptsas_smhba_log_sysevent(mpt,
6661 6671 ESC_SAS_PHY_EVENT,
6662 6672 SAS_PHY_REMOVE,
6663 6673 &mpt->m_phy_info[i].smhba_info);
6664 6674 mpt->m_phy_info[i].smhba_info.
6665 6675 negotiated_link_rate
6666 6676 = 0x1;
6667 6677 break;
6668 6678 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6669 6679 (void) sprintf(curr, "is offline, "
6670 6680 "failed speed negotiation");
6671 6681 mptsas_smhba_log_sysevent(mpt,
6672 6682 ESC_SAS_PHY_EVENT,
6673 6683 SAS_PHY_OFFLINE,
6674 6684 &mpt->m_phy_info[i].smhba_info);
6675 6685 break;
6676 6686 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6677 6687 (void) sprintf(curr, "SATA OOB "
6678 6688 "complete");
6679 6689 break;
6680 6690 case SMP_RESET_IN_PROGRESS:
6681 6691 (void) sprintf(curr, "SMP reset in "
6682 6692 "progress");
6683 6693 break;
6684 6694 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6685 6695 (void) sprintf(curr, "is online at "
6686 6696 "1.5 Gbps");
6687 6697 if ((expd_handle == 0) &&
6688 6698 (enc_handle == 1)) {
6689 6699 mpt->m_port_chng = 1;
6690 6700 }
6691 6701 mptsas_smhba_log_sysevent(mpt,
6692 6702 ESC_SAS_PHY_EVENT,
6693 6703 SAS_PHY_ONLINE,
6694 6704 &mpt->m_phy_info[i].smhba_info);
6695 6705 break;
6696 6706 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6697 6707 (void) sprintf(curr, "is online at 3.0 "
6698 6708 "Gbps");
6699 6709 if ((expd_handle == 0) &&
6700 6710 (enc_handle == 1)) {
6701 6711 mpt->m_port_chng = 1;
6702 6712 }
6703 6713 mptsas_smhba_log_sysevent(mpt,
6704 6714 ESC_SAS_PHY_EVENT,
6705 6715 SAS_PHY_ONLINE,
6706 6716 &mpt->m_phy_info[i].smhba_info);
6707 6717 break;
6708 6718 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6709 6719 (void) sprintf(curr, "is online at "
6710 6720 "6.0 Gbps");
6711 6721 if ((expd_handle == 0) &&
6712 6722 (enc_handle == 1)) {
6713 6723 mpt->m_port_chng = 1;
6714 6724 }
6715 6725 mptsas_smhba_log_sysevent(mpt,
6716 6726 ESC_SAS_PHY_EVENT,
6717 6727 SAS_PHY_ONLINE,
6718 6728 &mpt->m_phy_info[i].smhba_info);
6719 6729 break;
6720 6730 default:
6721 6731 (void) sprintf(curr, "state is "
6722 6732 "unknown");
6723 6733 break;
6724 6734 }
6725 6735
6726 6736 state = (link_rate &
6727 6737 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
6728 6738 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
6729 6739 switch (state) {
6730 6740 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6731 6741 (void) sprintf(prev, ", was disabled");
6732 6742 break;
6733 6743 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6734 6744 (void) sprintf(prev, ", was offline, "
6735 6745 "failed speed negotiation");
6736 6746 break;
6737 6747 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6738 6748 (void) sprintf(prev, ", was SATA OOB "
6739 6749 "complete");
6740 6750 break;
6741 6751 case SMP_RESET_IN_PROGRESS:
6742 6752 (void) sprintf(prev, ", was SMP reset "
6743 6753 "in progress");
6744 6754 break;
6745 6755 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6746 6756 (void) sprintf(prev, ", was online at "
6747 6757 "1.5 Gbps");
6748 6758 break;
6749 6759 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6750 6760 (void) sprintf(prev, ", was online at "
6751 6761 "3.0 Gbps");
6752 6762 break;
6753 6763 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6754 6764 (void) sprintf(prev, ", was online at "
6755 6765 "6.0 Gbps");
6756 6766 break;
6757 6767 default:
6758 6768 break;
6759 6769 }
6760 6770 (void) sprintf(&string[strlen(string)], "link "
6761 6771 "changed, ");
6762 6772 break;
6763 6773 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6764 6774 continue;
6765 6775 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6766 6776 (void) sprintf(&string[strlen(string)],
6767 6777 "target not responding, delaying "
6768 6778 "removal");
6769 6779 break;
6770 6780 }
6771 6781 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
6772 6782 mpt->m_instance, phy, dev_handle, string, curr,
6773 6783 prev));
6774 6784 }
6775 6785 if (topo_head != NULL) {
6776 6786 /*
6777 6787 * Launch DR taskq to handle topology change
6778 6788 */
6779 6789 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6780 6790 mptsas_handle_dr, (void *)topo_head,
6781 6791 DDI_NOSLEEP)) != DDI_SUCCESS) {
6782 6792 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6783 6793 "for handle SAS DR event failed. \n");
6784 6794 }
6785 6795 }
6786 6796 break;
6787 6797 }
6788 6798 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6789 6799 {
6790 6800 Mpi2EventDataIrConfigChangeList_t *irChangeList;
6791 6801 mptsas_topo_change_list_t *topo_head = NULL;
6792 6802 mptsas_topo_change_list_t *topo_tail = NULL;
6793 6803 mptsas_topo_change_list_t *topo_node = NULL;
6794 6804 mptsas_target_t *ptgt;
6795 6805 mptsas_hash_table_t *tgttbl;
6796 6806 uint8_t num_entries, i, reason;
6797 6807 uint16_t volhandle, diskhandle;
6798 6808
6799 6809 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
6800 6810 eventreply->EventData;
6801 6811 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6802 6812 &irChangeList->NumElements);
6803 6813
6804 6814 tgttbl = &mpt->m_active->m_tgttbl;
6805 6815
6806 6816 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
6807 6817 mpt->m_instance));
6808 6818
6809 6819 for (i = 0; i < num_entries; i++) {
6810 6820 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
6811 6821 &irChangeList->ConfigElement[i].ReasonCode);
6812 6822 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6813 6823 &irChangeList->ConfigElement[i].VolDevHandle);
6814 6824 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6815 6825 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
6816 6826
6817 6827 switch (reason) {
6818 6828 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
6819 6829 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
6820 6830 {
6821 6831 NDBG20(("mptsas %d volume added\n",
6822 6832 mpt->m_instance));
6823 6833
6824 6834 topo_node = kmem_zalloc(
6825 6835 sizeof (mptsas_topo_change_list_t),
6826 6836 KM_SLEEP);
6827 6837
6828 6838 topo_node->mpt = mpt;
6829 6839 topo_node->event =
6830 6840 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6831 6841 topo_node->un.physport = 0xff;
6832 6842 topo_node->devhdl = volhandle;
6833 6843 topo_node->flags =
6834 6844 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6835 6845 topo_node->object = NULL;
6836 6846 if (topo_head == NULL) {
6837 6847 topo_head = topo_tail = topo_node;
6838 6848 } else {
6839 6849 topo_tail->next = topo_node;
6840 6850 topo_tail = topo_node;
6841 6851 }
6842 6852 break;
6843 6853 }
6844 6854 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
6845 6855 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
6846 6856 {
6847 6857 NDBG20(("mptsas %d volume deleted\n",
6848 6858 mpt->m_instance));
6849 6859 ptgt = mptsas_search_by_devhdl(tgttbl,
6850 6860 volhandle);
6851 6861 if (ptgt == NULL)
6852 6862 break;
6853 6863
6854 6864 /*
6855 6865 * Clear any flags related to volume
6856 6866 */
6857 6867 (void) mptsas_delete_volume(mpt, volhandle);
6858 6868
6859 6869 /*
6860 6870 * Update DR flag immediately avoid I/O failure
6861 6871 */
6862 6872 mutex_enter(&mpt->m_tx_waitq_mutex);
6863 6873 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6864 6874 mutex_exit(&mpt->m_tx_waitq_mutex);
6865 6875
6866 6876 topo_node = kmem_zalloc(
6867 6877 sizeof (mptsas_topo_change_list_t),
6868 6878 KM_SLEEP);
6869 6879 topo_node->mpt = mpt;
6870 6880 topo_node->un.phymask = ptgt->m_phymask;
6871 6881 topo_node->event =
6872 6882 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6873 6883 topo_node->devhdl = volhandle;
6874 6884 topo_node->flags =
6875 6885 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6876 6886 topo_node->object = (void *)ptgt;
6877 6887 if (topo_head == NULL) {
6878 6888 topo_head = topo_tail = topo_node;
6879 6889 } else {
6880 6890 topo_tail->next = topo_node;
6881 6891 topo_tail = topo_node;
6882 6892 }
6883 6893 break;
6884 6894 }
6885 6895 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
6886 6896 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
6887 6897 {
6888 6898 ptgt = mptsas_search_by_devhdl(tgttbl,
6889 6899 diskhandle);
6890 6900 if (ptgt == NULL)
6891 6901 break;
6892 6902
6893 6903 /*
6894 6904 * Update DR flag immediately avoid I/O failure
6895 6905 */
6896 6906 mutex_enter(&mpt->m_tx_waitq_mutex);
6897 6907 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6898 6908 mutex_exit(&mpt->m_tx_waitq_mutex);
6899 6909
6900 6910 topo_node = kmem_zalloc(
6901 6911 sizeof (mptsas_topo_change_list_t),
6902 6912 KM_SLEEP);
6903 6913 topo_node->mpt = mpt;
6904 6914 topo_node->un.phymask = ptgt->m_phymask;
6905 6915 topo_node->event =
6906 6916 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6907 6917 topo_node->devhdl = diskhandle;
6908 6918 topo_node->flags =
6909 6919 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6910 6920 topo_node->object = (void *)ptgt;
6911 6921 if (topo_head == NULL) {
6912 6922 topo_head = topo_tail = topo_node;
6913 6923 } else {
6914 6924 topo_tail->next = topo_node;
6915 6925 topo_tail = topo_node;
6916 6926 }
6917 6927 break;
6918 6928 }
6919 6929 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
6920 6930 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
6921 6931 {
6922 6932 /*
6923 6933 * The physical drive is released by a IR
6924 6934 * volume. But we cannot get the the physport
6925 6935 * or phynum from the event data, so we only
6926 6936 * can get the physport/phynum after SAS
6927 6937 * Device Page0 request for the devhdl.
6928 6938 */
6929 6939 topo_node = kmem_zalloc(
6930 6940 sizeof (mptsas_topo_change_list_t),
6931 6941 KM_SLEEP);
6932 6942 topo_node->mpt = mpt;
6933 6943 topo_node->un.phymask = 0;
6934 6944 topo_node->event =
6935 6945 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6936 6946 topo_node->devhdl = diskhandle;
6937 6947 topo_node->flags =
6938 6948 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6939 6949 topo_node->object = NULL;
6940 6950 mpt->m_port_chng = 1;
6941 6951 if (topo_head == NULL) {
6942 6952 topo_head = topo_tail = topo_node;
6943 6953 } else {
6944 6954 topo_tail->next = topo_node;
6945 6955 topo_tail = topo_node;
6946 6956 }
6947 6957 break;
6948 6958 }
6949 6959 default:
6950 6960 break;
6951 6961 }
6952 6962 }
6953 6963
6954 6964 if (topo_head != NULL) {
6955 6965 /*
6956 6966 * Launch DR taskq to handle topology change
6957 6967 */
6958 6968 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6959 6969 mptsas_handle_dr, (void *)topo_head,
6960 6970 DDI_NOSLEEP)) != DDI_SUCCESS) {
6961 6971 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6962 6972 "for handle SAS DR event failed. \n");
6963 6973 }
6964 6974 }
6965 6975 break;
6966 6976 }
6967 6977 default:
6968 6978 return (DDI_FAILURE);
6969 6979 }
6970 6980
6971 6981 return (DDI_SUCCESS);
6972 6982 }
6973 6983
6974 6984 /*
6975 6985 * handle events from ioc
6976 6986 */
6977 6987 static void
6978 6988 mptsas_handle_event(void *args)
6979 6989 {
6980 6990 m_replyh_arg_t *replyh_arg;
6981 6991 pMpi2EventNotificationReply_t eventreply;
6982 6992 uint32_t event, iocloginfo, rfm;
|
↓ open down ↓ |
912 lines elided |
↑ open up ↑ |
6983 6993 uint32_t status;
6984 6994 uint8_t port;
6985 6995 mptsas_t *mpt;
6986 6996 uint_t iocstatus;
6987 6997
6988 6998 replyh_arg = (m_replyh_arg_t *)args;
6989 6999 rfm = replyh_arg->rfm;
6990 7000 mpt = replyh_arg->mpt;
6991 7001
6992 7002 mutex_enter(&mpt->m_mutex);
7003 + /*
7004 + * If HBA is being reset, drop incoming event.
7005 + */
7006 + if (mpt->m_in_reset) {
7007 + NDBG20(("dropping event received prior to reset"));
7008 + mutex_exit(&mpt->m_mutex);
7009 + return;
7010 + }
6993 7011
6994 7012 eventreply = (pMpi2EventNotificationReply_t)
6995 7013 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6996 7014 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6997 7015
6998 7016 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6999 7017 &eventreply->IOCStatus)) {
7000 7018 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7001 7019 mptsas_log(mpt, CE_WARN,
7002 7020 "!mptsas_handle_event: IOCStatus=0x%x, "
7003 7021 "IOCLogInfo=0x%x", iocstatus,
7004 7022 ddi_get32(mpt->m_acc_reply_frame_hdl,
7005 7023 &eventreply->IOCLogInfo));
7006 7024 } else {
7007 7025 mptsas_log(mpt, CE_WARN,
7008 7026 "mptsas_handle_event: IOCStatus=0x%x, "
7009 7027 "IOCLogInfo=0x%x", iocstatus,
7010 7028 ddi_get32(mpt->m_acc_reply_frame_hdl,
7011 7029 &eventreply->IOCLogInfo));
7012 7030 }
7013 7031 }
7014 7032
7015 7033 /*
7016 7034 * figure out what kind of event we got and handle accordingly
7017 7035 */
7018 7036 switch (event) {
7019 7037 case MPI2_EVENT_LOG_ENTRY_ADDED:
7020 7038 break;
7021 7039 case MPI2_EVENT_LOG_DATA:
7022 7040 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7023 7041 &eventreply->IOCLogInfo);
7024 7042 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7025 7043 iocloginfo));
7026 7044 break;
7027 7045 case MPI2_EVENT_STATE_CHANGE:
7028 7046 NDBG20(("mptsas%d state change.", mpt->m_instance));
7029 7047 break;
7030 7048 case MPI2_EVENT_HARD_RESET_RECEIVED:
7031 7049 NDBG20(("mptsas%d event change.", mpt->m_instance));
7032 7050 break;
7033 7051 case MPI2_EVENT_SAS_DISCOVERY:
7034 7052 {
7035 7053 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7036 7054 char string[80];
7037 7055 uint8_t rc;
7038 7056
7039 7057 sasdiscovery =
7040 7058 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7041 7059
7042 7060 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7043 7061 &sasdiscovery->ReasonCode);
7044 7062 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7045 7063 &sasdiscovery->PhysicalPort);
7046 7064 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7047 7065 &sasdiscovery->DiscoveryStatus);
7048 7066
7049 7067 string[0] = 0;
7050 7068 switch (rc) {
7051 7069 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7052 7070 (void) sprintf(string, "STARTING");
7053 7071 break;
7054 7072 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7055 7073 (void) sprintf(string, "COMPLETED");
7056 7074 break;
7057 7075 default:
7058 7076 (void) sprintf(string, "UNKNOWN");
7059 7077 break;
7060 7078 }
7061 7079
7062 7080 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7063 7081 port, status));
7064 7082
7065 7083 break;
7066 7084 }
7067 7085 case MPI2_EVENT_EVENT_CHANGE:
7068 7086 NDBG20(("mptsas%d event change.", mpt->m_instance));
7069 7087 break;
7070 7088 case MPI2_EVENT_TASK_SET_FULL:
7071 7089 {
7072 7090 pMpi2EventDataTaskSetFull_t taskfull;
7073 7091
7074 7092 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7075 7093
7076 7094 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7077 7095 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7078 7096 &taskfull->CurrentDepth)));
7079 7097 break;
7080 7098 }
7081 7099 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7082 7100 {
7083 7101 /*
7084 7102 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7085 7103 * in mptsas_handle_event_sync() of interrupt context
7086 7104 */
7087 7105 break;
7088 7106 }
7089 7107 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7090 7108 {
7091 7109 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7092 7110 uint8_t rc;
7093 7111 char string[80];
7094 7112
7095 7113 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7096 7114 eventreply->EventData;
7097 7115
7098 7116 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7099 7117 &encstatus->ReasonCode);
7100 7118 switch (rc) {
7101 7119 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7102 7120 (void) sprintf(string, "added");
7103 7121 break;
7104 7122 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7105 7123 (void) sprintf(string, ", not responding");
7106 7124 break;
7107 7125 default:
7108 7126 break;
7109 7127 }
7110 7128 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7111 7129 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7112 7130 &encstatus->EnclosureHandle), string));
7113 7131 break;
7114 7132 }
7115 7133
7116 7134 /*
7117 7135 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7118 7136 * mptsas_handle_event_sync,in here just send ack message.
7119 7137 */
7120 7138 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7121 7139 {
7122 7140 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7123 7141 uint8_t rc;
7124 7142 uint16_t devhdl;
7125 7143 uint64_t wwn = 0;
7126 7144 uint32_t wwn_lo, wwn_hi;
7127 7145
7128 7146 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7129 7147 eventreply->EventData;
7130 7148 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7131 7149 &statuschange->ReasonCode);
7132 7150 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7133 7151 (uint32_t *)(void *)&statuschange->SASAddress);
7134 7152 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7135 7153 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7136 7154 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7137 7155 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7138 7156 &statuschange->DevHandle);
7139 7157
7140 7158 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7141 7159 wwn));
7142 7160
7143 7161 switch (rc) {
7144 7162 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7145 7163 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7146 7164 ddi_get8(mpt->m_acc_reply_frame_hdl,
7147 7165 &statuschange->ASC),
7148 7166 ddi_get8(mpt->m_acc_reply_frame_hdl,
7149 7167 &statuschange->ASCQ)));
7150 7168 break;
7151 7169
7152 7170 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7153 7171 NDBG20(("Device not supported"));
7154 7172 break;
7155 7173
7156 7174 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7157 7175 NDBG20(("IOC internally generated the Target Reset "
7158 7176 "for devhdl:%x", devhdl));
7159 7177 break;
7160 7178
7161 7179 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7162 7180 NDBG20(("IOC's internally generated Target Reset "
7163 7181 "completed for devhdl:%x", devhdl));
7164 7182 break;
7165 7183
7166 7184 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7167 7185 NDBG20(("IOC internally generated Abort Task"));
7168 7186 break;
7169 7187
7170 7188 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7171 7189 NDBG20(("IOC's internally generated Abort Task "
7172 7190 "completed"));
7173 7191 break;
7174 7192
7175 7193 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7176 7194 NDBG20(("IOC internally generated Abort Task Set"));
7177 7195 break;
7178 7196
7179 7197 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7180 7198 NDBG20(("IOC internally generated Clear Task Set"));
7181 7199 break;
7182 7200
7183 7201 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7184 7202 NDBG20(("IOC internally generated Query Task"));
7185 7203 break;
7186 7204
7187 7205 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7188 7206 NDBG20(("Device sent an Asynchronous Notification"));
7189 7207 break;
7190 7208
7191 7209 default:
7192 7210 break;
7193 7211 }
7194 7212 break;
7195 7213 }
7196 7214 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7197 7215 {
7198 7216 /*
7199 7217 * IR TOPOLOGY CHANGE LIST Event has already been handled
7200 7218 * in mpt_handle_event_sync() of interrupt context
7201 7219 */
7202 7220 break;
7203 7221 }
7204 7222 case MPI2_EVENT_IR_OPERATION_STATUS:
7205 7223 {
7206 7224 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7207 7225 char reason_str[80];
7208 7226 uint8_t rc, percent;
7209 7227 uint16_t handle;
7210 7228
7211 7229 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7212 7230 eventreply->EventData;
7213 7231 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7214 7232 &irOpStatus->RAIDOperation);
7215 7233 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7216 7234 &irOpStatus->PercentComplete);
7217 7235 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7218 7236 &irOpStatus->VolDevHandle);
7219 7237
7220 7238 switch (rc) {
7221 7239 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7222 7240 (void) sprintf(reason_str, "resync");
7223 7241 break;
7224 7242 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7225 7243 (void) sprintf(reason_str, "online capacity "
7226 7244 "expansion");
7227 7245 break;
7228 7246 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7229 7247 (void) sprintf(reason_str, "consistency check");
7230 7248 break;
7231 7249 default:
7232 7250 (void) sprintf(reason_str, "unknown reason %x",
7233 7251 rc);
7234 7252 }
7235 7253
7236 7254 NDBG20(("mptsas%d raid operational status: (%s)"
7237 7255 "\thandle(0x%04x), percent complete(%d)\n",
7238 7256 mpt->m_instance, reason_str, handle, percent));
7239 7257 break;
7240 7258 }
7241 7259 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7242 7260 {
7243 7261 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7244 7262 uint8_t phy_num;
7245 7263 uint8_t primitive;
7246 7264
7247 7265 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7248 7266 eventreply->EventData;
7249 7267
7250 7268 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7251 7269 &sas_broadcast->PhyNum);
7252 7270 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7253 7271 &sas_broadcast->Primitive);
7254 7272
7255 7273 switch (primitive) {
7256 7274 case MPI2_EVENT_PRIMITIVE_CHANGE:
7257 7275 mptsas_smhba_log_sysevent(mpt,
7258 7276 ESC_SAS_HBA_PORT_BROADCAST,
7259 7277 SAS_PORT_BROADCAST_CHANGE,
7260 7278 &mpt->m_phy_info[phy_num].smhba_info);
7261 7279 break;
7262 7280 case MPI2_EVENT_PRIMITIVE_SES:
7263 7281 mptsas_smhba_log_sysevent(mpt,
7264 7282 ESC_SAS_HBA_PORT_BROADCAST,
7265 7283 SAS_PORT_BROADCAST_SES,
7266 7284 &mpt->m_phy_info[phy_num].smhba_info);
7267 7285 break;
7268 7286 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7269 7287 mptsas_smhba_log_sysevent(mpt,
7270 7288 ESC_SAS_HBA_PORT_BROADCAST,
7271 7289 SAS_PORT_BROADCAST_D01_4,
7272 7290 &mpt->m_phy_info[phy_num].smhba_info);
7273 7291 break;
7274 7292 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7275 7293 mptsas_smhba_log_sysevent(mpt,
7276 7294 ESC_SAS_HBA_PORT_BROADCAST,
7277 7295 SAS_PORT_BROADCAST_D04_7,
7278 7296 &mpt->m_phy_info[phy_num].smhba_info);
7279 7297 break;
7280 7298 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7281 7299 mptsas_smhba_log_sysevent(mpt,
7282 7300 ESC_SAS_HBA_PORT_BROADCAST,
7283 7301 SAS_PORT_BROADCAST_D16_7,
7284 7302 &mpt->m_phy_info[phy_num].smhba_info);
7285 7303 break;
7286 7304 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7287 7305 mptsas_smhba_log_sysevent(mpt,
7288 7306 ESC_SAS_HBA_PORT_BROADCAST,
7289 7307 SAS_PORT_BROADCAST_D29_7,
7290 7308 &mpt->m_phy_info[phy_num].smhba_info);
7291 7309 break;
7292 7310 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7293 7311 mptsas_smhba_log_sysevent(mpt,
7294 7312 ESC_SAS_HBA_PORT_BROADCAST,
7295 7313 SAS_PORT_BROADCAST_D24_0,
7296 7314 &mpt->m_phy_info[phy_num].smhba_info);
7297 7315 break;
7298 7316 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7299 7317 mptsas_smhba_log_sysevent(mpt,
7300 7318 ESC_SAS_HBA_PORT_BROADCAST,
7301 7319 SAS_PORT_BROADCAST_D27_4,
7302 7320 &mpt->m_phy_info[phy_num].smhba_info);
7303 7321 break;
7304 7322 default:
7305 7323 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7306 7324 " %x received",
7307 7325 mpt->m_instance, primitive));
7308 7326 break;
7309 7327 }
7310 7328 NDBG20(("mptsas%d sas broadcast primitive: "
7311 7329 "\tprimitive(0x%04x), phy(%d) complete\n",
7312 7330 mpt->m_instance, primitive, phy_num));
7313 7331 break;
7314 7332 }
7315 7333 case MPI2_EVENT_IR_VOLUME:
7316 7334 {
7317 7335 Mpi2EventDataIrVolume_t *irVolume;
7318 7336 uint16_t devhandle;
7319 7337 uint32_t state;
7320 7338 int config, vol;
7321 7339 mptsas_slots_t *slots = mpt->m_active;
7322 7340 uint8_t found = FALSE;
7323 7341
7324 7342 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7325 7343 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7326 7344 &irVolume->NewValue);
7327 7345 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7328 7346 &irVolume->VolDevHandle);
7329 7347
7330 7348 NDBG20(("EVENT_IR_VOLUME event is received"));
7331 7349
7332 7350 /*
7333 7351 * Get latest RAID info and then find the DevHandle for this
7334 7352 * event in the configuration. If the DevHandle is not found
7335 7353 * just exit the event.
7336 7354 */
7337 7355 (void) mptsas_get_raid_info(mpt);
7338 7356 for (config = 0; (config < slots->m_num_raid_configs) &&
7339 7357 (!found); config++) {
7340 7358 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7341 7359 if (slots->m_raidconfig[config].m_raidvol[vol].
7342 7360 m_raidhandle == devhandle) {
7343 7361 found = TRUE;
7344 7362 break;
7345 7363 }
7346 7364 }
7347 7365 }
7348 7366 if (!found) {
7349 7367 break;
7350 7368 }
7351 7369
7352 7370 switch (irVolume->ReasonCode) {
7353 7371 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7354 7372 {
7355 7373 uint32_t i;
7356 7374 slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7357 7375 state;
7358 7376
7359 7377 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7360 7378 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7361 7379 ", auto-config of hot-swap drives is %s"
7362 7380 ", write caching is %s"
7363 7381 ", hot-spare pool mask is %02x\n",
7364 7382 vol, state &
7365 7383 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7366 7384 ? "disabled" : "enabled",
7367 7385 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7368 7386 ? "controlled by member disks" :
7369 7387 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7370 7388 ? "disabled" :
7371 7389 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7372 7390 ? "enabled" :
7373 7391 "incorrectly set",
7374 7392 (state >> 16) & 0xff);
7375 7393 break;
7376 7394 }
7377 7395 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7378 7396 {
7379 7397 slots->m_raidconfig[config].m_raidvol[vol].m_state =
7380 7398 (uint8_t)state;
7381 7399
7382 7400 mptsas_log(mpt, CE_NOTE,
7383 7401 "Volume %d is now %s\n", vol,
7384 7402 state == MPI2_RAID_VOL_STATE_OPTIMAL
7385 7403 ? "optimal" :
7386 7404 state == MPI2_RAID_VOL_STATE_DEGRADED
7387 7405 ? "degraded" :
7388 7406 state == MPI2_RAID_VOL_STATE_ONLINE
7389 7407 ? "online" :
7390 7408 state == MPI2_RAID_VOL_STATE_INITIALIZING
7391 7409 ? "initializing" :
7392 7410 state == MPI2_RAID_VOL_STATE_FAILED
7393 7411 ? "failed" :
7394 7412 state == MPI2_RAID_VOL_STATE_MISSING
7395 7413 ? "missing" :
7396 7414 "state unknown");
7397 7415 break;
7398 7416 }
7399 7417 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7400 7418 {
7401 7419 slots->m_raidconfig[config].m_raidvol[vol].
7402 7420 m_statusflags = state;
7403 7421
7404 7422 mptsas_log(mpt, CE_NOTE,
7405 7423 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7406 7424 vol,
7407 7425 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7408 7426 ? ", enabled" : ", disabled",
7409 7427 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7410 7428 ? ", quiesced" : "",
7411 7429 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7412 7430 ? ", inactive" : ", active",
7413 7431 state &
7414 7432 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7415 7433 ? ", bad block table is full" : "",
7416 7434 state &
7417 7435 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7418 7436 ? ", resync in progress" : "",
7419 7437 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7420 7438 ? ", background initialization in progress" : "",
7421 7439 state &
7422 7440 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7423 7441 ? ", capacity expansion in progress" : "",
7424 7442 state &
7425 7443 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7426 7444 ? ", consistency check in progress" : "",
7427 7445 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7428 7446 ? ", data scrub in progress" : "");
7429 7447 break;
7430 7448 }
7431 7449 default:
7432 7450 break;
7433 7451 }
7434 7452 break;
7435 7453 }
7436 7454 case MPI2_EVENT_IR_PHYSICAL_DISK:
7437 7455 {
7438 7456 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7439 7457 uint16_t devhandle, enchandle, slot;
7440 7458 uint32_t status, state;
7441 7459 uint8_t physdisknum, reason;
7442 7460
7443 7461 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7444 7462 eventreply->EventData;
7445 7463 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7446 7464 &irPhysDisk->PhysDiskNum);
7447 7465 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7448 7466 &irPhysDisk->PhysDiskDevHandle);
7449 7467 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7450 7468 &irPhysDisk->EnclosureHandle);
7451 7469 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7452 7470 &irPhysDisk->Slot);
7453 7471 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7454 7472 &irPhysDisk->NewValue);
7455 7473 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7456 7474 &irPhysDisk->ReasonCode);
7457 7475
7458 7476 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7459 7477
7460 7478 switch (reason) {
7461 7479 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7462 7480 mptsas_log(mpt, CE_NOTE,
7463 7481 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7464 7482 "for enclosure with handle 0x%x is now in hot "
7465 7483 "spare pool %d",
7466 7484 physdisknum, devhandle, slot, enchandle,
7467 7485 (state >> 16) & 0xff);
7468 7486 break;
7469 7487
7470 7488 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7471 7489 status = state;
7472 7490 mptsas_log(mpt, CE_NOTE,
7473 7491 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7474 7492 "for enclosure with handle 0x%x is now "
7475 7493 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7476 7494 enchandle,
7477 7495 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7478 7496 ? ", inactive" : ", active",
7479 7497 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7480 7498 ? ", out of sync" : "",
7481 7499 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7482 7500 ? ", quiesced" : "",
7483 7501 status &
7484 7502 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7485 7503 ? ", write cache enabled" : "",
7486 7504 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7487 7505 ? ", capacity expansion target" : "");
7488 7506 break;
7489 7507
7490 7508 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7491 7509 mptsas_log(mpt, CE_NOTE,
7492 7510 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7493 7511 "for enclosure with handle 0x%x is now %s\n",
7494 7512 physdisknum, devhandle, slot, enchandle,
7495 7513 state == MPI2_RAID_PD_STATE_OPTIMAL
7496 7514 ? "optimal" :
7497 7515 state == MPI2_RAID_PD_STATE_REBUILDING
7498 7516 ? "rebuilding" :
7499 7517 state == MPI2_RAID_PD_STATE_DEGRADED
7500 7518 ? "degraded" :
7501 7519 state == MPI2_RAID_PD_STATE_HOT_SPARE
7502 7520 ? "a hot spare" :
7503 7521 state == MPI2_RAID_PD_STATE_ONLINE
7504 7522 ? "online" :
7505 7523 state == MPI2_RAID_PD_STATE_OFFLINE
7506 7524 ? "offline" :
7507 7525 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7508 7526 ? "not compatible" :
7509 7527 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7510 7528 ? "not configured" :
7511 7529 "state unknown");
7512 7530 break;
7513 7531 }
7514 7532 break;
7515 7533 }
7516 7534 default:
7517 7535 NDBG20(("mptsas%d: unknown event %x received",
7518 7536 mpt->m_instance, event));
7519 7537 break;
7520 7538 }
7521 7539
7522 7540 /*
7523 7541 * Return the reply frame to the free queue.
7524 7542 */
7525 7543 ddi_put32(mpt->m_acc_free_queue_hdl,
7526 7544 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7527 7545 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7528 7546 DDI_DMA_SYNC_FORDEV);
7529 7547 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7530 7548 mpt->m_free_index = 0;
7531 7549 }
7532 7550 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7533 7551 mpt->m_free_index);
7534 7552 mutex_exit(&mpt->m_mutex);
7535 7553 }
7536 7554
7537 7555 /*
7538 7556 * invoked from timeout() to restart qfull cmds with throttle == 0
7539 7557 */
7540 7558 static void
7541 7559 mptsas_restart_cmd(void *arg)
7542 7560 {
7543 7561 mptsas_t *mpt = arg;
7544 7562 mptsas_target_t *ptgt = NULL;
7545 7563
7546 7564 mutex_enter(&mpt->m_mutex);
7547 7565
7548 7566 mpt->m_restart_cmd_timeid = 0;
7549 7567
7550 7568 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7551 7569 MPTSAS_HASH_FIRST);
7552 7570 while (ptgt != NULL) {
7553 7571 if (ptgt->m_reset_delay == 0) {
7554 7572 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7555 7573 mptsas_set_throttle(mpt, ptgt,
7556 7574 MAX_THROTTLE);
7557 7575 }
7558 7576 }
7559 7577
7560 7578 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7561 7579 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7562 7580 }
7563 7581 mptsas_restart_hba(mpt);
7564 7582 mutex_exit(&mpt->m_mutex);
7565 7583 }
7566 7584
/*
 * Remove a completed (non-TM) command from its request slot and update
 * the per-target outstanding-command count, throttle, and tagged-queue
 * timeout bookkeeping.  IOC-internal commands are returned to the cmd
 * pool instead of updating target state.
 */
void
mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		slot;
	mptsas_slots_t	*slots = mpt->m_active;
	int		t;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(cmd != NULL);
	ASSERT(cmd->cmd_queued == FALSE);

	/*
	 * Task Management cmds are removed in their own routines.  Also,
	 * we don't want to modify timeout based on TM cmds.
	 */
	if (cmd->cmd_flags & CFLAG_TM_CMD) {
		return;
	}

	t = Tgt(cmd);
	slot = cmd->cmd_slot;

	/*
	 * remove the cmd.
	 */
	if (cmd == slots->m_slot[slot]) {
		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
		slots->m_slot[slot] = NULL;
		mpt->m_ncmds--;

		/*
		 * only decrement per target ncmds if command
		 * has a target associated with it.
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds--;
			/*
			 * reset throttle if we just ran an untagged command
			 * to a tagged target
			 */
			if ((ptgt->m_t_ncmds == 0) &&
			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
		}

	}

	/*
	 * This is all we need to do for ioc commands.
	 */
	if (cmd->cmd_flags & CFLAG_CMDIOC) {
		mptsas_return_to_pool(mpt, cmd);
		return;
	}

	/*
	 * Figure out what to set tag Q timeout for...
	 *
	 * Optimize: If we have duplicate's of same timeout
	 * we're using, then we'll use it again until we run
	 * out of duplicates.  This should be the normal case
	 * for block and raw I/O.
	 * If no duplicates, we have to scan through tag que and
	 * find the longest timeout value and use it.  This is
	 * going to take a while...
	 * Add 1 to m_n_slots to account for TM request.
	 */
	if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
		if (--(ptgt->m_dups) == 0) {
			if (ptgt->m_t_ncmds) {
				mptsas_cmd_t *ssp;
				uint_t n = 0;
				ushort_t nslots = (slots->m_n_slots + 1);
				ushort_t i;
				/*
				 * This crude check assumes we don't do
				 * this too often which seems reasonable
				 * for block and raw I/O.
				 */
				for (i = 0; i < nslots; i++) {
					ssp = slots->m_slot[i];
					if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time > n)) {
						/*
						 * New longest timeout found;
						 * restart the duplicate count.
						 */
						n = ssp->cmd_pkt->pkt_time;
						ptgt->m_dups = 1;
					} else if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time == n)) {
						ptgt->m_dups++;
					}
				}
				ptgt->m_timebase = n;
			} else {
				/* no commands outstanding for the target */
				ptgt->m_dups = 0;
				ptgt->m_timebase = 0;
			}
		}
	}
	ptgt->m_timeout = ptgt->m_timebase;

	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
}
7669 7687
7670 7688 /*
7671 7689 * accept all cmds on the tx_waitq if any and then
7672 7690 * start a fresh request from the top of the device queue.
7673 7691 *
7674 7692 * since there are always cmds queued on the tx_waitq, and rare cmds on
7675 7693 * the instance waitq, so this function should not be invoked in the ISR,
7676 7694 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7677 7695 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7678 7696 */
7679 7697 static void
7680 7698 mptsas_restart_hba(mptsas_t *mpt)
7681 7699 {
7682 7700 ASSERT(mutex_owned(&mpt->m_mutex));
7683 7701
7684 7702 mutex_enter(&mpt->m_tx_waitq_mutex);
7685 7703 if (mpt->m_tx_waitq) {
7686 7704 mptsas_accept_tx_waitq(mpt);
7687 7705 }
7688 7706 mutex_exit(&mpt->m_tx_waitq_mutex);
7689 7707 mptsas_restart_waitq(mpt);
7690 7708 }
7691 7709
7692 7710 /*
7693 7711 * start a fresh request from the top of the device queue
7694 7712 */
7695 7713 static void
7696 7714 mptsas_restart_waitq(mptsas_t *mpt)
7697 7715 {
7698 7716 mptsas_cmd_t *cmd, *next_cmd;
7699 7717 mptsas_target_t *ptgt = NULL;
7700 7718
7701 7719 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7702 7720
7703 7721 ASSERT(mutex_owned(&mpt->m_mutex));
7704 7722
7705 7723 /*
7706 7724 * If there is a reset delay, don't start any cmds. Otherwise, start
7707 7725 * as many cmds as possible.
7708 7726 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7709 7727 * commands is m_max_requests - 2.
7710 7728 */
7711 7729 cmd = mpt->m_waitq;
7712 7730
7713 7731 while (cmd != NULL) {
7714 7732 next_cmd = cmd->cmd_linkp;
7715 7733 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7716 7734 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7717 7735 /*
7718 7736 * passthru command get slot need
7719 7737 * set CFLAG_PREPARED.
7720 7738 */
7721 7739 cmd->cmd_flags |= CFLAG_PREPARED;
7722 7740 mptsas_waitq_delete(mpt, cmd);
7723 7741 mptsas_start_passthru(mpt, cmd);
7724 7742 }
7725 7743 cmd = next_cmd;
7726 7744 continue;
7727 7745 }
7728 7746 if (cmd->cmd_flags & CFLAG_CONFIG) {
7729 7747 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7730 7748 /*
7731 7749 * Send the config page request and delete it
7732 7750 * from the waitq.
7733 7751 */
7734 7752 cmd->cmd_flags |= CFLAG_PREPARED;
7735 7753 mptsas_waitq_delete(mpt, cmd);
7736 7754 mptsas_start_config_page_access(mpt, cmd);
7737 7755 }
7738 7756 cmd = next_cmd;
7739 7757 continue;
7740 7758 }
7741 7759 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7742 7760 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7743 7761 /*
7744 7762 * Send the FW Diag request and delete if from
7745 7763 * the waitq.
7746 7764 */
7747 7765 cmd->cmd_flags |= CFLAG_PREPARED;
7748 7766 mptsas_waitq_delete(mpt, cmd);
7749 7767 mptsas_start_diag(mpt, cmd);
7750 7768 }
7751 7769 cmd = next_cmd;
7752 7770 continue;
7753 7771 }
7754 7772
7755 7773 ptgt = cmd->cmd_tgt_addr;
7756 7774 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7757 7775 (ptgt->m_t_ncmds == 0)) {
7758 7776 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7759 7777 }
7760 7778 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7761 7779 (ptgt && (ptgt->m_reset_delay == 0)) &&
7762 7780 (ptgt && (ptgt->m_t_ncmds <
7763 7781 ptgt->m_t_throttle))) {
7764 7782 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7765 7783 mptsas_waitq_delete(mpt, cmd);
7766 7784 (void) mptsas_start_cmd(mpt, cmd);
7767 7785 }
7768 7786 }
7769 7787 cmd = next_cmd;
7770 7788 }
7771 7789 }
7772 7790 /*
7773 7791 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7774 7792 * Accept all those queued cmds before new cmd is accept so that the
7775 7793 * cmds are sent in order.
7776 7794 */
7777 7795 static void
7778 7796 mptsas_accept_tx_waitq(mptsas_t *mpt)
7779 7797 {
7780 7798 mptsas_cmd_t *cmd;
7781 7799
7782 7800 ASSERT(mutex_owned(&mpt->m_mutex));
7783 7801 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7784 7802
7785 7803 /*
7786 7804 * A Bus Reset could occur at any time and flush the tx_waitq,
7787 7805 * so we cannot count on the tx_waitq to contain even one cmd.
7788 7806 * And when the m_tx_waitq_mutex is released and run
7789 7807 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7790 7808 */
7791 7809 cmd = mpt->m_tx_waitq;
7792 7810 for (;;) {
7793 7811 if ((cmd = mpt->m_tx_waitq) == NULL) {
7794 7812 mpt->m_tx_draining = 0;
7795 7813 break;
7796 7814 }
7797 7815 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7798 7816 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7799 7817 }
7800 7818 cmd->cmd_linkp = NULL;
7801 7819 mutex_exit(&mpt->m_tx_waitq_mutex);
7802 7820 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7803 7821 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7804 7822 "to accept cmd on queue\n");
7805 7823 mutex_enter(&mpt->m_tx_waitq_mutex);
7806 7824 }
7807 7825 }
7808 7826
7809 7827
7810 7828 /*
7811 7829 * mpt tag type lookup
7812 7830 */
7813 7831 static char mptsas_tag_lookup[] =
7814 7832 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7815 7833
/*
 * Build a SCSI IO request frame for cmd in its reserved request slot
 * (the slot number doubles as the SMID) and post it to the request
 * descriptor register.  Returns DDI_SUCCESS when the request was
 * posted; DDI_FAILURE when the cmd was requeued to drain untagged I/O
 * or an FMA handle check failed after posting.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * 0 is an invalid SMID; slot numbers start at 1, so the slot
	 * number assigned by mptsas_save_cmd() is used as the SMID.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* give back the slot and requeue at the head */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/*
		 * Untagged (non-RQSENSE) cmd: restrict the target to one
		 * outstanding command while it is in flight.
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* locate and zero this SMID's request frame, then fill it in */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	/* copy the CDB into the request frame */
	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* no data: build a single empty end-of-list SGE */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* flush the frame to the device before posting the descriptor */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		/* same timeout as the current timebase: count duplicate */
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		/* longer timeout: it becomes the new timebase */
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		/* shorter timeout: keep the existing (longer) timebase */
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7997 8015
7998 8016 /*
7999 8017 * Select a helper thread to handle current doneq
8000 8018 */
8001 8019 static void
8002 8020 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8003 8021 {
8004 8022 uint64_t t, i;
8005 8023 uint32_t min = 0xffffffff;
8006 8024 mptsas_doneq_thread_list_t *item;
8007 8025
8008 8026 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8009 8027 item = &mpt->m_doneq_thread_id[i];
8010 8028 /*
8011 8029 * If the completed command on help thread[i] less than
8012 8030 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8013 8031 * pick a thread which has least completed command.
8014 8032 */
8015 8033
8016 8034 mutex_enter(&item->mutex);
8017 8035 if (item->len < mpt->m_doneq_thread_threshold) {
8018 8036 t = i;
8019 8037 mutex_exit(&item->mutex);
8020 8038 break;
8021 8039 }
8022 8040 if (item->len < min) {
8023 8041 min = item->len;
8024 8042 t = i;
8025 8043 }
8026 8044 mutex_exit(&item->mutex);
8027 8045 }
8028 8046 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8029 8047 mptsas_doneq_mv(mpt, t);
8030 8048 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8031 8049 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8032 8050 }
8033 8051
8034 8052 /*
8035 8053 * move the current global doneq to the doneq of thead[t]
8036 8054 */
8037 8055 static void
8038 8056 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8039 8057 {
8040 8058 mptsas_cmd_t *cmd;
8041 8059 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8042 8060
8043 8061 ASSERT(mutex_owned(&item->mutex));
8044 8062 while ((cmd = mpt->m_doneq) != NULL) {
8045 8063 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8046 8064 mpt->m_donetail = &mpt->m_doneq;
8047 8065 }
8048 8066 cmd->cmd_linkp = NULL;
8049 8067 *item->donetail = cmd;
8050 8068 item->donetail = &cmd->cmd_linkp;
8051 8069 mpt->m_doneq_len--;
8052 8070 item->len++;
8053 8071 }
8054 8072 }
8055 8073
/*
 * Check every access and DMA handle associated with the HBA and with
 * cmd itself.  On any failure, report DDI_SERVICE_UNAFFECTED to the
 * FMA framework and fail the packet with CMD_TRAN_ERR.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* clear the error so config space access can continue */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* per-command handles are optional; check only if present */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_ext_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8126 8144
8127 8145 /*
8128 8146 * These routines manipulate the queue of commands that
8129 8147 * are waiting for their completion routines to be called.
8130 8148 * The queue is usually in FIFO order but on an MP system
8131 8149 * it's possible for the completion routines to get out
8132 8150 * of order. If that's a problem you need to add a global
8133 8151 * mutex around the code that calls the completion routine
8134 8152 * in the interrupt handler.
8135 8153 */
8136 8154 static void
8137 8155 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8138 8156 {
8139 8157 struct scsi_pkt *pkt = CMD2PKT(cmd);
8140 8158
8141 8159 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8142 8160
8143 8161 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8144 8162 cmd->cmd_linkp = NULL;
8145 8163 cmd->cmd_flags |= CFLAG_FINISHED;
8146 8164 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8147 8165
8148 8166 mptsas_fma_check(mpt, cmd);
8149 8167
8150 8168 /*
8151 8169 * only add scsi pkts that have completion routines to
8152 8170 * the doneq. no intr cmds do not have callbacks.
8153 8171 */
8154 8172 if (pkt && (pkt->pkt_comp)) {
8155 8173 *mpt->m_donetail = cmd;
8156 8174 mpt->m_donetail = &cmd->cmd_linkp;
8157 8175 mpt->m_doneq_len++;
8158 8176 }
8159 8177 }
8160 8178
8161 8179 static mptsas_cmd_t *
8162 8180 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8163 8181 {
8164 8182 mptsas_cmd_t *cmd;
8165 8183 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8166 8184
8167 8185 /* pop one off the done queue */
8168 8186 if ((cmd = item->doneq) != NULL) {
8169 8187 /* if the queue is now empty fix the tail pointer */
8170 8188 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8171 8189 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8172 8190 item->donetail = &item->doneq;
8173 8191 }
8174 8192 cmd->cmd_linkp = NULL;
8175 8193 item->len--;
8176 8194 }
8177 8195 return (cmd);
8178 8196 }
8179 8197
/*
 * Drain the instance done queue, invoking each queued command's
 * completion routine.  Called with m_mutex held; the mutex is dropped
 * while callbacks run and re-taken afterwards.  m_in_callback prevents
 * recursive draining if a completion routine re-enters the driver.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		/* Detach the whole queue under the mutex, then reset it. */
		mpt->m_in_callback = 1;
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		/* Callbacks may block or re-enter; run them unlocked. */
		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
8211 8229
8212 8230 /*
8213 8231 * These routines manipulate the target's queue of pending requests
8214 8232 */
8215 8233 void
8216 8234 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8217 8235 {
8218 8236 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8219 8237 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8220 8238 cmd->cmd_queued = TRUE;
8221 8239 if (ptgt)
8222 8240 ptgt->m_t_nwait++;
8223 8241 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8224 8242 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8225 8243 mpt->m_waitqtail = &cmd->cmd_linkp;
8226 8244 }
8227 8245 mpt->m_waitq = cmd;
8228 8246 } else {
8229 8247 cmd->cmd_linkp = NULL;
8230 8248 *(mpt->m_waitqtail) = cmd;
8231 8249 mpt->m_waitqtail = &cmd->cmd_linkp;
8232 8250 }
8233 8251 }
8234 8252
8235 8253 static mptsas_cmd_t *
8236 8254 mptsas_waitq_rm(mptsas_t *mpt)
8237 8255 {
8238 8256 mptsas_cmd_t *cmd;
8239 8257 mptsas_target_t *ptgt;
8240 8258 NDBG7(("mptsas_waitq_rm"));
8241 8259
8242 8260 MPTSAS_WAITQ_RM(mpt, cmd);
8243 8261
8244 8262 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8245 8263 if (cmd) {
8246 8264 ptgt = cmd->cmd_tgt_addr;
8247 8265 if (ptgt) {
8248 8266 ptgt->m_t_nwait--;
8249 8267 ASSERT(ptgt->m_t_nwait >= 0);
8250 8268 }
8251 8269 }
8252 8270 return (cmd);
8253 8271 }
8254 8272
8255 8273 /*
8256 8274 * remove specified cmd from the middle of the wait queue.
8257 8275 */
8258 8276 static void
8259 8277 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8260 8278 {
8261 8279 mptsas_cmd_t *prevp = mpt->m_waitq;
8262 8280 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8263 8281
8264 8282 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8265 8283 (void *)mpt, (void *)cmd));
8266 8284 if (ptgt) {
8267 8285 ptgt->m_t_nwait--;
8268 8286 ASSERT(ptgt->m_t_nwait >= 0);
8269 8287 }
8270 8288
8271 8289 if (prevp == cmd) {
8272 8290 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8273 8291 mpt->m_waitqtail = &mpt->m_waitq;
8274 8292
8275 8293 cmd->cmd_linkp = NULL;
8276 8294 cmd->cmd_queued = FALSE;
8277 8295 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8278 8296 (void *)mpt, (void *)cmd));
8279 8297 return;
8280 8298 }
8281 8299
8282 8300 while (prevp != NULL) {
8283 8301 if (prevp->cmd_linkp == cmd) {
8284 8302 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8285 8303 mpt->m_waitqtail = &prevp->cmd_linkp;
8286 8304
8287 8305 cmd->cmd_linkp = NULL;
8288 8306 cmd->cmd_queued = FALSE;
8289 8307 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8290 8308 (void *)mpt, (void *)cmd));
8291 8309 return;
8292 8310 }
8293 8311 prevp = prevp->cmd_linkp;
8294 8312 }
8295 8313 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8296 8314 }
8297 8315
8298 8316 static mptsas_cmd_t *
8299 8317 mptsas_tx_waitq_rm(mptsas_t *mpt)
8300 8318 {
8301 8319 mptsas_cmd_t *cmd;
8302 8320 NDBG7(("mptsas_tx_waitq_rm"));
8303 8321
8304 8322 MPTSAS_TX_WAITQ_RM(mpt, cmd);
8305 8323
8306 8324 NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8307 8325
8308 8326 return (cmd);
8309 8327 }
8310 8328
8311 8329 /*
8312 8330 * remove specified cmd from the middle of the tx_waitq.
8313 8331 */
8314 8332 static void
8315 8333 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8316 8334 {
8317 8335 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8318 8336
8319 8337 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8320 8338 (void *)mpt, (void *)cmd));
8321 8339
8322 8340 if (prevp == cmd) {
8323 8341 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8324 8342 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8325 8343
8326 8344 cmd->cmd_linkp = NULL;
8327 8345 cmd->cmd_queued = FALSE;
8328 8346 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8329 8347 (void *)mpt, (void *)cmd));
8330 8348 return;
8331 8349 }
8332 8350
8333 8351 while (prevp != NULL) {
8334 8352 if (prevp->cmd_linkp == cmd) {
8335 8353 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8336 8354 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8337 8355
8338 8356 cmd->cmd_linkp = NULL;
8339 8357 cmd->cmd_queued = FALSE;
8340 8358 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8341 8359 (void *)mpt, (void *)cmd));
8342 8360 return;
8343 8361 }
8344 8362 prevp = prevp->cmd_linkp;
8345 8363 }
8346 8364 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8347 8365 }
8348 8366
8349 8367 /*
8350 8368 * device and bus reset handling
8351 8369 *
8352 8370 * Notes:
8353 8371 * - RESET_ALL: reset the controller
8354 8372 * - RESET_TARGET: reset the target specified in scsi_address
8355 8373 */
8356 8374 static int
8357 8375 mptsas_scsi_reset(struct scsi_address *ap, int level)
8358 8376 {
8359 8377 mptsas_t *mpt = ADDR2MPT(ap);
8360 8378 int rval;
8361 8379 mptsas_tgt_private_t *tgt_private;
8362 8380 mptsas_target_t *ptgt = NULL;
8363 8381
8364 8382 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8365 8383 ptgt = tgt_private->t_private;
8366 8384 if (ptgt == NULL) {
8367 8385 return (FALSE);
8368 8386 }
8369 8387 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8370 8388 level));
8371 8389
8372 8390 mutex_enter(&mpt->m_mutex);
8373 8391 /*
8374 8392 * if we are not in panic set up a reset delay for this target
8375 8393 */
8376 8394 if (!ddi_in_panic()) {
8377 8395 mptsas_setup_bus_reset_delay(mpt);
8378 8396 } else {
8379 8397 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8380 8398 }
8381 8399 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8382 8400 mutex_exit(&mpt->m_mutex);
8383 8401
8384 8402 /*
8385 8403 * The transport layer expect to only see TRUE and
8386 8404 * FALSE. Therefore, we will adjust the return value
8387 8405 * if mptsas_do_scsi_reset returns FAILED.
8388 8406 */
8389 8407 if (rval == FAILED)
8390 8408 rval = FALSE;
8391 8409 return (rval);
8392 8410 }
8393 8411
8394 8412 static int
8395 8413 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8396 8414 {
8397 8415 int rval = FALSE;
8398 8416 uint8_t config, disk;
8399 8417 mptsas_slots_t *slots = mpt->m_active;
8400 8418
8401 8419 ASSERT(mutex_owned(&mpt->m_mutex));
8402 8420
8403 8421 if (mptsas_debug_resets) {
8404 8422 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8405 8423 devhdl);
8406 8424 }
8407 8425
8408 8426 /*
8409 8427 * Issue a Target Reset message to the target specified but not to a
8410 8428 * disk making up a raid volume. Just look through the RAID config
8411 8429 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8412 8430 * list, then don't reset this target.
8413 8431 */
8414 8432 for (config = 0; config < slots->m_num_raid_configs; config++) {
8415 8433 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8416 8434 if (devhdl == slots->m_raidconfig[config].
8417 8435 m_physdisk_devhdl[disk]) {
8418 8436 return (TRUE);
8419 8437 }
8420 8438 }
8421 8439 }
8422 8440
8423 8441 rval = mptsas_ioc_task_management(mpt,
8424 8442 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8425 8443
8426 8444 mptsas_doneq_empty(mpt);
8427 8445 return (rval);
8428 8446 }
8429 8447
8430 8448 static int
8431 8449 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8432 8450 void (*callback)(caddr_t), caddr_t arg)
8433 8451 {
8434 8452 mptsas_t *mpt = ADDR2MPT(ap);
8435 8453
8436 8454 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8437 8455
8438 8456 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8439 8457 &mpt->m_mutex, &mpt->m_reset_notify_listf));
8440 8458 }
8441 8459
8442 8460 static int
8443 8461 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8444 8462 {
8445 8463 dev_info_t *lun_dip = NULL;
8446 8464
8447 8465 ASSERT(sd != NULL);
8448 8466 ASSERT(name != NULL);
8449 8467 lun_dip = sd->sd_dev;
8450 8468 ASSERT(lun_dip != NULL);
8451 8469
8452 8470 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8453 8471 return (1);
8454 8472 } else {
8455 8473 return (0);
8456 8474 }
8457 8475 }
8458 8476
/*
 * tran_get_bus_addr entry point: the bus address and the name are the
 * same for this HBA, so simply delegate.
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
    return (mptsas_get_name(sd, name, len));
}
8464 8482
8465 8483 void
8466 8484 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8467 8485 {
8468 8486
8469 8487 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8470 8488
8471 8489 /*
8472 8490 * if the bus is draining/quiesced, no changes to the throttles
8473 8491 * are allowed. Not allowing change of throttles during draining
8474 8492 * limits error recovery but will reduce draining time
8475 8493 *
8476 8494 * all throttles should have been set to HOLD_THROTTLE
8477 8495 */
8478 8496 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8479 8497 return;
8480 8498 }
8481 8499
8482 8500 if (what == HOLD_THROTTLE) {
8483 8501 ptgt->m_t_throttle = HOLD_THROTTLE;
8484 8502 } else if (ptgt->m_reset_delay == 0) {
8485 8503 ptgt->m_t_throttle = what;
8486 8504 }
8487 8505 }
8488 8506
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 * Caller holds m_mutex; tx_waitq is protected by its own mutex, which is
 * taken and dropped here as needed.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		/* Defaults; overridden per tasktype below. */
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				if (cmd->cmd_tgt_addr->m_timeout < 0) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/*
				 * Drop the tx_waitq mutex across the
				 * completion calls to preserve lock order.
				 */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
8624 8652
/*
 * Clean up hba state, abort all outstanding commands and commands in
 * the waitq, and reset timeout of all targets.  Called with m_mutex
 * held (it is temporarily dropped to drain the taskqs at the end).
 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				/* Wake whichever thread is cv_wait'ing. */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/* Drop the tx_waitq mutex across completion handling. */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources.
	 */
	mutex_exit(&mpt->m_mutex);
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);
	mutex_enter(&mpt->m_mutex);
}
8708 8744
8709 8745 /*
8710 8746 * set pkt_reason and OR in pkt_statistics flag
8711 8747 */
8712 8748 static void
8713 8749 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8714 8750 uint_t stat)
8715 8751 {
8716 8752 #ifndef __lock_lint
8717 8753 _NOTE(ARGUNUSED(mpt))
8718 8754 #endif
8719 8755
8720 8756 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8721 8757 (void *)cmd, reason, stat));
8722 8758
8723 8759 if (cmd) {
8724 8760 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8725 8761 cmd->cmd_pkt->pkt_reason = reason;
8726 8762 }
8727 8763 cmd->cmd_pkt->pkt_statistics |= stat;
8728 8764 }
8729 8765 }
8730 8766
8731 8767 static void
8732 8768 mptsas_start_watch_reset_delay()
8733 8769 {
8734 8770 NDBG22(("mptsas_start_watch_reset_delay"));
8735 8771
8736 8772 mutex_enter(&mptsas_global_mutex);
8737 8773 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8738 8774 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8739 8775 drv_usectohz((clock_t)
8740 8776 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8741 8777 ASSERT(mptsas_reset_watch != NULL);
8742 8778 }
8743 8779 mutex_exit(&mptsas_global_mutex);
8744 8780 }
8745 8781
8746 8782 static void
8747 8783 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8748 8784 {
8749 8785 mptsas_target_t *ptgt = NULL;
8750 8786
8751 8787 NDBG22(("mptsas_setup_bus_reset_delay"));
8752 8788 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8753 8789 MPTSAS_HASH_FIRST);
8754 8790 while (ptgt != NULL) {
8755 8791 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8756 8792 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8757 8793
8758 8794 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8759 8795 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8760 8796 }
8761 8797
8762 8798 mptsas_start_watch_reset_delay();
8763 8799 }
8764 8800
8765 8801 /*
8766 8802 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8767 8803 * mpt instance for active reset delays
8768 8804 */
8769 8805 static void
8770 8806 mptsas_watch_reset_delay(void *arg)
8771 8807 {
8772 8808 #ifndef __lock_lint
8773 8809 _NOTE(ARGUNUSED(arg))
8774 8810 #endif
8775 8811
8776 8812 mptsas_t *mpt;
8777 8813 int not_done = 0;
8778 8814
8779 8815 NDBG22(("mptsas_watch_reset_delay"));
8780 8816
8781 8817 mutex_enter(&mptsas_global_mutex);
8782 8818 mptsas_reset_watch = 0;
8783 8819 mutex_exit(&mptsas_global_mutex);
8784 8820 rw_enter(&mptsas_global_rwlock, RW_READER);
8785 8821 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
8786 8822 if (mpt->m_tran == 0) {
8787 8823 continue;
8788 8824 }
8789 8825 mutex_enter(&mpt->m_mutex);
8790 8826 not_done += mptsas_watch_reset_delay_subr(mpt);
8791 8827 mutex_exit(&mpt->m_mutex);
8792 8828 }
8793 8829 rw_exit(&mptsas_global_rwlock);
8794 8830
8795 8831 if (not_done) {
8796 8832 mptsas_start_watch_reset_delay();
8797 8833 }
8798 8834 }
8799 8835
/*
 * Count down the reset delay of every target on one mpt instance.
 * Targets whose delay expires get their throttle restored to
 * MAX_THROTTLE; if any target restarted, kick the HBA to resume I/O.
 * Returns nonzero (-1 accumulated) while any delay is still pending,
 * 0 when all delays have expired.  Caller holds m_mutex.
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		if (ptgt->m_reset_delay != 0) {
			/* One watch tick has elapsed since the last call. */
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			if (ptgt->m_reset_delay <= 0) {
				/* Delay expired: release the throttle. */
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	if (restart > 0) {
		mptsas_restart_hba(mpt);
	}
	return (done);
}
8836 8872
8837 8873 #ifdef MPTSAS_TEST
8838 8874 static void
8839 8875 mptsas_test_reset(mptsas_t *mpt, int target)
8840 8876 {
8841 8877 mptsas_target_t *ptgt = NULL;
8842 8878
8843 8879 if (mptsas_rtest == target) {
8844 8880 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8845 8881 mptsas_rtest = -1;
8846 8882 }
8847 8883 if (mptsas_rtest == -1) {
8848 8884 NDBG22(("mptsas_test_reset success"));
8849 8885 }
8850 8886 }
8851 8887 }
8852 8888 #endif
8853 8889
8854 8890 /*
8855 8891 * abort handling:
8856 8892 *
8857 8893 * Notes:
8858 8894 * - if pkt is not NULL, abort just that command
8859 8895 * - if pkt is NULL, abort all outstanding commands for target
8860 8896 */
8861 8897 static int
8862 8898 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
8863 8899 {
8864 8900 mptsas_t *mpt = ADDR2MPT(ap);
8865 8901 int rval;
8866 8902 mptsas_tgt_private_t *tgt_private;
8867 8903 int target, lun;
8868 8904
8869 8905 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
8870 8906 tran_tgt_private;
8871 8907 ASSERT(tgt_private != NULL);
8872 8908 target = tgt_private->t_private->m_devhdl;
8873 8909 lun = tgt_private->t_lun;
8874 8910
8875 8911 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
8876 8912
8877 8913 mutex_enter(&mpt->m_mutex);
8878 8914 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
8879 8915 mutex_exit(&mpt->m_mutex);
8880 8916 return (rval);
8881 8917 }
8882 8918
/*
 * Abort the command pkt on the target/lun in ap.  If pkt is NULL,
 * abort all outstanding commands on that target/lun via an Abort Task
 * Set.  Returns TRUE if the abort succeeded, FALSE otherwise.  Each
 * aborted packet is returned to the target driver through its callback
 * with pkt_reason set to CMD_ABORTED.  Caller holds m_mutex.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/*
			 * Still on the wait queue; it never reached the
			 * hardware, so completing it locally is enough.
			 */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command, but only if it
		 * still occupies an active slot.
		 */
		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Deliver anything the abort pushed onto the done queue. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8962 8998
8963 8999 /*
8964 9000 * capability handling:
8965 9001 * (*tran_getcap). Get the capability named, and return its value.
8966 9002 */
8967 9003 static int
8968 9004 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
8969 9005 {
8970 9006 mptsas_t *mpt = ADDR2MPT(ap);
8971 9007 int ckey;
8972 9008 int rval = FALSE;
8973 9009
8974 9010 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
8975 9011 ap->a_target, cap, tgtonly));
8976 9012
8977 9013 mutex_enter(&mpt->m_mutex);
8978 9014
8979 9015 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
8980 9016 mutex_exit(&mpt->m_mutex);
8981 9017 return (UNDEFINED);
8982 9018 }
8983 9019
8984 9020 switch (ckey) {
8985 9021 case SCSI_CAP_DMA_MAX:
8986 9022 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
8987 9023 break;
8988 9024 case SCSI_CAP_ARQ:
8989 9025 rval = TRUE;
8990 9026 break;
8991 9027 case SCSI_CAP_MSG_OUT:
8992 9028 case SCSI_CAP_PARITY:
8993 9029 case SCSI_CAP_UNTAGGED_QING:
8994 9030 rval = TRUE;
8995 9031 break;
8996 9032 case SCSI_CAP_TAGGED_QING:
8997 9033 rval = TRUE;
8998 9034 break;
8999 9035 case SCSI_CAP_RESET_NOTIFICATION:
9000 9036 rval = TRUE;
9001 9037 break;
9002 9038 case SCSI_CAP_LINKED_CMDS:
9003 9039 rval = FALSE;
9004 9040 break;
9005 9041 case SCSI_CAP_QFULL_RETRIES:
9006 9042 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9007 9043 tran_tgt_private))->t_private->m_qfull_retries;
9008 9044 break;
9009 9045 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9010 9046 rval = drv_hztousec(((mptsas_tgt_private_t *)
9011 9047 (ap->a_hba_tran->tran_tgt_private))->
9012 9048 t_private->m_qfull_retry_interval) / 1000;
9013 9049 break;
9014 9050 case SCSI_CAP_CDB_LEN:
9015 9051 rval = CDB_GROUP4;
9016 9052 break;
9017 9053 case SCSI_CAP_INTERCONNECT_TYPE:
9018 9054 rval = INTERCONNECT_SAS;
9019 9055 break;
9020 9056 case SCSI_CAP_TRAN_LAYER_RETRIES:
9021 9057 if (mpt->m_ioc_capabilities &
9022 9058 MPI2_IOCFACTS_CAPABILITY_TLR)
9023 9059 rval = TRUE;
9024 9060 else
9025 9061 rval = FALSE;
9026 9062 break;
9027 9063 default:
9028 9064 rval = UNDEFINED;
9029 9065 break;
9030 9066 }
9031 9067
9032 9068 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9033 9069
9034 9070 mutex_exit(&mpt->m_mutex);
9035 9071 return (rval);
9036 9072 }
9037 9073
9038 9074 /*
9039 9075 * (*tran_setcap). Set the capability named to the value given.
9040 9076 */
9041 9077 static int
9042 9078 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9043 9079 {
9044 9080 mptsas_t *mpt = ADDR2MPT(ap);
9045 9081 int ckey;
9046 9082 int rval = FALSE;
9047 9083
9048 9084 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9049 9085 ap->a_target, cap, value, tgtonly));
9050 9086
9051 9087 if (!tgtonly) {
9052 9088 return (rval);
9053 9089 }
9054 9090
9055 9091 mutex_enter(&mpt->m_mutex);
9056 9092
9057 9093 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9058 9094 mutex_exit(&mpt->m_mutex);
9059 9095 return (UNDEFINED);
9060 9096 }
9061 9097
9062 9098 switch (ckey) {
9063 9099 case SCSI_CAP_DMA_MAX:
9064 9100 case SCSI_CAP_MSG_OUT:
9065 9101 case SCSI_CAP_PARITY:
9066 9102 case SCSI_CAP_INITIATOR_ID:
9067 9103 case SCSI_CAP_LINKED_CMDS:
9068 9104 case SCSI_CAP_UNTAGGED_QING:
9069 9105 case SCSI_CAP_RESET_NOTIFICATION:
9070 9106 /*
9071 9107 * None of these are settable via
9072 9108 * the capability interface.
9073 9109 */
9074 9110 break;
9075 9111 case SCSI_CAP_ARQ:
9076 9112 /*
9077 9113 * We cannot turn off arq so return false if asked to
9078 9114 */
9079 9115 if (value) {
9080 9116 rval = TRUE;
9081 9117 } else {
9082 9118 rval = FALSE;
9083 9119 }
9084 9120 break;
9085 9121 case SCSI_CAP_TAGGED_QING:
9086 9122 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9087 9123 (ap->a_hba_tran->tran_tgt_private))->t_private,
9088 9124 MAX_THROTTLE);
9089 9125 rval = TRUE;
9090 9126 break;
9091 9127 case SCSI_CAP_QFULL_RETRIES:
9092 9128 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9093 9129 t_private->m_qfull_retries = (uchar_t)value;
9094 9130 rval = TRUE;
9095 9131 break;
9096 9132 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9097 9133 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9098 9134 t_private->m_qfull_retry_interval =
9099 9135 drv_usectohz(value * 1000);
9100 9136 rval = TRUE;
9101 9137 break;
9102 9138 default:
9103 9139 rval = UNDEFINED;
9104 9140 break;
9105 9141 }
9106 9142 mutex_exit(&mpt->m_mutex);
9107 9143 return (rval);
9108 9144 }
9109 9145
9110 9146 /*
9111 9147 * Utility routine for mptsas_ifsetcap/ifgetcap
9112 9148 */
9113 9149 /*ARGSUSED*/
9114 9150 static int
9115 9151 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9116 9152 {
9117 9153 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9118 9154
9119 9155 if (!cap)
9120 9156 return (FALSE);
9121 9157
9122 9158 *cidxp = scsi_hba_lookup_capstr(cap);
9123 9159 return (TRUE);
9124 9160 }
9125 9161
9126 9162 static int
9127 9163 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9128 9164 {
9129 9165 mptsas_slots_t *old_active = mpt->m_active;
9130 9166 mptsas_slots_t *new_active;
9131 9167 size_t size;
9132 9168 int rval = -1, i;
9133 9169
9134 9170 /*
9135 9171 * if there are active commands, then we cannot
9136 9172 * change size of active slots array.
9137 9173 */
9138 9174 ASSERT(mpt->m_ncmds == 0);
9139 9175
9140 9176 size = MPTSAS_SLOTS_SIZE(mpt);
9141 9177 new_active = kmem_zalloc(size, flag);
9142 9178 if (new_active == NULL) {
9143 9179 NDBG1(("new active alloc failed"));
9144 9180 return (rval);
9145 9181 }
9146 9182 /*
9147 9183 * Since SMID 0 is reserved and the TM slot is reserved, the
9148 9184 * number of slots that can be used at any one time is
9149 9185 * m_max_requests - 2.
9150 9186 */
9151 9187 new_active->m_n_slots = (mpt->m_max_requests - 2);
9152 9188 new_active->m_size = size;
9153 9189 new_active->m_tags = 1;
9154 9190 if (old_active) {
9155 9191 new_active->m_tgttbl = old_active->m_tgttbl;
9156 9192 new_active->m_smptbl = old_active->m_smptbl;
9157 9193 new_active->m_num_raid_configs =
9158 9194 old_active->m_num_raid_configs;
9159 9195 for (i = 0; i < new_active->m_num_raid_configs; i++) {
9160 9196 new_active->m_raidconfig[i] =
9161 9197 old_active->m_raidconfig[i];
9162 9198 }
9163 9199 mptsas_free_active_slots(mpt);
9164 9200 }
9165 9201 mpt->m_active = new_active;
9166 9202 rval = 0;
9167 9203
9168 9204 return (rval);
9169 9205 }
9170 9206
9171 9207 static void
9172 9208 mptsas_free_active_slots(mptsas_t *mpt)
9173 9209 {
9174 9210 mptsas_slots_t *active = mpt->m_active;
9175 9211 size_t size;
9176 9212
9177 9213 if (active == NULL)
9178 9214 return;
9179 9215 size = active->m_size;
9180 9216 kmem_free(active, size);
9181 9217 mpt->m_active = NULL;
9182 9218 }
9183 9219
/*
 * Error logging, printing, and debug print routines.
 */
static char *mptsas_label = "mpt_sas";	/* label prepended by scsi_log() */
9188 9224
9189 9225 /*PRINTFLIKE3*/
9190 9226 void
9191 9227 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9192 9228 {
9193 9229 dev_info_t *dev;
9194 9230 va_list ap;
9195 9231
9196 9232 if (mpt) {
9197 9233 dev = mpt->m_dip;
9198 9234 } else {
9199 9235 dev = 0;
9200 9236 }
9201 9237
9202 9238 mutex_enter(&mptsas_log_mutex);
9203 9239
9204 9240 va_start(ap, fmt);
9205 9241 (void) vsprintf(mptsas_log_buf, fmt, ap);
9206 9242 va_end(ap);
9207 9243
9208 9244 if (level == CE_CONT) {
9209 9245 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9210 9246 } else {
9211 9247 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9212 9248 }
9213 9249
9214 9250 mutex_exit(&mptsas_log_mutex);
9215 9251 }
9216 9252
9217 9253 #ifdef MPTSAS_DEBUG
9218 9254 /*PRINTFLIKE1*/
9219 9255 void
9220 9256 mptsas_printf(char *fmt, ...)
9221 9257 {
9222 9258 dev_info_t *dev = 0;
9223 9259 va_list ap;
9224 9260
9225 9261 mutex_enter(&mptsas_log_mutex);
9226 9262
9227 9263 va_start(ap, fmt);
9228 9264 (void) vsprintf(mptsas_log_buf, fmt, ap);
9229 9265 va_end(ap);
9230 9266
9231 9267 #ifdef PROM_PRINTF
9232 9268 prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
9233 9269 #else
9234 9270 scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
9235 9271 #endif
9236 9272 mutex_exit(&mptsas_log_mutex);
9237 9273 }
9238 9274 #endif
9239 9275
9240 9276 /*
9241 9277 * timeout handling
9242 9278 */
9243 9279 static void
9244 9280 mptsas_watch(void *arg)
9245 9281 {
9246 9282 #ifndef __lock_lint
9247 9283 _NOTE(ARGUNUSED(arg))
9248 9284 #endif
9249 9285
9250 9286 mptsas_t *mpt;
9251 9287 uint32_t doorbell;
9252 9288
9253 9289 NDBG30(("mptsas_watch"));
9254 9290
9255 9291 rw_enter(&mptsas_global_rwlock, RW_READER);
9256 9292 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9257 9293
9258 9294 mutex_enter(&mpt->m_mutex);
9259 9295
9260 9296 /* Skip device if not powered on */
9261 9297 if (mpt->m_options & MPTSAS_OPT_PM) {
9262 9298 if (mpt->m_power_level == PM_LEVEL_D0) {
9263 9299 (void) pm_busy_component(mpt->m_dip, 0);
9264 9300 mpt->m_busy = 1;
9265 9301 } else {
9266 9302 mutex_exit(&mpt->m_mutex);
9267 9303 continue;
9268 9304 }
9269 9305 }
9270 9306
9271 9307 /*
9272 9308 * Check if controller is in a FAULT state. If so, reset it.
9273 9309 */
9274 9310 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9275 9311 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9276 9312 doorbell &= MPI2_DOORBELL_DATA_MASK;
9277 9313 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9278 9314 "code: %04x", doorbell);
9279 9315 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9280 9316 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9281 9317 mptsas_log(mpt, CE_WARN, "Reset failed"
9282 9318 "after fault was detected");
9283 9319 }
9284 9320 }
9285 9321
9286 9322 /*
9287 9323 * For now, always call mptsas_watchsubr.
9288 9324 */
9289 9325 mptsas_watchsubr(mpt);
9290 9326
9291 9327 if (mpt->m_options & MPTSAS_OPT_PM) {
9292 9328 mpt->m_busy = 0;
9293 9329 (void) pm_idle_component(mpt->m_dip, 0);
9294 9330 }
9295 9331
9296 9332 mutex_exit(&mpt->m_mutex);
9297 9333 }
9298 9334 rw_exit(&mptsas_global_rwlock);
9299 9335
9300 9336 mutex_enter(&mptsas_global_mutex);
9301 9337 if (mptsas_timeouts_enabled)
9302 9338 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9303 9339 mutex_exit(&mptsas_global_mutex);
9304 9340 }
9305 9341
/*
 * Per-instance watchdog work, called from mptsas_watch() once per tick
 * with m_mutex held.  Ages commands in the active slots (draining the
 * throttle of a target with a stuck command, and completing timed-out
 * passthrough/config/FW-diag commands), then walks every target to
 * restore throttle after qfull draining and to run the per-target
 * command-timeout / repeated-timeout accounting.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 * (hence the inclusive <= m_n_slots bound below)
	 */
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			/* Non-IOC (regular target) commands age here. */
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/* wake all potential waiters */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl,
				    MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			/*
			 * Age the window over which command timeouts are
			 * counted; once it exceeds mptsas_timeout_interval
			 * the per-target timeout count starts over.
			 */
			if (ptgt->m_timeout_count > 0) {
				ptgt->m_timeout_interval +=
				    mptsas_scsi_watchdog_tick;
			}
			if (ptgt->m_timeout_interval >
			    mptsas_timeout_interval) {
				ptgt->m_timeout_interval = 0;
				ptgt->m_timeout_count = 0;
			}

			if (ptgt->m_timeout < 0) {
				/*
				 * Command timed out.  If this target has
				 * timed out more than mptsas_timeout_threshold
				 * times within the interval, offline it via
				 * a faked DR event instead of resetting it
				 * yet again (re #8346).
				 */
				ptgt->m_timeout_count++;
				if (ptgt->m_timeout_count >
				    mptsas_timeout_threshold) {
					ptgt->m_timeout_count = 0;
					mptsas_kill_target(mpt, ptgt);
				} else {
					mptsas_cmd_timeout(mpt,
					    ptgt->m_devhdl);
				}
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl,
				    MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
9406 9458
/*
 * timeout recovery
 *
 * Called from mptsas_watchsubr() when a command on the given device
 * handle has exceeded its timeout; logs the event and attempts a
 * target-level reset via mptsas_do_scsi_reset().
 */
static void
mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
{

	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
	    "Target %d", devhdl);

	/*
	 * If the current target is not the target passed in,
	 * try to reset that target.
	 */
	NDBG29(("mptsas_cmd_timeout: device reset"));
	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
		/* reset failed; the watchdog will fire again next tick */
		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
		    "recovery failed!", devhdl);
	}
}
9428 9480
9429 9481 /*
9482 + * target causing too many timeouts
9483 + */
9484 +static void
9485 +mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt)
9486 +{
9487 + mptsas_topo_change_list_t *topo_node = NULL;
9488 +
9489 + NDBG29(("mptsas_tgt_kill: target=%d", ptgt->m_devhdl));
9490 + mptsas_log(mpt, CE_WARN, "timeout threshold exceeded for "
9491 + "Target %d", ptgt->m_devhdl);
9492 +
9493 + topo_node = kmem_zalloc(sizeof (mptsas_topo_change_list_t), KM_SLEEP);
9494 + topo_node->mpt = mpt;
9495 + topo_node->un.phymask = ptgt->m_phymask;
9496 + topo_node->event = MPTSAS_DR_EVENT_OFFLINE_TARGET;
9497 + topo_node->devhdl = ptgt->m_devhdl;
9498 + if (ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
9499 + topo_node->flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
9500 + else
9501 + topo_node->flags = MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
9502 + topo_node->object = NULL;
9503 +
9504 + /*
9505 + * Launch DR taskq to fake topology change
9506 + */
9507 + if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
9508 + mptsas_handle_dr, (void *)topo_node,
9509 + DDI_NOSLEEP)) != DDI_SUCCESS) {
9510 + mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
9511 + "for fake offline event failed. \n");
9512 + }
9513 +}
9514 +
9515 +/*
9430 9516 * Device / Hotplug control
9431 9517 */
9432 9518 static int
9433 9519 mptsas_scsi_quiesce(dev_info_t *dip)
9434 9520 {
9435 9521 mptsas_t *mpt;
9436 9522 scsi_hba_tran_t *tran;
9437 9523
9438 9524 tran = ddi_get_driver_private(dip);
9439 9525 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9440 9526 return (-1);
9441 9527
9442 9528 return (mptsas_quiesce_bus(mpt));
9443 9529 }
9444 9530
9445 9531 static int
9446 9532 mptsas_scsi_unquiesce(dev_info_t *dip)
9447 9533 {
9448 9534 mptsas_t *mpt;
9449 9535 scsi_hba_tran_t *tran;
9450 9536
9451 9537 tran = ddi_get_driver_private(dip);
9452 9538 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9453 9539 return (-1);
9454 9540
9455 9541 return (mptsas_unquiesce_bus(mpt));
9456 9542 }
9457 9543
/*
 * Quiesce the bus: hold the throttle of every target so no new commands
 * start, then wait (interruptibly) for outstanding commands to drain.
 * Returns 0 once the bus is quiesced; -1 if the wait was interrupted by
 * a signal, in which case throttles are restored and the HBA restarted.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/* checkdrain timer signals m_cv when the queue empties */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* restore full throttle on all targets */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl,
				    MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				/*
				 * Drop m_mutex before untimeout(): the
				 * checkdrain handler acquires m_mutex.
				 */
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9518 9604
9519 9605 static int
9520 9606 mptsas_unquiesce_bus(mptsas_t *mpt)
9521 9607 {
9522 9608 mptsas_target_t *ptgt = NULL;
9523 9609
9524 9610 NDBG28(("mptsas_unquiesce_bus"));
9525 9611 mutex_enter(&mpt->m_mutex);
9526 9612 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9527 9613 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9528 9614 MPTSAS_HASH_FIRST);
9529 9615 while (ptgt != NULL) {
9530 9616 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9531 9617
9532 9618 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9533 9619 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9534 9620 }
9535 9621 mptsas_restart_hba(mpt);
9536 9622 mutex_exit(&mpt->m_mutex);
9537 9623 return (0);
9538 9624 }
9539 9625
9540 9626 static void
9541 9627 mptsas_ncmds_checkdrain(void *arg)
9542 9628 {
9543 9629 mptsas_t *mpt = arg;
9544 9630 mptsas_target_t *ptgt = NULL;
9545 9631
9546 9632 mutex_enter(&mpt->m_mutex);
9547 9633 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
9548 9634 mpt->m_quiesce_timeid = 0;
9549 9635 if (mpt->m_ncmds == 0) {
9550 9636 /* Command queue has been drained */
9551 9637 cv_signal(&mpt->m_cv);
9552 9638 } else {
9553 9639 /*
9554 9640 * The throttle may have been reset because
9555 9641 * of a SCSI bus reset
9556 9642 */
9557 9643 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9558 9644 &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9559 9645 while (ptgt != NULL) {
9560 9646 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9561 9647
9562 9648 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9563 9649 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9564 9650 }
9565 9651
9566 9652 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9567 9653 mpt, (MPTSAS_QUIESCE_TIMEOUT *
9568 9654 drv_usectohz(1000000)));
9569 9655 }
9570 9656 }
9571 9657 mutex_exit(&mpt->m_mutex);
9572 9658 }
9573 9659
9574 9660 /*ARGSUSED*/
9575 9661 static void
9576 9662 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9577 9663 {
9578 9664 int i;
9579 9665 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9580 9666 char buf[128];
9581 9667
9582 9668 buf[0] = '\0';
9583 9669 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9584 9670 Tgt(cmd), Lun(cmd)));
9585 9671 (void) sprintf(&buf[0], "\tcdb=[");
9586 9672 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9587 9673 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9588 9674 }
9589 9675 (void) sprintf(&buf[strlen(buf)], " ]");
9590 9676 NDBG25(("?%s\n", buf));
9591 9677 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9592 9678 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9593 9679 cmd->cmd_pkt->pkt_state));
9594 9680 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
9595 9681 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
9596 9682 }
9597 9683
/*
 * Issue a pass-through request that already owns a request slot.
 *
 * Copies the caller-built request (mptsas_pt_request_t hanging off
 * pkt_ha_private) into the request frame for cmd's slot, appends simple
 * 64-bit SGEs for the optional data-out and data-in buffers, and for
 * SCSI IO / RAID pass-through requests sets up the tail of the frame as
 * the sense buffer.  The request descriptor is then posted to the IOC
 * via MPTSAS_START_CMD.  Completion is signalled asynchronously (the
 * caller in mptsas_do_passthru() waits on m_passthru_cv).
 */
static void
mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	caddr_t			memp;
	pMPI2RequestHeader_t	request_hdrp;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
	uint32_t		request_size, data_size, dataout_size;
	uint32_t		direction;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;
	uint32_t		request_desc_low, request_desc_high = 0;
	uint32_t		i, sense_bufp;
	uint8_t			desc_type;
	uint8_t			*request, function;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;

	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	request = pt->request;
	direction = pt->direction;
	request_size = pt->request_size;
	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/*
	 * Store the passthrough message in memory location
	 * corresponding to our slot number
	 */
	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
	request_hdrp = (pMPI2RequestHeader_t)memp;
	bzero(memp, mpt->m_req_frame_size);

	/* byte-wise copy of the caller-built request into the frame */
	for (i = 0; i < request_size; i++) {
		bcopy(request + i, memp + i, 1);
	}

	if (data_size || dataout_size) {
		pMpi2SGESimple64_t	sgep;
		uint32_t		sge_flags;

		/* SGEs are placed immediately after the request itself */
		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
		    request_size);
		if (dataout_size) {
			/* data-out SGE: host-to-IOC */
			sge_flags = dataout_size |
			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_END_OF_BUFFER |
			    MPI2_SGE_FLAGS_HOST_TO_IOC |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);
			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
			ddi_put32(acc_hdl, &sgep->Address.Low,
			    (uint32_t)(dataout_cookie.dmac_laddress &
			    0xffffffffull));
			ddi_put32(acc_hdl, &sgep->Address.High,
			    (uint32_t)(dataout_cookie.dmac_laddress
			    >> 32));
			sgep++;
		}
		/* final (data) SGE terminates the list */
		sge_flags = data_size;
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sgep->FlagsLength,
		    sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(data_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(data_cookie.dmac_laddress >> 32));
	}

	function = request_hdrp->Function;
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		pMpi2SCSIIORequest_t	scsi_io_req;

		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
		/*
		 * Put SGE for data and data_out buffer at the end of
		 * scsi_io_request message header.(64 bytes in total)
		 * Following above SGEs, the residual space will be
		 * used by sense data.
		 */
		ddi_put8(acc_hdl,
		    &scsi_io_req->SenseBufferLength,
		    (uint8_t)(request_size - 64));

		/* sense data lives 64 bytes into this slot's frame */
		sense_bufp = mpt->m_req_frame_dma_addr +
		    (mpt->m_req_frame_size * cmd->cmd_slot);
		sense_bufp += 64;
		ddi_put32(acc_hdl,
		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);

		/*
		 * Set SGLOffset0 value
		 */
		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);

		/*
		 * Setup descriptor info. RAID passthrough must use the
		 * default request descriptor which is already set, so if this
		 * is a SCSI IO request, change the descriptor to SCSI IO.
		 */
		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
			request_desc_high = (ddi_get16(acc_hdl,
			    &scsi_io_req->DevHandle) << 16);
		}
	}

	/*
	 * We must wait till the message has been completed before
	 * beginning the next message so we wait for this one to
	 * finish.
	 */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
9738 9824
9739 9825
9740 9826
/*
 * Execute an ioctl pass-through request synchronously.
 *
 * Copies the user request in, allocates DMA buffers for the optional
 * data-in/data-out payloads, obtains a command/packet from the pool,
 * queues the command (or starts it immediately if a slot is free) and
 * waits on m_passthru_cv for completion.  On success, the reply frame,
 * any sense data (for SCSI IO requests) and the data-in payload are
 * copied back to the user buffers.  Task-management requests are
 * special-cased through mptsas_ioc_task_management().
 *
 * Called with m_mutex held; the mutex is temporarily dropped around
 * every ddi_copyin/ddi_copyout.  Returns 0 or an errno value.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				i, status = 0, pt_flags = 0, rv = 0;
	int				rvalue;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* default (CONTEXT) reply is the zeroed local rep_msg */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/* task management requests take a separate, synchronous path */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/* allocate (and for writes, fill) the data-in DMA buffer */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* allocate and fill the data-out DMA buffer */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* CFLAG_FINISHED is set by completion or by the watchdog timeout */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/* copy the reply (and sense, for SCSI IO) back to the user */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/* sense data follows the first 64 bytes of the frame */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* remove_cmd returns the cmd to the pool, so skip return_to_pool */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* a timed-out passthru leaves the IOC suspect; restart it */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
10046 10132
10047 10133 static int
10048 10134 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10049 10135 {
10050 10136 /*
10051 10137 * If timeout is 0, set timeout to default of 60 seconds.
10052 10138 */
10053 10139 if (data->Timeout == 0) {
10054 10140 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10055 10141 }
10056 10142
10057 10143 if (((data->DataSize == 0) &&
10058 10144 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10059 10145 ((data->DataSize != 0) &&
10060 10146 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10061 10147 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10062 10148 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10063 10149 (data->DataOutSize != 0))))) {
10064 10150 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10065 10151 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10066 10152 } else {
10067 10153 data->DataOutSize = 0;
10068 10154 }
10069 10155 /*
10070 10156 * Send passthru request messages
10071 10157 */
10072 10158 return (mptsas_do_passthru(mpt,
10073 10159 (uint8_t *)((uintptr_t)data->PtrRequest),
10074 10160 (uint8_t *)((uintptr_t)data->PtrReply),
10075 10161 (uint8_t *)((uintptr_t)data->PtrData),
10076 10162 data->RequestSize, data->ReplySize,
10077 10163 data->DataSize, data->DataDirection,
10078 10164 (uint8_t *)((uintptr_t)data->PtrDataOut),
10079 10165 data->DataOutSize, data->Timeout, mode));
10080 10166 } else {
10081 10167 return (EINVAL);
10082 10168 }
10083 10169 }
10084 10170
10085 10171 static uint8_t
10086 10172 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10087 10173 {
10088 10174 uint8_t index;
10089 10175
10090 10176 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10091 10177 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10092 10178 return (index);
10093 10179 }
10094 10180 }
10095 10181
10096 10182 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10097 10183 }
10098 10184
/*
 * Build a FW diagnostic request (buffer POST or RELEASE, selected by
 * diag->function) directly in the IOC request frame that belongs to this
 * command's slot, then start the command on the IOC.
 *
 * The request parameters travel in the mptsas_diag_request_t hung off
 * pkt->pkt_ha_private by the caller.  Completion is reported through the
 * normal reply path; this routine does not wait.  Must be called with
 * m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t pDiag_post_msg;
	pMpi2DiagReleaseRequest_t pDiag_release_msg;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	mptsas_diag_request_t *diag = pkt->pkt_ha_private;
	uint32_t request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 * All fields are written through the request-frame access handle
	 * (ddi_put*) so the IOC sees them in the proper endianness.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* Locate this slot's request frame and clear it. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the caller-supplied product-specific words. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE request: only function and buffer type needed. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message: sync the request frame to the device, build
	 * the default-type request descriptor for this slot, and ring the
	 * IOC doorbell via MPTSAS_START_CMD.
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	/* Report (but do not fail on) DMA/access handle faults via FMA. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10170 10256
10171 10257 static int
10172 10258 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10173 10259 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10174 10260 {
10175 10261 mptsas_diag_request_t diag;
10176 10262 int status, slot_num, post_flags = 0;
10177 10263 mptsas_cmd_t *cmd = NULL;
10178 10264 struct scsi_pkt *pkt;
10179 10265 pMpi2DiagBufferPostReply_t reply;
10180 10266 uint16_t iocstatus;
10181 10267 uint32_t iocloginfo, transfer_length;
10182 10268
10183 10269 /*
10184 10270 * If buffer is not enabled, just leave.
10185 10271 */
10186 10272 *return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
10187 10273 if (!pBuffer->enabled) {
10188 10274 status = DDI_FAILURE;
10189 10275 goto out;
10190 10276 }
10191 10277
10192 10278 /*
10193 10279 * Clear some flags initially.
10194 10280 */
10195 10281 pBuffer->force_release = FALSE;
10196 10282 pBuffer->valid_data = FALSE;
10197 10283 pBuffer->owned_by_firmware = FALSE;
10198 10284
10199 10285 /*
10200 10286 * Get a cmd buffer from the cmd buffer pool
10201 10287 */
10202 10288 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10203 10289 status = DDI_FAILURE;
10204 10290 mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
10205 10291 goto out;
10206 10292 }
10207 10293 post_flags |= MPTSAS_REQUEST_POOL_CMD;
10208 10294
10209 10295 bzero((caddr_t)cmd, sizeof (*cmd));
10210 10296 bzero((caddr_t)pkt, scsi_pkt_size());
10211 10297
10212 10298 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10213 10299
10214 10300 diag.pBuffer = pBuffer;
10215 10301 diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
10216 10302
10217 10303 /*
10218 10304 * Form a blank cmd/pkt to store the acknowledgement message
10219 10305 */
10220 10306 pkt->pkt_ha_private = (opaque_t)&diag;
10221 10307 pkt->pkt_flags = FLAG_HEAD;
10222 10308 pkt->pkt_time = 60;
10223 10309 cmd->cmd_pkt = pkt;
10224 10310 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10225 10311
10226 10312 /*
10227 10313 * Save the command in a slot
10228 10314 */
10229 10315 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10230 10316 /*
10231 10317 * Once passthru command get slot, set cmd_flags
10232 10318 * CFLAG_PREPARED.
10233 10319 */
10234 10320 cmd->cmd_flags |= CFLAG_PREPARED;
10235 10321 mptsas_start_diag(mpt, cmd);
10236 10322 } else {
10237 10323 mptsas_waitq_add(mpt, cmd);
10238 10324 }
10239 10325
10240 10326 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10241 10327 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10242 10328 }
10243 10329
10244 10330 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10245 10331 status = DDI_FAILURE;
10246 10332 mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
10247 10333 goto out;
10248 10334 }
10249 10335
10250 10336 /*
10251 10337 * cmd_rfm points to the reply message if a reply was given. Check the
10252 10338 * IOCStatus to make sure everything went OK with the FW diag request
10253 10339 * and set buffer flags.
10254 10340 */
10255 10341 if (cmd->cmd_rfm) {
10256 10342 post_flags |= MPTSAS_ADDRESS_REPLY;
10257 10343 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10258 10344 DDI_DMA_SYNC_FORCPU);
10259 10345 reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
10260 10346 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10261 10347
10262 10348 /*
10263 10349 * Get the reply message data
10264 10350 */
10265 10351 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10266 10352 &reply->IOCStatus);
10267 10353 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10268 10354 &reply->IOCLogInfo);
10269 10355 transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
10270 10356 &reply->TransferLength);
10271 10357
10272 10358 /*
10273 10359 * If post failed quit.
10274 10360 */
10275 10361 if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
10276 10362 status = DDI_FAILURE;
10277 10363 NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
10278 10364 "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
10279 10365 iocloginfo, transfer_length));
10280 10366 goto out;
10281 10367 }
10282 10368
10283 10369 /*
10284 10370 * Post was successful.
10285 10371 */
10286 10372 pBuffer->valid_data = TRUE;
10287 10373 pBuffer->owned_by_firmware = TRUE;
10288 10374 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10289 10375 status = DDI_SUCCESS;
10290 10376 }
10291 10377
10292 10378 out:
10293 10379 /*
10294 10380 * Put the reply frame back on the free queue, increment the free
10295 10381 * index, and write the new index to the free index register. But only
10296 10382 * if this reply is an ADDRESS reply.
10297 10383 */
10298 10384 if (post_flags & MPTSAS_ADDRESS_REPLY) {
10299 10385 ddi_put32(mpt->m_acc_free_queue_hdl,
10300 10386 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10301 10387 cmd->cmd_rfm);
10302 10388 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10303 10389 DDI_DMA_SYNC_FORDEV);
10304 10390 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10305 10391 mpt->m_free_index = 0;
10306 10392 }
10307 10393 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10308 10394 mpt->m_free_index);
10309 10395 }
10310 10396 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10311 10397 mptsas_remove_cmd(mpt, cmd);
10312 10398 post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10313 10399 }
10314 10400 if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
10315 10401 mptsas_return_to_pool(mpt, cmd);
10316 10402 }
10317 10403
10318 10404 return (status);
10319 10405 }
10320 10406
10321 10407 static int
10322 10408 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
10323 10409 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
10324 10410 uint32_t diag_type)
10325 10411 {
10326 10412 mptsas_diag_request_t diag;
10327 10413 int status, slot_num, rel_flags = 0;
10328 10414 mptsas_cmd_t *cmd = NULL;
10329 10415 struct scsi_pkt *pkt;
10330 10416 pMpi2DiagReleaseReply_t reply;
10331 10417 uint16_t iocstatus;
10332 10418 uint32_t iocloginfo;
10333 10419
10334 10420 /*
10335 10421 * If buffer is not enabled, just leave.
10336 10422 */
10337 10423 *return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
10338 10424 if (!pBuffer->enabled) {
10339 10425 mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
10340 10426 "by the IOC");
10341 10427 status = DDI_FAILURE;
10342 10428 goto out;
10343 10429 }
10344 10430
10345 10431 /*
10346 10432 * Clear some flags initially.
10347 10433 */
10348 10434 pBuffer->force_release = FALSE;
10349 10435 pBuffer->valid_data = FALSE;
10350 10436 pBuffer->owned_by_firmware = FALSE;
10351 10437
10352 10438 /*
10353 10439 * Get a cmd buffer from the cmd buffer pool
10354 10440 */
10355 10441 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10356 10442 status = DDI_FAILURE;
10357 10443 mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
10358 10444 "Diag");
10359 10445 goto out;
10360 10446 }
10361 10447 rel_flags |= MPTSAS_REQUEST_POOL_CMD;
10362 10448
10363 10449 bzero((caddr_t)cmd, sizeof (*cmd));
10364 10450 bzero((caddr_t)pkt, scsi_pkt_size());
10365 10451
10366 10452 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10367 10453
10368 10454 diag.pBuffer = pBuffer;
10369 10455 diag.function = MPI2_FUNCTION_DIAG_RELEASE;
10370 10456
10371 10457 /*
10372 10458 * Form a blank cmd/pkt to store the acknowledgement message
10373 10459 */
10374 10460 pkt->pkt_ha_private = (opaque_t)&diag;
10375 10461 pkt->pkt_flags = FLAG_HEAD;
10376 10462 pkt->pkt_time = 60;
10377 10463 cmd->cmd_pkt = pkt;
10378 10464 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10379 10465
10380 10466 /*
10381 10467 * Save the command in a slot
10382 10468 */
10383 10469 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10384 10470 /*
10385 10471 * Once passthru command get slot, set cmd_flags
10386 10472 * CFLAG_PREPARED.
10387 10473 */
10388 10474 cmd->cmd_flags |= CFLAG_PREPARED;
10389 10475 mptsas_start_diag(mpt, cmd);
10390 10476 } else {
10391 10477 mptsas_waitq_add(mpt, cmd);
10392 10478 }
10393 10479
10394 10480 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10395 10481 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10396 10482 }
10397 10483
10398 10484 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10399 10485 status = DDI_FAILURE;
10400 10486 mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
10401 10487 goto out;
10402 10488 }
10403 10489
10404 10490 /*
10405 10491 * cmd_rfm points to the reply message if a reply was given. Check the
10406 10492 * IOCStatus to make sure everything went OK with the FW diag request
10407 10493 * and set buffer flags.
10408 10494 */
10409 10495 if (cmd->cmd_rfm) {
10410 10496 rel_flags |= MPTSAS_ADDRESS_REPLY;
10411 10497 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10412 10498 DDI_DMA_SYNC_FORCPU);
10413 10499 reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
10414 10500 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10415 10501
10416 10502 /*
10417 10503 * Get the reply message data
10418 10504 */
10419 10505 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10420 10506 &reply->IOCStatus);
10421 10507 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10422 10508 &reply->IOCLogInfo);
10423 10509
10424 10510 /*
10425 10511 * If release failed quit.
10426 10512 */
10427 10513 if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
10428 10514 pBuffer->owned_by_firmware) {
10429 10515 status = DDI_FAILURE;
10430 10516 NDBG13(("release FW Diag Buffer failed: "
10431 10517 "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
10432 10518 iocloginfo));
10433 10519 goto out;
10434 10520 }
10435 10521
10436 10522 /*
10437 10523 * Release was successful.
10438 10524 */
10439 10525 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10440 10526 status = DDI_SUCCESS;
10441 10527
10442 10528 /*
10443 10529 * If this was for an UNREGISTER diag type command, clear the
10444 10530 * unique ID.
10445 10531 */
10446 10532 if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
10447 10533 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10448 10534 }
10449 10535 }
10450 10536
10451 10537 out:
10452 10538 /*
10453 10539 * Put the reply frame back on the free queue, increment the free
10454 10540 * index, and write the new index to the free index register. But only
10455 10541 * if this reply is an ADDRESS reply.
10456 10542 */
10457 10543 if (rel_flags & MPTSAS_ADDRESS_REPLY) {
10458 10544 ddi_put32(mpt->m_acc_free_queue_hdl,
10459 10545 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10460 10546 cmd->cmd_rfm);
10461 10547 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10462 10548 DDI_DMA_SYNC_FORDEV);
10463 10549 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10464 10550 mpt->m_free_index = 0;
10465 10551 }
10466 10552 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10467 10553 mpt->m_free_index);
10468 10554 }
10469 10555 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10470 10556 mptsas_remove_cmd(mpt, cmd);
10471 10557 rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10472 10558 }
10473 10559 if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
10474 10560 mptsas_return_to_pool(mpt, cmd);
10475 10561 }
10476 10562
10477 10563 return (status);
10478 10564 }
10479 10565
10480 10566 static int
10481 10567 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
10482 10568 uint32_t *return_code)
10483 10569 {
10484 10570 mptsas_fw_diagnostic_buffer_t *pBuffer;
10485 10571 uint8_t extended_type, buffer_type, i;
10486 10572 uint32_t buffer_size;
10487 10573 uint32_t unique_id;
10488 10574 int status;
10489 10575
10490 10576 ASSERT(mutex_owned(&mpt->m_mutex));
10491 10577
10492 10578 extended_type = diag_register->ExtendedType;
10493 10579 buffer_type = diag_register->BufferType;
10494 10580 buffer_size = diag_register->RequestedBufferSize;
10495 10581 unique_id = diag_register->UniqueId;
10496 10582
10497 10583 /*
10498 10584 * Check for valid buffer type
10499 10585 */
10500 10586 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
10501 10587 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10502 10588 return (DDI_FAILURE);
10503 10589 }
10504 10590
10505 10591 /*
10506 10592 * Get the current buffer and look up the unique ID. The unique ID
10507 10593 * should not be found. If it is, the ID is already in use.
10508 10594 */
10509 10595 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10510 10596 pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
10511 10597 if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10512 10598 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10513 10599 return (DDI_FAILURE);
10514 10600 }
10515 10601
10516 10602 /*
10517 10603 * The buffer's unique ID should not be registered yet, and the given
10518 10604 * unique ID cannot be 0.
10519 10605 */
10520 10606 if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
10521 10607 (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10522 10608 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10523 10609 return (DDI_FAILURE);
10524 10610 }
10525 10611
10526 10612 /*
10527 10613 * If this buffer is already posted as immediate, just change owner.
10528 10614 */
10529 10615 if (pBuffer->immediate && pBuffer->owned_by_firmware &&
10530 10616 (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10531 10617 pBuffer->immediate = FALSE;
10532 10618 pBuffer->unique_id = unique_id;
10533 10619 return (DDI_SUCCESS);
10534 10620 }
10535 10621
10536 10622 /*
10537 10623 * Post a new buffer after checking if it's enabled. The DMA buffer
10538 10624 * that is allocated will be contiguous (sgl_len = 1).
10539 10625 */
10540 10626 if (!pBuffer->enabled) {
10541 10627 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10542 10628 return (DDI_FAILURE);
10543 10629 }
10544 10630 bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
10545 10631 pBuffer->buffer_data.size = buffer_size;
10546 10632 if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
10547 10633 mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
10548 10634 "diag buffer: size = %d bytes", buffer_size);
10549 10635 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10550 10636 return (DDI_FAILURE);
10551 10637 }
10552 10638
10553 10639 /*
10554 10640 * Copy the given info to the diag buffer and post the buffer.
10555 10641 */
10556 10642 pBuffer->buffer_type = buffer_type;
10557 10643 pBuffer->immediate = FALSE;
10558 10644 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
10559 10645 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
10560 10646 i++) {
10561 10647 pBuffer->product_specific[i] =
10562 10648 diag_register->ProductSpecific[i];
10563 10649 }
10564 10650 }
10565 10651 pBuffer->extended_type = extended_type;
10566 10652 pBuffer->unique_id = unique_id;
10567 10653 status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
10568 10654
10569 10655 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10570 10656 DDI_SUCCESS) {
10571 10657 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
10572 10658 "mptsas_diag_register.");
10573 10659 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10574 10660 status = DDI_FAILURE;
10575 10661 }
10576 10662
10577 10663 /*
10578 10664 * In case there was a failure, free the DMA buffer.
10579 10665 */
10580 10666 if (status == DDI_FAILURE) {
10581 10667 mptsas_dma_free(&pBuffer->buffer_data);
10582 10668 }
10583 10669
10584 10670 return (status);
10585 10671 }
10586 10672
10587 10673 static int
10588 10674 mptsas_diag_unregister(mptsas_t *mpt,
10589 10675 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10590 10676 {
10591 10677 mptsas_fw_diagnostic_buffer_t *pBuffer;
10592 10678 uint8_t i;
10593 10679 uint32_t unique_id;
10594 10680 int status;
10595 10681
10596 10682 ASSERT(mutex_owned(&mpt->m_mutex));
10597 10683
10598 10684 unique_id = diag_unregister->UniqueId;
10599 10685
10600 10686 /*
10601 10687 * Get the current buffer and look up the unique ID. The unique ID
10602 10688 * should be there.
10603 10689 */
10604 10690 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10605 10691 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10606 10692 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10607 10693 return (DDI_FAILURE);
10608 10694 }
10609 10695
10610 10696 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10611 10697
10612 10698 /*
10613 10699 * Try to release the buffer from FW before freeing it. If release
10614 10700 * fails, don't free the DMA buffer in case FW tries to access it
10615 10701 * later. If buffer is not owned by firmware, can't release it.
10616 10702 */
10617 10703 if (!pBuffer->owned_by_firmware) {
10618 10704 status = DDI_SUCCESS;
10619 10705 } else {
10620 10706 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10621 10707 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10622 10708 }
10623 10709
10624 10710 /*
10625 10711 * At this point, return the current status no matter what happens with
10626 10712 * the DMA buffer.
10627 10713 */
10628 10714 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10629 10715 if (status == DDI_SUCCESS) {
10630 10716 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10631 10717 DDI_SUCCESS) {
10632 10718 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10633 10719 "in mptsas_diag_unregister.");
10634 10720 ddi_fm_service_impact(mpt->m_dip,
10635 10721 DDI_SERVICE_UNAFFECTED);
10636 10722 }
10637 10723 mptsas_dma_free(&pBuffer->buffer_data);
10638 10724 }
10639 10725
10640 10726 return (status);
10641 10727 }
10642 10728
10643 10729 static int
10644 10730 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10645 10731 uint32_t *return_code)
10646 10732 {
10647 10733 mptsas_fw_diagnostic_buffer_t *pBuffer;
10648 10734 uint8_t i;
10649 10735 uint32_t unique_id;
10650 10736
10651 10737 ASSERT(mutex_owned(&mpt->m_mutex));
10652 10738
10653 10739 unique_id = diag_query->UniqueId;
10654 10740
10655 10741 /*
10656 10742 * If ID is valid, query on ID.
10657 10743 * If ID is invalid, query on buffer type.
10658 10744 */
10659 10745 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10660 10746 i = diag_query->BufferType;
10661 10747 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10662 10748 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10663 10749 return (DDI_FAILURE);
10664 10750 }
10665 10751 } else {
10666 10752 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10667 10753 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10668 10754 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10669 10755 return (DDI_FAILURE);
10670 10756 }
10671 10757 }
10672 10758
10673 10759 /*
10674 10760 * Fill query structure with the diag buffer info.
10675 10761 */
10676 10762 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10677 10763 diag_query->BufferType = pBuffer->buffer_type;
10678 10764 diag_query->ExtendedType = pBuffer->extended_type;
10679 10765 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10680 10766 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10681 10767 i++) {
10682 10768 diag_query->ProductSpecific[i] =
10683 10769 pBuffer->product_specific[i];
10684 10770 }
10685 10771 }
10686 10772 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10687 10773 diag_query->DriverAddedBufferSize = 0;
10688 10774 diag_query->UniqueId = pBuffer->unique_id;
10689 10775 diag_query->ApplicationFlags = 0;
10690 10776 diag_query->DiagnosticFlags = 0;
10691 10777
10692 10778 /*
10693 10779 * Set/Clear application flags
10694 10780 */
10695 10781 if (pBuffer->immediate) {
10696 10782 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10697 10783 } else {
10698 10784 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10699 10785 }
10700 10786 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10701 10787 diag_query->ApplicationFlags |=
10702 10788 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10703 10789 } else {
10704 10790 diag_query->ApplicationFlags &=
10705 10791 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10706 10792 }
10707 10793 if (pBuffer->owned_by_firmware) {
10708 10794 diag_query->ApplicationFlags |=
10709 10795 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10710 10796 } else {
10711 10797 diag_query->ApplicationFlags &=
10712 10798 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10713 10799 }
10714 10800
10715 10801 return (DDI_SUCCESS);
10716 10802 }
10717 10803
10718 10804 static int
10719 10805 mptsas_diag_read_buffer(mptsas_t *mpt,
10720 10806 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10721 10807 uint32_t *return_code, int ioctl_mode)
10722 10808 {
10723 10809 mptsas_fw_diagnostic_buffer_t *pBuffer;
10724 10810 uint8_t i, *pData;
10725 10811 uint32_t unique_id, byte;
10726 10812 int status;
10727 10813
10728 10814 ASSERT(mutex_owned(&mpt->m_mutex));
10729 10815
10730 10816 unique_id = diag_read_buffer->UniqueId;
10731 10817
10732 10818 /*
10733 10819 * Get the current buffer and look up the unique ID. The unique ID
10734 10820 * should be there.
10735 10821 */
10736 10822 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10737 10823 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10738 10824 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10739 10825 return (DDI_FAILURE);
10740 10826 }
10741 10827
10742 10828 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10743 10829
10744 10830 /*
10745 10831 * Make sure requested read is within limits
10746 10832 */
10747 10833 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10748 10834 pBuffer->buffer_data.size) {
10749 10835 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10750 10836 return (DDI_FAILURE);
10751 10837 }
10752 10838
10753 10839 /*
10754 10840 * Copy the requested data from DMA to the diag_read_buffer. The DMA
10755 10841 * buffer that was allocated is one contiguous buffer.
10756 10842 */
10757 10843 pData = (uint8_t *)(pBuffer->buffer_data.memp +
10758 10844 diag_read_buffer->StartingOffset);
10759 10845 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10760 10846 DDI_DMA_SYNC_FORCPU);
10761 10847 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10762 10848 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10763 10849 != 0) {
10764 10850 return (DDI_FAILURE);
10765 10851 }
10766 10852 }
10767 10853 diag_read_buffer->Status = 0;
10768 10854
10769 10855 /*
10770 10856 * Set or clear the Force Release flag.
10771 10857 */
10772 10858 if (pBuffer->force_release) {
10773 10859 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10774 10860 } else {
10775 10861 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10776 10862 }
10777 10863
10778 10864 /*
10779 10865 * If buffer is to be reregistered, make sure it's not already owned by
10780 10866 * firmware first.
10781 10867 */
10782 10868 status = DDI_SUCCESS;
10783 10869 if (!pBuffer->owned_by_firmware) {
10784 10870 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10785 10871 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10786 10872 return_code);
10787 10873 }
10788 10874 }
10789 10875
10790 10876 return (status);
10791 10877 }
10792 10878
10793 10879 static int
10794 10880 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10795 10881 uint32_t *return_code)
10796 10882 {
10797 10883 mptsas_fw_diagnostic_buffer_t *pBuffer;
10798 10884 uint8_t i;
10799 10885 uint32_t unique_id;
10800 10886 int status;
10801 10887
10802 10888 ASSERT(mutex_owned(&mpt->m_mutex));
10803 10889
10804 10890 unique_id = diag_release->UniqueId;
10805 10891
10806 10892 /*
10807 10893 * Get the current buffer and look up the unique ID. The unique ID
10808 10894 * should be there.
10809 10895 */
10810 10896 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10811 10897 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10812 10898 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10813 10899 return (DDI_FAILURE);
10814 10900 }
10815 10901
10816 10902 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10817 10903
10818 10904 /*
10819 10905 * If buffer is not owned by firmware, it's already been released.
10820 10906 */
10821 10907 if (!pBuffer->owned_by_firmware) {
10822 10908 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10823 10909 return (DDI_FAILURE);
10824 10910 }
10825 10911
10826 10912 /*
10827 10913 * Release the buffer.
10828 10914 */
10829 10915 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10830 10916 MPTSAS_FW_DIAG_TYPE_RELEASE);
10831 10917 return (status);
10832 10918 }
10833 10919
10834 10920 static int
10835 10921 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10836 10922 uint32_t length, uint32_t *return_code, int ioctl_mode)
10837 10923 {
10838 10924 mptsas_fw_diag_register_t diag_register;
10839 10925 mptsas_fw_diag_unregister_t diag_unregister;
10840 10926 mptsas_fw_diag_query_t diag_query;
10841 10927 mptsas_diag_read_buffer_t diag_read_buffer;
10842 10928 mptsas_fw_diag_release_t diag_release;
10843 10929 int status = DDI_SUCCESS;
10844 10930 uint32_t original_return_code, read_buf_len;
10845 10931
10846 10932 ASSERT(mutex_owned(&mpt->m_mutex));
10847 10933
10848 10934 original_return_code = *return_code;
10849 10935 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10850 10936
10851 10937 switch (action) {
10852 10938 case MPTSAS_FW_DIAG_TYPE_REGISTER:
10853 10939 if (!length) {
10854 10940 *return_code =
10855 10941 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10856 10942 status = DDI_FAILURE;
10857 10943 break;
10858 10944 }
10859 10945 if (ddi_copyin(diag_action, &diag_register,
10860 10946 sizeof (diag_register), ioctl_mode) != 0) {
10861 10947 return (DDI_FAILURE);
10862 10948 }
10863 10949 status = mptsas_diag_register(mpt, &diag_register,
10864 10950 return_code);
10865 10951 break;
10866 10952
10867 10953 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10868 10954 if (length < sizeof (diag_unregister)) {
10869 10955 *return_code =
10870 10956 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10871 10957 status = DDI_FAILURE;
10872 10958 break;
10873 10959 }
10874 10960 if (ddi_copyin(diag_action, &diag_unregister,
10875 10961 sizeof (diag_unregister), ioctl_mode) != 0) {
10876 10962 return (DDI_FAILURE);
10877 10963 }
10878 10964 status = mptsas_diag_unregister(mpt, &diag_unregister,
10879 10965 return_code);
10880 10966 break;
10881 10967
10882 10968 case MPTSAS_FW_DIAG_TYPE_QUERY:
10883 10969 if (length < sizeof (diag_query)) {
10884 10970 *return_code =
10885 10971 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10886 10972 status = DDI_FAILURE;
10887 10973 break;
10888 10974 }
10889 10975 if (ddi_copyin(diag_action, &diag_query,
10890 10976 sizeof (diag_query), ioctl_mode) != 0) {
10891 10977 return (DDI_FAILURE);
10892 10978 }
10893 10979 status = mptsas_diag_query(mpt, &diag_query,
10894 10980 return_code);
10895 10981 if (status == DDI_SUCCESS) {
10896 10982 if (ddi_copyout(&diag_query, diag_action,
10897 10983 sizeof (diag_query), ioctl_mode) != 0) {
10898 10984 return (DDI_FAILURE);
10899 10985 }
10900 10986 }
10901 10987 break;
10902 10988
10903 10989 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10904 10990 if (ddi_copyin(diag_action, &diag_read_buffer,
10905 10991 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10906 10992 return (DDI_FAILURE);
10907 10993 }
10908 10994 read_buf_len = sizeof (diag_read_buffer) -
10909 10995 sizeof (diag_read_buffer.DataBuffer) +
10910 10996 diag_read_buffer.BytesToRead;
10911 10997 if (length < read_buf_len) {
10912 10998 *return_code =
10913 10999 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10914 11000 status = DDI_FAILURE;
10915 11001 break;
10916 11002 }
10917 11003 status = mptsas_diag_read_buffer(mpt,
10918 11004 &diag_read_buffer, diag_action +
10919 11005 sizeof (diag_read_buffer) - 4, return_code,
10920 11006 ioctl_mode);
10921 11007 if (status == DDI_SUCCESS) {
10922 11008 if (ddi_copyout(&diag_read_buffer, diag_action,
10923 11009 sizeof (diag_read_buffer) - 4, ioctl_mode)
10924 11010 != 0) {
10925 11011 return (DDI_FAILURE);
10926 11012 }
10927 11013 }
10928 11014 break;
10929 11015
10930 11016 case MPTSAS_FW_DIAG_TYPE_RELEASE:
10931 11017 if (length < sizeof (diag_release)) {
10932 11018 *return_code =
10933 11019 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10934 11020 status = DDI_FAILURE;
10935 11021 break;
10936 11022 }
10937 11023 if (ddi_copyin(diag_action, &diag_release,
10938 11024 sizeof (diag_release), ioctl_mode) != 0) {
10939 11025 return (DDI_FAILURE);
10940 11026 }
10941 11027 status = mptsas_diag_release(mpt, &diag_release,
10942 11028 return_code);
10943 11029 break;
10944 11030
10945 11031 default:
10946 11032 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10947 11033 status = DDI_FAILURE;
10948 11034 break;
10949 11035 }
10950 11036
10951 11037 if ((status == DDI_FAILURE) &&
10952 11038 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
10953 11039 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
10954 11040 status = DDI_SUCCESS;
10955 11041 }
10956 11042
10957 11043 return (status);
10958 11044 }
10959 11045
10960 11046 static int
10961 11047 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
10962 11048 {
10963 11049 int status;
10964 11050 mptsas_diag_action_t driver_data;
10965 11051
10966 11052 ASSERT(mutex_owned(&mpt->m_mutex));
10967 11053
10968 11054 /*
10969 11055 * Copy the user data to a driver data buffer.
10970 11056 */
10971 11057 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
10972 11058 mode) == 0) {
10973 11059 /*
10974 11060 * Send diag action request if Action is valid
10975 11061 */
10976 11062 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
10977 11063 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
10978 11064 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
10979 11065 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
10980 11066 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
10981 11067 status = mptsas_do_diag_action(mpt, driver_data.Action,
10982 11068 (void *)(uintptr_t)driver_data.PtrDiagAction,
10983 11069 driver_data.Length, &driver_data.ReturnCode,
10984 11070 mode);
10985 11071 if (status == DDI_SUCCESS) {
10986 11072 if (ddi_copyout(&driver_data.ReturnCode,
10987 11073 &user_data->ReturnCode,
10988 11074 sizeof (user_data->ReturnCode), mode)
10989 11075 != 0) {
10990 11076 status = EFAULT;
10991 11077 } else {
10992 11078 status = 0;
10993 11079 }
10994 11080 } else {
10995 11081 status = EIO;
10996 11082 }
10997 11083 } else {
10998 11084 status = EINVAL;
10999 11085 }
11000 11086 } else {
11001 11087 status = EFAULT;
11002 11088 }
11003 11089
11004 11090 return (status);
11005 11091 }
11006 11092
11007 11093 /*
11008 11094 * This routine handles the "event query" ioctl.
11009 11095 */
11010 11096 static int
11011 11097 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11012 11098 int *rval)
11013 11099 {
11014 11100 int status;
11015 11101 mptsas_event_query_t driverdata;
11016 11102 uint8_t i;
11017 11103
11018 11104 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11019 11105
11020 11106 mutex_enter(&mpt->m_mutex);
11021 11107 for (i = 0; i < 4; i++) {
11022 11108 driverdata.Types[i] = mpt->m_event_mask[i];
11023 11109 }
11024 11110 mutex_exit(&mpt->m_mutex);
11025 11111
11026 11112 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11027 11113 status = EFAULT;
11028 11114 } else {
11029 11115 *rval = MPTIOCTL_STATUS_GOOD;
11030 11116 status = 0;
11031 11117 }
11032 11118
11033 11119 return (status);
11034 11120 }
11035 11121
11036 11122 /*
11037 11123 * This routine handles the "event enable" ioctl.
11038 11124 */
11039 11125 static int
11040 11126 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11041 11127 int *rval)
11042 11128 {
11043 11129 int status;
11044 11130 mptsas_event_enable_t driverdata;
11045 11131 uint8_t i;
11046 11132
11047 11133 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11048 11134 mutex_enter(&mpt->m_mutex);
11049 11135 for (i = 0; i < 4; i++) {
11050 11136 mpt->m_event_mask[i] = driverdata.Types[i];
11051 11137 }
11052 11138 mutex_exit(&mpt->m_mutex);
11053 11139
11054 11140 *rval = MPTIOCTL_STATUS_GOOD;
11055 11141 status = 0;
11056 11142 } else {
11057 11143 status = EFAULT;
11058 11144 }
11059 11145 return (status);
11060 11146 }
11061 11147
11062 11148 /*
11063 11149 * This routine handles the "event report" ioctl.
11064 11150 */
11065 11151 static int
11066 11152 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11067 11153 int *rval)
11068 11154 {
11069 11155 int status;
11070 11156 mptsas_event_report_t driverdata;
11071 11157
11072 11158 mutex_enter(&mpt->m_mutex);
11073 11159
11074 11160 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11075 11161 mode) == 0) {
11076 11162 if (driverdata.Size >= sizeof (mpt->m_events)) {
11077 11163 if (ddi_copyout(mpt->m_events, data->Events,
11078 11164 sizeof (mpt->m_events), mode) != 0) {
11079 11165 status = EFAULT;
11080 11166 } else {
11081 11167 if (driverdata.Size > sizeof (mpt->m_events)) {
11082 11168 driverdata.Size =
11083 11169 sizeof (mpt->m_events);
11084 11170 if (ddi_copyout(&driverdata.Size,
11085 11171 &data->Size,
11086 11172 sizeof (driverdata.Size),
11087 11173 mode) != 0) {
11088 11174 status = EFAULT;
11089 11175 } else {
11090 11176 *rval = MPTIOCTL_STATUS_GOOD;
11091 11177 status = 0;
11092 11178 }
11093 11179 } else {
11094 11180 *rval = MPTIOCTL_STATUS_GOOD;
11095 11181 status = 0;
11096 11182 }
11097 11183 }
11098 11184 } else {
11099 11185 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11100 11186 status = 0;
11101 11187 }
11102 11188 } else {
11103 11189 status = EFAULT;
11104 11190 }
11105 11191
11106 11192 mutex_exit(&mpt->m_mutex);
11107 11193 return (status);
11108 11194 }
11109 11195
11110 11196 static void
11111 11197 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11112 11198 {
11113 11199 int *reg_data;
11114 11200 uint_t reglen;
11115 11201
11116 11202 /*
11117 11203 * Lookup the 'reg' property and extract the other data
11118 11204 */
11119 11205 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11120 11206 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11121 11207 DDI_PROP_SUCCESS) {
11122 11208 /*
11123 11209 * Extract the PCI data from the 'reg' property first DWORD.
11124 11210 * The entry looks like the following:
11125 11211 * First DWORD:
11126 11212 * Bits 0 - 7 8-bit Register number
11127 11213 * Bits 8 - 10 3-bit Function number
11128 11214 * Bits 11 - 15 5-bit Device number
11129 11215 * Bits 16 - 23 8-bit Bus number
11130 11216 * Bits 24 - 25 2-bit Address Space type identifier
11131 11217 *
11132 11218 */
11133 11219 adapter_data->PciInformation.u.bits.BusNumber =
11134 11220 (reg_data[0] & 0x00FF0000) >> 16;
11135 11221 adapter_data->PciInformation.u.bits.DeviceNumber =
11136 11222 (reg_data[0] & 0x0000F800) >> 11;
11137 11223 adapter_data->PciInformation.u.bits.FunctionNumber =
11138 11224 (reg_data[0] & 0x00000700) >> 8;
11139 11225 ddi_prop_free((void *)reg_data);
11140 11226 } else {
11141 11227 /*
11142 11228 * If we can't determine the PCI data then we fill in FF's for
11143 11229 * the data to indicate this.
11144 11230 */
11145 11231 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11146 11232 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11147 11233 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11148 11234 }
11149 11235
11150 11236 /*
11151 11237 * Saved in the mpt->m_fwversion
11152 11238 */
11153 11239 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11154 11240 }
11155 11241
11156 11242 static void
11157 11243 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11158 11244 {
11159 11245 char *driver_verstr = MPTSAS_MOD_STRING;
11160 11246
11161 11247 mptsas_lookup_pci_data(mpt, adapter_data);
11162 11248 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11163 11249 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11164 11250 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11165 11251 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11166 11252 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11167 11253 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11168 11254 adapter_data->BiosVersion = 0;
11169 11255 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11170 11256 }
11171 11257
11172 11258 static void
11173 11259 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11174 11260 {
11175 11261 int *reg_data, i;
11176 11262 uint_t reglen;
11177 11263
11178 11264 /*
11179 11265 * Lookup the 'reg' property and extract the other data
11180 11266 */
11181 11267 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11182 11268 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11183 11269 DDI_PROP_SUCCESS) {
11184 11270 /*
11185 11271 * Extract the PCI data from the 'reg' property first DWORD.
11186 11272 * The entry looks like the following:
11187 11273 * First DWORD:
11188 11274 * Bits 8 - 10 3-bit Function number
11189 11275 * Bits 11 - 15 5-bit Device number
11190 11276 * Bits 16 - 23 8-bit Bus number
11191 11277 */
11192 11278 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11193 11279 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11194 11280 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11195 11281 ddi_prop_free((void *)reg_data);
11196 11282 } else {
11197 11283 /*
11198 11284 * If we can't determine the PCI info then we fill in FF's for
11199 11285 * the data to indicate this.
11200 11286 */
11201 11287 pci_info->BusNumber = 0xFFFFFFFF;
11202 11288 pci_info->DeviceNumber = 0xFF;
11203 11289 pci_info->FunctionNumber = 0xFF;
11204 11290 }
11205 11291
11206 11292 /*
11207 11293 * Now get the interrupt vector and the pci header. The vector can
11208 11294 * only be 0 right now. The header is the first 256 bytes of config
11209 11295 * space.
11210 11296 */
11211 11297 pci_info->InterruptVector = 0;
11212 11298 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11213 11299 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11214 11300 i);
11215 11301 }
11216 11302 }
11217 11303
11218 11304 static int
11219 11305 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
11220 11306 {
11221 11307 int status = 0;
11222 11308 mptsas_reg_access_t driverdata;
11223 11309
11224 11310 mutex_enter(&mpt->m_mutex);
11225 11311 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11226 11312 switch (driverdata.Command) {
11227 11313 /*
11228 11314 * IO access is not supported.
11229 11315 */
11230 11316 case REG_IO_READ:
11231 11317 case REG_IO_WRITE:
11232 11318 mptsas_log(mpt, CE_WARN, "IO access is not "
11233 11319 "supported. Use memory access.");
11234 11320 status = EINVAL;
11235 11321 break;
11236 11322
11237 11323 case REG_MEM_READ:
11238 11324 driverdata.RegData = ddi_get32(mpt->m_datap,
11239 11325 (uint32_t *)(void *)mpt->m_reg +
11240 11326 driverdata.RegOffset);
11241 11327 if (ddi_copyout(&driverdata.RegData,
11242 11328 &data->RegData,
11243 11329 sizeof (driverdata.RegData), mode) != 0) {
11244 11330 mptsas_log(mpt, CE_WARN, "Register "
11245 11331 "Read Failed");
11246 11332 status = EFAULT;
11247 11333 }
11248 11334 break;
11249 11335
11250 11336 case REG_MEM_WRITE:
11251 11337 ddi_put32(mpt->m_datap,
11252 11338 (uint32_t *)(void *)mpt->m_reg +
11253 11339 driverdata.RegOffset,
11254 11340 driverdata.RegData);
11255 11341 break;
11256 11342
11257 11343 default:
11258 11344 status = EINVAL;
11259 11345 break;
11260 11346 }
11261 11347 } else {
11262 11348 status = EFAULT;
11263 11349 }
11264 11350
11265 11351 mutex_exit(&mpt->m_mutex);
11266 11352 return (status);
11267 11353 }
11268 11354
/*
 * cb_ops ioctl entry point.
 *
 * Handles the MPTIOCTL_* pass-through/diagnostic interface on the HBA
 * node and falls back to scsi_hba_ioctl() for anything else.  When the
 * minor refers to an iport node (no HBA soft state found for the minor),
 * the request is forwarded directly to scsi_hba_ioctl().
 *
 * Requires sys_config privilege.  Before touching hardware, the power
 * level is raised to D0 if power management is enabled.
 * NOTE(review): pm_busy_component() is taken on the PM path; confirm the
 * matching pm_idle_component() is issued elsewhere on the success path.
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int			status = 0;
	mptsas_t		*mpt;
	mptsas_update_flash_t	flashdata;
	mptsas_pass_thru_t	passthru_data;
	mptsas_adapter_data_t	adapter_data;
	mptsas_pci_info_t	pci_info;
	int			copylen;

	int			iport_flag = 0;
	dev_info_t		*dip = NULL;
	mptsas_phymask_t	phymask = 0;
	/*
	 * NOTE(review): dcp/slotstatus/addr/ptgt are left over from the
	 * removed OK2RM LED-control code and appear unused below --
	 * candidates for removal once confirmed.
	 */
	struct devctl_iocdata	*dcp = NULL;
	uint32_t		slotstatus = 0;
	char			*addr = NULL;
	mptsas_target_t		*ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	/* Only privileged callers may use this interface. */
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	if (iport_flag) {
		/* iport nodes only get the generic HBA ioctls. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		goto out;
	}
	switch (cmd) {
		case MPTIOCTL_UPDATE_FLASH:
			if (ddi_copyin((void *)data, &flashdata,
				sizeof (struct mptsas_update_flash), mode)) {
				status = EFAULT;
				break;
			}

			mutex_enter(&mpt->m_mutex);
			if (mptsas_update_flash(mpt,
			    (caddr_t)(long)flashdata.PtrBuffer,
			    flashdata.ImageSize, flashdata.ImageType, mode)) {
				status = EFAULT;
			}

			/*
			 * Reset the chip to start using the new
			 * firmware.  Reset if failed also.
			 */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_PASS_THRU:
			/*
			 * The user has requested to pass through a command to
			 * be executed by the MPT firmware.  Call our routine
			 * which does this.  Only allow one passthru IOCTL at
			 * one time. Other threads will block on
			 * m_passthru_mutex, which is of adaptive variant.
			 */
			if (ddi_copyin((void *)data, &passthru_data,
			    sizeof (mptsas_pass_thru_t), mode)) {
				status = EFAULT;
				break;
			}
			mutex_enter(&mpt->m_passthru_mutex);
			mutex_enter(&mpt->m_mutex);
			status = mptsas_pass_thru(mpt, &passthru_data, mode);
			mutex_exit(&mpt->m_mutex);
			mutex_exit(&mpt->m_passthru_mutex);

			break;
		case MPTIOCTL_GET_ADAPTER_DATA:
			/*
			 * The user has requested to read adapter data.  Call
			 * our routine which does this.
			 */
			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
			if (ddi_copyin((void *)data, (void *)&adapter_data,
			    sizeof (mptsas_adapter_data_t), mode)) {
				status = EFAULT;
				break;
			}
			if (adapter_data.StructureLength >=
			    sizeof (mptsas_adapter_data_t)) {
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (mptsas_adapter_data_t);
				mutex_enter(&mpt->m_mutex);
				mptsas_read_adapter_data(mpt, &adapter_data);
				mutex_exit(&mpt->m_mutex);
			} else {
				/*
				 * Caller's buffer is too small: return only
				 * the required length.
				 */
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (adapter_data.StructureLength);
				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			}
			if (ddi_copyout((void *)(&adapter_data), (void *)data,
			    copylen, mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_GET_PCI_INFO:
			/*
			 * The user has requested to read pci info.  Call
			 * our routine which does this.
			 */
			bzero(&pci_info, sizeof (mptsas_pci_info_t));
			mutex_enter(&mpt->m_mutex);
			mptsas_read_pci_info(mpt, &pci_info);
			mutex_exit(&mpt->m_mutex);
			if (ddi_copyout((void *)(&pci_info), (void *)data,
			    sizeof (mptsas_pci_info_t), mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_RESET_ADAPTER:
			mutex_enter(&mpt->m_mutex);
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
				    "failed");
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_DIAG_ACTION:
			/*
			 * The user has done a diag buffer action.  Call our
			 * routine which does this.  Only allow one diag action
			 * at one time.
			 */
			mutex_enter(&mpt->m_mutex);
			if (mpt->m_diag_action_in_progress) {
				mutex_exit(&mpt->m_mutex);
				status = EBUSY;
				goto out;
			}
			mpt->m_diag_action_in_progress = 1;
			status = mptsas_diag_action(mpt,
			    (mptsas_diag_action_t *)data, mode);
			mpt->m_diag_action_in_progress = 0;
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_EVENT_QUERY:
			/*
			 * The user has done an event query.  Call our routine
			 * which does this.
			 */
			status = mptsas_event_query(mpt,
			    (mptsas_event_query_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_ENABLE:
			/*
			 * The user has done an event enable.  Call our routine
			 * which does this.
			 */
			status = mptsas_event_enable(mpt,
			    (mptsas_event_enable_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_REPORT:
			/*
			 * The user has done an event report.  Call our routine
			 * which does this.
			 */
			status = mptsas_event_report(mpt,
			    (mptsas_event_report_t *)data, mode, rval);
			break;
		case MPTIOCTL_REG_ACCESS:
			/*
			 * The user has requested register access.  Call our
			 * routine which does this.
			 */
			status = mptsas_reg_access(mpt,
			    (mptsas_reg_access_t *)data, mode);
			break;
		default:
			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
			    rval);
			break;
	}

out:
	return (status);
}
11547 11575
11548 11576 int
11549 11577 mptsas_restart_ioc(mptsas_t *mpt)
11550 11578 {
11551 11579 int rval = DDI_SUCCESS;
11552 11580 mptsas_target_t *ptgt = NULL;
11553 11581
11554 11582 ASSERT(mutex_owned(&mpt->m_mutex));
11555 11583
11556 11584 /*
11557 11585 * Set a flag telling I/O path that we're processing a reset. This is
11558 11586 * needed because after the reset is complete, the hash table still
11559 11587 * needs to be rebuilt. If I/Os are started before the hash table is
11560 11588 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
11561 11589 * so that they can be retried.
11562 11590 */
11563 11591 mpt->m_in_reset = TRUE;
11564 11592
11565 11593 /*
11566 11594 * Set all throttles to HOLD
11567 11595 */
11568 11596 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11569 11597 MPTSAS_HASH_FIRST);
11570 11598 while (ptgt != NULL) {
11571 11599 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
11572 11600
11573 11601 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11574 11602 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11575 11603 }
11576 11604
11577 11605 /*
11578 11606 * Disable interrupts
11579 11607 */
11580 11608 MPTSAS_DISABLE_INTR(mpt);
11581 11609
11582 11610 /*
11583 11611 * Abort all commands: outstanding commands, commands in waitq and
11584 11612 * tx_waitq.
11585 11613 */
11586 11614 mptsas_flush_hba(mpt);
11587 11615
11588 11616 /*
11589 11617 * Reinitialize the chip.
11590 11618 */
11591 11619 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
11592 11620 rval = DDI_FAILURE;
11593 11621 }
11594 11622
11595 11623 /*
11596 11624 * Enable interrupts again
11597 11625 */
11598 11626 MPTSAS_ENABLE_INTR(mpt);
11599 11627
11600 11628 /*
11601 11629 * If mptsas_init_chip was successful, update the driver data.
11602 11630 */
11603 11631 if (rval == DDI_SUCCESS) {
11604 11632 mptsas_update_driver_data(mpt);
11605 11633 }
11606 11634
11607 11635 /*
11608 11636 * Reset the throttles
11609 11637 */
11610 11638 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11611 11639 MPTSAS_HASH_FIRST);
11612 11640 while (ptgt != NULL) {
11613 11641 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
11614 11642
11615 11643 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11616 11644 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11617 11645 }
11618 11646
11619 11647 mptsas_doneq_empty(mpt);
11620 11648 mptsas_restart_hba(mpt);
11621 11649
11622 11650 if (rval != DDI_SUCCESS) {
11623 11651 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
11624 11652 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
11625 11653 }
11626 11654
11627 11655 /*
11628 11656 * Clear the reset flag so that I/Os can continue.
11629 11657 */
11630 11658 mpt->m_in_reset = FALSE;
11631 11659
11632 11660 return (rval);
11633 11661 }
11634 11662
/*
 * Bring the IOC to the operational state.
 *
 * Resets the chip, (re)allocates the IOC-facts-dependent DMA resources on
 * a full reset, initializes the reply free and reply post queues, enables
 * ports and event notification, and finally verifies every DMA and access
 * handle.  'first_time' distinguishes the initial attach from a later
 * reinitialization; a message unit reset (MUR) short-circuits the resource
 * reallocation and jumps straight to re-running IOC init.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * A message unit reset preserves the IOC configuration, so the
	 * facts-dependent resources below can be reused as-is.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* Verify every DMA handle before declaring the chip up. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
11812 11840
11813 11841 static int
11814 11842 mptsas_get_pci_cap(mptsas_t *mpt)
11815 11843 {
11816 11844 ushort_t caps_ptr, cap, cap_count;
11817 11845
11818 11846 if (mpt->m_config_handle == NULL)
11819 11847 return (FALSE);
11820 11848 /*
11821 11849 * Check if capabilities list is supported and if so,
11822 11850 * get initial capabilities pointer and clear bits 0,1.
11823 11851 */
11824 11852 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
11825 11853 & PCI_STAT_CAP) {
11826 11854 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
11827 11855 PCI_CONF_CAP_PTR), 4);
11828 11856 } else {
11829 11857 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
11830 11858 }
11831 11859
11832 11860 /*
11833 11861 * Walk capabilities if supported.
11834 11862 */
11835 11863 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
11836 11864
11837 11865 /*
11838 11866 * Check that we haven't exceeded the maximum number of
11839 11867 * capabilities and that the pointer is in a valid range.
11840 11868 */
11841 11869 if (++cap_count > 48) {
11842 11870 mptsas_log(mpt, CE_WARN,
11843 11871 "too many device capabilities.\n");
11844 11872 break;
11845 11873 }
11846 11874 if (caps_ptr < 64) {
11847 11875 mptsas_log(mpt, CE_WARN,
11848 11876 "capabilities pointer 0x%x out of range.\n",
11849 11877 caps_ptr);
11850 11878 break;
11851 11879 }
11852 11880
11853 11881 /*
11854 11882 * Get next capability and check that it is valid.
11855 11883 * For now, we only support power management.
11856 11884 */
11857 11885 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
11858 11886 switch (cap) {
11859 11887 case PCI_CAP_ID_PM:
11860 11888 mptsas_log(mpt, CE_NOTE,
11861 11889 "?mptsas%d supports power management.\n",
11862 11890 mpt->m_instance);
11863 11891 mpt->m_options |= MPTSAS_OPT_PM;
11864 11892
11865 11893 /* Save PMCSR offset */
11866 11894 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
11867 11895 break;
11868 11896 /*
11869 11897 * The following capabilities are valid. Any others
11870 11898 * will cause a message to be logged.
11871 11899 */
11872 11900 case PCI_CAP_ID_VPD:
11873 11901 case PCI_CAP_ID_MSI:
11874 11902 case PCI_CAP_ID_PCIX:
11875 11903 case PCI_CAP_ID_PCI_E:
11876 11904 case PCI_CAP_ID_MSI_X:
11877 11905 break;
11878 11906 default:
11879 11907 mptsas_log(mpt, CE_NOTE,
11880 11908 "?mptsas%d unrecognized capability "
11881 11909 "0x%x.\n", mpt->m_instance, cap);
11882 11910 break;
11883 11911 }
11884 11912
11885 11913 /*
11886 11914 * Get next capabilities pointer and clear bits 0,1.
11887 11915 */
11888 11916 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
11889 11917 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
11890 11918 }
11891 11919 return (TRUE);
11892 11920 }
11893 11921
11894 11922 static int
11895 11923 mptsas_init_pm(mptsas_t *mpt)
11896 11924 {
11897 11925 char pmc_name[16];
11898 11926 char *pmc[] = {
11899 11927 NULL,
11900 11928 "0=Off (PCI D3 State)",
11901 11929 "3=On (PCI D0 State)",
11902 11930 NULL
11903 11931 };
11904 11932 uint16_t pmcsr_stat;
11905 11933
11906 11934 if (mptsas_get_pci_cap(mpt) == FALSE) {
11907 11935 return (DDI_FAILURE);
11908 11936 }
11909 11937 /*
11910 11938 * If PCI's capability does not support PM, then don't need
11911 11939 * to registe the pm-components
11912 11940 */
11913 11941 if (!(mpt->m_options & MPTSAS_OPT_PM))
11914 11942 return (DDI_SUCCESS);
11915 11943 /*
11916 11944 * If power management is supported by this chip, create
11917 11945 * pm-components property for the power management framework
11918 11946 */
11919 11947 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
11920 11948 pmc[0] = pmc_name;
11921 11949 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
11922 11950 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
11923 11951 mpt->m_options &= ~MPTSAS_OPT_PM;
11924 11952 mptsas_log(mpt, CE_WARN,
11925 11953 "mptsas%d: pm-component property creation failed.",
11926 11954 mpt->m_instance);
11927 11955 return (DDI_FAILURE);
11928 11956 }
11929 11957
11930 11958 /*
11931 11959 * Power on device.
11932 11960 */
11933 11961 (void) pm_busy_component(mpt->m_dip, 0);
11934 11962 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
11935 11963 mpt->m_pmcsr_offset);
11936 11964 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
11937 11965 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
11938 11966 mpt->m_instance);
11939 11967 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
11940 11968 PCI_PMCSR_D0);
11941 11969 }
11942 11970 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
11943 11971 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
11944 11972 return (DDI_FAILURE);
11945 11973 }
11946 11974 mpt->m_power_level = PM_LEVEL_D0;
11947 11975 /*
11948 11976 * Set pm idle delay.
11949 11977 */
11950 11978 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
11951 11979 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
11952 11980
11953 11981 return (DDI_SUCCESS);
11954 11982 }
11955 11983
11956 11984 static int
11957 11985 mptsas_register_intrs(mptsas_t *mpt)
11958 11986 {
11959 11987 dev_info_t *dip;
11960 11988 int intr_types;
11961 11989
11962 11990 dip = mpt->m_dip;
11963 11991
11964 11992 /* Get supported interrupt types */
11965 11993 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
11966 11994 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
11967 11995 "failed\n");
11968 11996 return (FALSE);
11969 11997 }
11970 11998
11971 11999 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
11972 12000
11973 12001 /*
11974 12002 * Try MSI, but fall back to FIXED
11975 12003 */
11976 12004 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
11977 12005 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
11978 12006 NDBG0(("Using MSI interrupt type"));
11979 12007 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
11980 12008 return (TRUE);
11981 12009 }
11982 12010 }
11983 12011 if (intr_types & DDI_INTR_TYPE_FIXED) {
11984 12012 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
11985 12013 NDBG0(("Using FIXED interrupt type"));
11986 12014 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
11987 12015 return (TRUE);
11988 12016 } else {
11989 12017 NDBG0(("FIXED interrupt registration failed"));
11990 12018 return (FALSE);
11991 12019 }
11992 12020 }
11993 12021
11994 12022 return (FALSE);
11995 12023 }
11996 12024
/*
 * mptsas_unregister_intrs:
 *
 * Thin wrapper around mptsas_rem_intrs(); disables and frees every
 * interrupt previously set up by mptsas_register_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12002 12030
12003 12031 /*
12004 12032 * mptsas_add_intrs:
12005 12033 *
12006 12034 * Register FIXED or MSI interrupts.
12007 12035 */
12008 12036 static int
12009 12037 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12010 12038 {
12011 12039 dev_info_t *dip = mpt->m_dip;
12012 12040 int avail, actual, count = 0;
12013 12041 int i, flag, ret;
12014 12042
12015 12043 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12016 12044
12017 12045 /* Get number of interrupts */
12018 12046 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12019 12047 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12020 12048 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12021 12049 "ret %d count %d\n", ret, count);
12022 12050
12023 12051 return (DDI_FAILURE);
12024 12052 }
12025 12053
12026 12054 /* Get number of available interrupts */
12027 12055 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12028 12056 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12029 12057 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12030 12058 "ret %d avail %d\n", ret, avail);
12031 12059
12032 12060 return (DDI_FAILURE);
12033 12061 }
12034 12062
12035 12063 if (avail < count) {
12036 12064 mptsas_log(mpt, CE_CONT, "!ddi_intr_get_nvail returned %d, "
12037 12065 "navail() returned %d", count, avail);
12038 12066 }
12039 12067
12040 12068 /* Mpt only have one interrupt routine */
12041 12069 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12042 12070 count = 1;
12043 12071 }
12044 12072
12045 12073 /* Allocate an array of interrupt handles */
12046 12074 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12047 12075 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12048 12076
12049 12077 flag = DDI_INTR_ALLOC_NORMAL;
12050 12078
12051 12079 /* call ddi_intr_alloc() */
12052 12080 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12053 12081 count, &actual, flag);
12054 12082
12055 12083 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12056 12084 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12057 12085 ret);
12058 12086 kmem_free(mpt->m_htable, mpt->m_intr_size);
12059 12087 return (DDI_FAILURE);
12060 12088 }
12061 12089
12062 12090 /* use interrupt count returned or abort? */
12063 12091 if (actual < count) {
12064 12092 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12065 12093 count, actual);
12066 12094 }
12067 12095
12068 12096 mpt->m_intr_cnt = actual;
12069 12097
12070 12098 /*
12071 12099 * Get priority for first msi, assume remaining are all the same
12072 12100 */
12073 12101 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12074 12102 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12075 12103 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12076 12104
12077 12105 /* Free already allocated intr */
12078 12106 for (i = 0; i < actual; i++) {
12079 12107 (void) ddi_intr_free(mpt->m_htable[i]);
12080 12108 }
12081 12109
12082 12110 kmem_free(mpt->m_htable, mpt->m_intr_size);
12083 12111 return (DDI_FAILURE);
12084 12112 }
12085 12113
12086 12114 /* Test for high level mutex */
12087 12115 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12088 12116 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12089 12117 "Hi level interrupt not supported\n");
12090 12118
12091 12119 /* Free already allocated intr */
12092 12120 for (i = 0; i < actual; i++) {
12093 12121 (void) ddi_intr_free(mpt->m_htable[i]);
12094 12122 }
12095 12123
12096 12124 kmem_free(mpt->m_htable, mpt->m_intr_size);
12097 12125 return (DDI_FAILURE);
12098 12126 }
12099 12127
12100 12128 /* Call ddi_intr_add_handler() */
12101 12129 for (i = 0; i < actual; i++) {
12102 12130 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12103 12131 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12104 12132 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12105 12133 "failed %d\n", ret);
12106 12134
12107 12135 /* Free already allocated intr */
12108 12136 for (i = 0; i < actual; i++) {
12109 12137 (void) ddi_intr_free(mpt->m_htable[i]);
12110 12138 }
12111 12139
12112 12140 kmem_free(mpt->m_htable, mpt->m_intr_size);
12113 12141 return (DDI_FAILURE);
12114 12142 }
12115 12143 }
12116 12144
12117 12145 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12118 12146 != DDI_SUCCESS) {
12119 12147 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12120 12148
12121 12149 /* Free already allocated intr */
12122 12150 for (i = 0; i < actual; i++) {
12123 12151 (void) ddi_intr_free(mpt->m_htable[i]);
12124 12152 }
12125 12153
12126 12154 kmem_free(mpt->m_htable, mpt->m_intr_size);
12127 12155 return (DDI_FAILURE);
12128 12156 }
12129 12157
12130 12158 /*
12131 12159 * Enable interrupts
12132 12160 */
12133 12161 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12134 12162 /* Call ddi_intr_block_enable() for MSI interrupts */
12135 12163 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12136 12164 } else {
12137 12165 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12138 12166 for (i = 0; i < mpt->m_intr_cnt; i++) {
12139 12167 (void) ddi_intr_enable(mpt->m_htable[i]);
12140 12168 }
12141 12169 }
12142 12170 return (DDI_SUCCESS);
12143 12171 }
12144 12172
12145 12173 /*
12146 12174 * mptsas_rem_intrs:
12147 12175 *
12148 12176 * Unregister FIXED or MSI interrupts
12149 12177 */
12150 12178 static void
12151 12179 mptsas_rem_intrs(mptsas_t *mpt)
12152 12180 {
12153 12181 int i;
12154 12182
12155 12183 NDBG6(("mptsas_rem_intrs"));
12156 12184
12157 12185 /* Disable all interrupts */
12158 12186 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12159 12187 /* Call ddi_intr_block_disable() */
12160 12188 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12161 12189 } else {
12162 12190 for (i = 0; i < mpt->m_intr_cnt; i++) {
12163 12191 (void) ddi_intr_disable(mpt->m_htable[i]);
12164 12192 }
12165 12193 }
12166 12194
12167 12195 /* Call ddi_intr_remove_handler() */
12168 12196 for (i = 0; i < mpt->m_intr_cnt; i++) {
12169 12197 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12170 12198 (void) ddi_intr_free(mpt->m_htable[i]);
12171 12199 }
12172 12200
12173 12201 kmem_free(mpt->m_htable, mpt->m_intr_size);
12174 12202 }
12175 12203
/*
 * The IO fault service error handling callback function.
 *
 * Registered via ddi_fm_handler_register() in mptsas_fm_init(); posts a
 * PCI ereport for the fault and hands the framework's own status back as
 * the callback result.
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
12190 12218
12191 12219 /*
12192 12220 * mptsas_fm_init - initialize fma capabilities and register with IO
12193 12221 * fault services.
12194 12222 */
12195 12223 static void
12196 12224 mptsas_fm_init(mptsas_t *mpt)
12197 12225 {
12198 12226 /*
12199 12227 * Need to change iblock to priority for new MSI intr
12200 12228 */
12201 12229 ddi_iblock_cookie_t fm_ibc;
12202 12230
12203 12231 /* Only register with IO Fault Services if we have some capability */
12204 12232 if (mpt->m_fm_capabilities) {
12205 12233 /* Adjust access and dma attributes for FMA */
12206 12234 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12207 12235 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12208 12236 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12209 12237
12210 12238 /*
12211 12239 * Register capabilities with IO Fault Services.
12212 12240 * mpt->m_fm_capabilities will be updated to indicate
12213 12241 * capabilities actually supported (not requested.)
12214 12242 */
12215 12243 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12216 12244
12217 12245 /*
12218 12246 * Initialize pci ereport capabilities if ereport
12219 12247 * capable (should always be.)
12220 12248 */
12221 12249 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12222 12250 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12223 12251 pci_ereport_setup(mpt->m_dip);
12224 12252 }
12225 12253
12226 12254 /*
12227 12255 * Register error callback if error callback capable.
12228 12256 */
12229 12257 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12230 12258 ddi_fm_handler_register(mpt->m_dip,
12231 12259 mptsas_fm_error_cb, (void *) mpt);
12232 12260 }
12233 12261 }
12234 12262 }
12235 12263
12236 12264 /*
12237 12265 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12238 12266 * fault services.
12239 12267 *
12240 12268 */
12241 12269 static void
12242 12270 mptsas_fm_fini(mptsas_t *mpt)
12243 12271 {
12244 12272 /* Only unregister FMA capabilities if registered */
12245 12273 if (mpt->m_fm_capabilities) {
12246 12274
12247 12275 /*
12248 12276 * Un-register error callback if error callback capable.
12249 12277 */
12250 12278
12251 12279 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12252 12280 ddi_fm_handler_unregister(mpt->m_dip);
12253 12281 }
12254 12282
12255 12283 /*
12256 12284 * Release any resources allocated by pci_ereport_setup()
12257 12285 */
12258 12286
12259 12287 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12260 12288 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12261 12289 pci_ereport_teardown(mpt->m_dip);
12262 12290 }
12263 12291
12264 12292 /* Unregister from IO Fault Services */
12265 12293 ddi_fm_fini(mpt->m_dip);
12266 12294
12267 12295 /* Adjust access and dma attributes for FMA */
12268 12296 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12269 12297 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12270 12298 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12271 12299
12272 12300 }
12273 12301 }
12274 12302
12275 12303 int
12276 12304 mptsas_check_acc_handle(ddi_acc_handle_t handle)
12277 12305 {
12278 12306 ddi_fm_error_t de;
12279 12307
12280 12308 if (handle == NULL)
12281 12309 return (DDI_FAILURE);
12282 12310 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
12283 12311 return (de.fme_status);
12284 12312 }
12285 12313
12286 12314 int
12287 12315 mptsas_check_dma_handle(ddi_dma_handle_t handle)
12288 12316 {
12289 12317 ddi_fm_error_t de;
12290 12318
12291 12319 if (handle == NULL)
12292 12320 return (DDI_FAILURE);
12293 12321 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
12294 12322 return (de.fme_status);
12295 12323 }
12296 12324
12297 12325 void
12298 12326 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12299 12327 {
12300 12328 uint64_t ena;
12301 12329 char buf[FM_MAX_CLASS];
12302 12330
12303 12331 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12304 12332 ena = fm_ena_generate(0, FM_ENA_FMT1);
12305 12333 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12306 12334 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12307 12335 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12308 12336 }
12309 12337 }
12310 12338
12311 12339 static int
12312 12340 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12313 12341 uint16_t *dev_handle, mptsas_target_t **pptgt)
12314 12342 {
12315 12343 int rval;
12316 12344 uint32_t dev_info;
12317 12345 uint64_t sas_wwn;
12318 12346 mptsas_phymask_t phymask;
12319 12347 uint8_t physport, phynum, config, disk;
12320 12348 mptsas_slots_t *slots = mpt->m_active;
12321 12349 uint64_t devicename;
12322 12350 uint16_t pdev_hdl;
12323 12351 mptsas_target_t *tmp_tgt = NULL;
12324 12352 uint16_t bay_num, enclosure;
12325 12353
12326 12354 ASSERT(*pptgt == NULL);
12327 12355
12328 12356 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12329 12357 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12330 12358 &bay_num, &enclosure);
12331 12359 if (rval != DDI_SUCCESS) {
12332 12360 rval = DEV_INFO_FAIL_PAGE0;
12333 12361 return (rval);
12334 12362 }
12335 12363
12336 12364 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12337 12365 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12338 12366 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12339 12367 rval = DEV_INFO_WRONG_DEVICE_TYPE;
12340 12368 return (rval);
12341 12369 }
12342 12370
12343 12371 /*
12344 12372 * Check if the dev handle is for a Phys Disk. If so, set return value
12345 12373 * and exit. Don't add Phys Disks to hash.
12346 12374 */
12347 12375 for (config = 0; config < slots->m_num_raid_configs; config++) {
12348 12376 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12349 12377 if (*dev_handle == slots->m_raidconfig[config].
12350 12378 m_physdisk_devhdl[disk]) {
12351 12379 rval = DEV_INFO_PHYS_DISK;
12352 12380 return (rval);
12353 12381 }
12354 12382 }
12355 12383 }
12356 12384
12357 12385 /*
12358 12386 * Get SATA Device Name from SAS device page0 for
12359 12387 * sata device, if device name doesn't exist, set m_sas_wwn to
12360 12388 * 0 for direct attached SATA. For the device behind the expander
12361 12389 * we still can use STP address assigned by expander.
12362 12390 */
12363 12391 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12364 12392 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12365 12393 mutex_exit(&mpt->m_mutex);
12366 12394 /* alloc a tmp_tgt to send the cmd */
12367 12395 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12368 12396 KM_SLEEP);
12369 12397 tmp_tgt->m_devhdl = *dev_handle;
12370 12398 tmp_tgt->m_deviceinfo = dev_info;
12371 12399 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12372 12400 tmp_tgt->m_qfull_retry_interval =
12373 12401 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12374 12402 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12375 12403 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12376 12404 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12377 12405 mutex_enter(&mpt->m_mutex);
12378 12406 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12379 12407 sas_wwn = devicename;
12380 12408 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12381 12409 sas_wwn = 0;
12382 12410 }
12383 12411 }
12384 12412
12385 12413 phymask = mptsas_physport_to_phymask(mpt, physport);
12386 12414 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12387 12415 dev_info, phymask, phynum);
12388 12416 if (*pptgt == NULL) {
12389 12417 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
12390 12418 "structure!");
12391 12419 rval = DEV_INFO_FAIL_ALLOC;
12392 12420 return (rval);
12393 12421 }
12394 12422 (*pptgt)->m_enclosure = enclosure;
12395 12423 (*pptgt)->m_slot_num = bay_num;
12396 12424 return (DEV_INFO_SUCCESS);
12397 12425 }
12398 12426
/*
 * Retrieve the world-wide name (GUID) of a SATA target by issuing an
 * INQUIRY for VPD page 0x83 (device identification) and extracting the
 * first, LUN-associated designator.
 *
 * Returns the NAA-format GUID in host order, or 0 when the drive has no
 * NAA designator or the inquiry keeps failing after three retries.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* Association bits (5:4) must be 0 == addressed logical unit. */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	/* 0x5 in the top nibble marks an IEEE NAA identifier. */
	if ((dblk[4] & 0xf0) == 0x50) {
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* 'A' begins an ATA vendor-id designator - no NAA GUID. */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		/* back off one second before re-issuing the inquiry */
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
12447 12475
12448 12476 static int
12449 12477 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12450 12478 unsigned char *buf, int len, int *reallen, uchar_t evpd)
12451 12479 {
12452 12480 uchar_t cdb[CDB_GROUP0];
12453 12481 struct scsi_address ap;
12454 12482 struct buf *data_bp = NULL;
12455 12483 int resid = 0;
12456 12484 int ret = DDI_FAILURE;
12457 12485
12458 12486 ASSERT(len <= 0xffff);
12459 12487
12460 12488 ap.a_target = MPTSAS_INVALID_DEVHDL;
12461 12489 ap.a_lun = (uchar_t)(lun);
12462 12490 ap.a_hba_tran = mpt->m_tran;
12463 12491
12464 12492 data_bp = scsi_alloc_consistent_buf(&ap,
12465 12493 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12466 12494 if (data_bp == NULL) {
12467 12495 return (ret);
12468 12496 }
12469 12497 bzero(cdb, CDB_GROUP0);
12470 12498 cdb[0] = SCMD_INQUIRY;
12471 12499 cdb[1] = evpd;
12472 12500 cdb[2] = page;
12473 12501 cdb[3] = (len & 0xff00) >> 8;
12474 12502 cdb[4] = (len & 0x00ff);
12475 12503 cdb[5] = 0;
12476 12504
12477 12505 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12478 12506 &resid);
12479 12507 if (ret == DDI_SUCCESS) {
12480 12508 if (reallen) {
12481 12509 *reallen = len - resid;
12482 12510 }
12483 12511 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12484 12512 }
12485 12513 if (data_bp) {
12486 12514 scsi_free_consistent_buf(data_bp);
12487 12515 }
12488 12516 return (ret);
12489 12517 }
12490 12518
/*
 * Send a polled SCSI command directly through this HBA, bypassing the
 * normal target (sd) driver path.
 *
 * A clone of the HBA's scsi_hba_tran_t is built with tran_tgt_private
 * pointing at a temporary mptsas_tgt_private_t, so that scsi_init_pkt()
 * sees the same addressing information a real child device would supply.
 * The packet is issued synchronously with scsi_poll().
 *
 * Returns DDI_SUCCESS when the command completed without a check
 * condition; DDI_FAILURE on any allocation, transport or status error.
 * On success *resid (if non-NULL) receives the residual byte count.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL; check kept as-is. */
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	/* Point the clone at the caller's target/lun before building pkt. */
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Issue synchronously; negative return means transport failure. */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A check condition also counts as failure. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Common cleanup for both success and every failure path. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
12554 12582 static int
12555 12583 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
12556 12584 {
12557 12585 char *cp = NULL;
12558 12586 char *ptr = NULL;
12559 12587 size_t s = 0;
12560 12588 char *wwid_str = NULL;
12561 12589 char *lun_str = NULL;
12562 12590 long lunnum;
12563 12591 long phyid = -1;
12564 12592 int rc = DDI_FAILURE;
12565 12593
12566 12594 ptr = name;
12567 12595 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
12568 12596 ptr++;
12569 12597 if ((cp = strchr(ptr, ',')) == NULL) {
12570 12598 return (DDI_FAILURE);
12571 12599 }
12572 12600
12573 12601 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12574 12602 s = (uintptr_t)cp - (uintptr_t)ptr;
12575 12603
12576 12604 bcopy(ptr, wwid_str, s);
12577 12605 wwid_str[s] = '\0';
12578 12606
12579 12607 ptr = ++cp;
12580 12608
12581 12609 if ((cp = strchr(ptr, '\0')) == NULL) {
12582 12610 goto out;
12583 12611 }
12584 12612 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12585 12613 s = (uintptr_t)cp - (uintptr_t)ptr;
12586 12614
12587 12615 bcopy(ptr, lun_str, s);
12588 12616 lun_str[s] = '\0';
12589 12617
12590 12618 if (name[0] == 'p') {
12591 12619 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
12592 12620 } else {
12593 12621 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
12594 12622 }
12595 12623 if (rc != DDI_SUCCESS)
12596 12624 goto out;
12597 12625
12598 12626 if (phyid != -1) {
12599 12627 ASSERT(phyid < MPTSAS_MAX_PHYS);
12600 12628 *phy = (uint8_t)phyid;
12601 12629 }
12602 12630 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
12603 12631 if (rc != 0)
12604 12632 goto out;
12605 12633
12606 12634 *lun = (int)lunnum;
12607 12635 rc = DDI_SUCCESS;
12608 12636 out:
12609 12637 if (wwid_str)
12610 12638 kmem_free(wwid_str, SCSI_MAXNAMELEN);
12611 12639 if (lun_str)
12612 12640 kmem_free(lun_str, SCSI_MAXNAMELEN);
12613 12641
12614 12642 return (rc);
12615 12643 }
12616 12644
12617 12645 /*
12618 12646 * mptsas_parse_smp_name() is to parse sas wwn string
12619 12647 * which format is "wWWN"
12620 12648 */
12621 12649 static int
12622 12650 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12623 12651 {
12624 12652 char *ptr = name;
12625 12653
12626 12654 if (*ptr != 'w') {
12627 12655 return (DDI_FAILURE);
12628 12656 }
12629 12657
12630 12658 ptr++;
12631 12659 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12632 12660 return (DDI_FAILURE);
12633 12661 }
12634 12662 return (DDI_SUCCESS);
12635 12663 }
12636 12664
/*
 * tran bus_config entry point for the mpt_sas iport nexus.
 *
 * BUS_CONFIG_ONE parses the child unit address out of 'arg':
 *   "smp@wWWN"       - SMP (expander) child
 *   "...@wWWN,LUN"   - WWN-addressed SSP/SATA child
 *   "...@pPHY,LUN"   - phy-addressed (directly attached SATA) child
 * and configures just that child.  BUS_CONFIG_DRIVER/ALL enumerate
 * everything under the iport.  The scsi_vhci and iport nexus locks
 * are held across the operation, and the framework's
 * ndi_busop_bus_config() is invoked afterwards unless the
 * CD/DVD-in-OBP-path special case below says otherwise.
 *
 * Returns NDI_SUCCESS/NDI_FAILURE as required by the bus_config ops
 * vector; *childp is set for BUS_CONFIG_ONE on success.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (those with a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* devnm is freed at the bottom of this func */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				/* hold the child for the caller */
				ndi_hold_devi(*childp);
			}
		} else {
			/* not smp@, w..., or p... - unrecognized address */
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	if ((ret == NDI_SUCCESS) && bconfig) {
		/* use the canonicalized name if we built one above */
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
12754 12782
12755 12783 static int
12756 12784 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12757 12785 mptsas_target_t *ptgt)
12758 12786 {
12759 12787 int rval = DDI_FAILURE;
12760 12788 struct scsi_inquiry *sd_inq = NULL;
12761 12789 mptsas_t *mpt = DIP2MPT(pdip);
12762 12790
12763 12791 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12764 12792
12765 12793 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12766 12794 SUN_INQSIZE, 0, (uchar_t)0);
12767 12795
12768 12796 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12769 12797 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12770 12798 } else {
12771 12799 rval = DDI_FAILURE;
12772 12800 }
12773 12801
12774 12802 kmem_free(sd_inq, SUN_INQSIZE);
12775 12803 return (rval);
12776 12804 }
12777 12805
/*
 * BUS_CONFIG_ONE helper: configure the child identified by SAS
 * address + LUN on this iport.  On success *lundip points at the
 * (possibly pre-existing) child node.  A phymask of 0 identifies the
 * virtual RAID iport, in which case the handle is configured as an
 * IR volume instead of a physical LUN.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_sas_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	/* No existing child: probe and create one for this LUN. */
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
12838 12866
12839 12867 static int
12840 12868 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
12841 12869 dev_info_t **lundip)
12842 12870 {
12843 12871 int rval;
12844 12872 mptsas_t *mpt = DIP2MPT(pdip);
12845 12873 int phymask;
12846 12874 mptsas_target_t *ptgt = NULL;
12847 12875
12848 12876 /*
12849 12877 * Get the physical port associated to the iport
12850 12878 */
12851 12879 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12852 12880 "phymask", 0);
12853 12881
12854 12882 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
12855 12883 if (ptgt == NULL) {
12856 12884 /*
12857 12885 * didn't match any device by searching
12858 12886 */
12859 12887 return (DDI_FAILURE);
12860 12888 }
12861 12889
12862 12890 /*
12863 12891 * If the LUN already exists and the status is online,
12864 12892 * we just return the pointer to dev_info_t directly.
12865 12893 * For the mdi_pathinfo node, we'll handle it in
12866 12894 * mptsas_create_virt_lun().
12867 12895 */
12868 12896
12869 12897 *lundip = mptsas_find_child_phy(pdip, phy);
12870 12898 if (*lundip != NULL) {
12871 12899 return (DDI_SUCCESS);
12872 12900 }
12873 12901
12874 12902 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12875 12903
12876 12904 return (rval);
12877 12905 }
12878 12906
12879 12907 static int
12880 12908 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
12881 12909 uint8_t *lun_addr_type)
12882 12910 {
12883 12911 uint32_t lun_idx = 0;
12884 12912
12885 12913 ASSERT(lun_num != NULL);
12886 12914 ASSERT(lun_addr_type != NULL);
12887 12915
12888 12916 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
12889 12917 /* determine report luns addressing type */
12890 12918 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
12891 12919 /*
12892 12920 * Vendors in the field have been found to be concatenating
12893 12921 * bus/target/lun to equal the complete lun value instead
12894 12922 * of switching to flat space addressing
12895 12923 */
12896 12924 /* 00b - peripheral device addressing method */
12897 12925 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
12898 12926 /* FALLTHRU */
12899 12927 /* 10b - logical unit addressing method */
12900 12928 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
12901 12929 /* FALLTHRU */
12902 12930 /* 01b - flat space addressing method */
12903 12931 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
12904 12932 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
12905 12933 *lun_addr_type = (buf[lun_idx] &
12906 12934 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
12907 12935 *lun_num = (buf[lun_idx] & 0x3F) << 8;
12908 12936 *lun_num |= buf[lun_idx + 1];
12909 12937 return (DDI_SUCCESS);
12910 12938 default:
12911 12939 return (DDI_FAILURE);
12912 12940 }
12913 12941 }
12914 12942
12915 12943 static int
12916 12944 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
12917 12945 {
12918 12946 struct buf *repluns_bp = NULL;
12919 12947 struct scsi_address ap;
12920 12948 uchar_t cdb[CDB_GROUP5];
12921 12949 int ret = DDI_FAILURE;
12922 12950 int retry = 0;
12923 12951 int lun_list_len = 0;
12924 12952 uint16_t lun_num = 0;
12925 12953 uint8_t lun_addr_type = 0;
12926 12954 uint32_t lun_cnt = 0;
12927 12955 uint32_t lun_total = 0;
12928 12956 dev_info_t *cdip = NULL;
12929 12957 uint16_t *saved_repluns = NULL;
12930 12958 char *buffer = NULL;
12931 12959 int buf_len = 128;
12932 12960 mptsas_t *mpt = DIP2MPT(pdip);
12933 12961 uint64_t sas_wwn = 0;
12934 12962 uint8_t phy = 0xFF;
12935 12963 uint32_t dev_info = 0;
12936 12964
12937 12965 mutex_enter(&mpt->m_mutex);
12938 12966 sas_wwn = ptgt->m_sas_wwn;
12939 12967 phy = ptgt->m_phynum;
12940 12968 dev_info = ptgt->m_deviceinfo;
12941 12969 mutex_exit(&mpt->m_mutex);
12942 12970
12943 12971 if (sas_wwn == 0) {
12944 12972 /*
12945 12973 * It's a SATA without Device Name
12946 12974 * So don't try multi-LUNs
12947 12975 */
12948 12976 if (mptsas_find_child_phy(pdip, phy)) {
12949 12977 return (DDI_SUCCESS);
12950 12978 } else {
12951 12979 /*
12952 12980 * need configure and create node
12953 12981 */
12954 12982 return (DDI_FAILURE);
12955 12983 }
12956 12984 }
12957 12985
12958 12986 /*
12959 12987 * WWN (SAS address or Device Name exist)
12960 12988 */
12961 12989 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12962 12990 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12963 12991 /*
12964 12992 * SATA device with Device Name
12965 12993 * So don't try multi-LUNs
12966 12994 */
12967 12995 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
12968 12996 return (DDI_SUCCESS);
12969 12997 } else {
12970 12998 return (DDI_FAILURE);
12971 12999 }
12972 13000 }
12973 13001
12974 13002 do {
12975 13003 ap.a_target = MPTSAS_INVALID_DEVHDL;
12976 13004 ap.a_lun = 0;
12977 13005 ap.a_hba_tran = mpt->m_tran;
12978 13006 repluns_bp = scsi_alloc_consistent_buf(&ap,
12979 13007 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
12980 13008 if (repluns_bp == NULL) {
12981 13009 retry++;
12982 13010 continue;
12983 13011 }
12984 13012 bzero(cdb, CDB_GROUP5);
12985 13013 cdb[0] = SCMD_REPORT_LUNS;
12986 13014 cdb[6] = (buf_len & 0xff000000) >> 24;
12987 13015 cdb[7] = (buf_len & 0x00ff0000) >> 16;
12988 13016 cdb[8] = (buf_len & 0x0000ff00) >> 8;
12989 13017 cdb[9] = (buf_len & 0x000000ff);
12990 13018
12991 13019 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
12992 13020 repluns_bp, NULL);
12993 13021 if (ret != DDI_SUCCESS) {
12994 13022 scsi_free_consistent_buf(repluns_bp);
12995 13023 retry++;
12996 13024 continue;
12997 13025 }
12998 13026 lun_list_len = BE_32(*(int *)((void *)(
12999 13027 repluns_bp->b_un.b_addr)));
13000 13028 if (buf_len >= lun_list_len + 8) {
13001 13029 ret = DDI_SUCCESS;
13002 13030 break;
13003 13031 }
13004 13032 scsi_free_consistent_buf(repluns_bp);
13005 13033 buf_len = lun_list_len + 8;
13006 13034
13007 13035 } while (retry < 3);
13008 13036
13009 13037 if (ret != DDI_SUCCESS)
13010 13038 return (ret);
13011 13039 buffer = (char *)repluns_bp->b_un.b_addr;
13012 13040 /*
13013 13041 * find out the number of luns returned by the SCSI ReportLun call
13014 13042 * and allocate buffer space
13015 13043 */
13016 13044 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13017 13045 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13018 13046 if (saved_repluns == NULL) {
13019 13047 scsi_free_consistent_buf(repluns_bp);
13020 13048 return (DDI_FAILURE);
13021 13049 }
13022 13050 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13023 13051 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13024 13052 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13025 13053 continue;
13026 13054 }
13027 13055 saved_repluns[lun_cnt] = lun_num;
13028 13056 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13029 13057 ret = DDI_SUCCESS;
13030 13058 else
13031 13059 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13032 13060 ptgt);
13033 13061 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13034 13062 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13035 13063 MPTSAS_DEV_GONE);
13036 13064 }
13037 13065 }
13038 13066 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13039 13067 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13040 13068 scsi_free_consistent_buf(repluns_bp);
13041 13069 return (DDI_SUCCESS);
13042 13070 }
13043 13071
13044 13072 static int
13045 13073 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13046 13074 {
13047 13075 int rval = DDI_FAILURE;
13048 13076 struct scsi_inquiry *sd_inq = NULL;
13049 13077 mptsas_t *mpt = DIP2MPT(pdip);
13050 13078 mptsas_target_t *ptgt = NULL;
13051 13079
13052 13080 mutex_enter(&mpt->m_mutex);
13053 13081 ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13054 13082 mutex_exit(&mpt->m_mutex);
13055 13083 if (ptgt == NULL) {
13056 13084 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13057 13085 "not found.", target);
13058 13086 return (rval);
13059 13087 }
13060 13088
13061 13089 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13062 13090 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13063 13091 SUN_INQSIZE, 0, (uchar_t)0);
13064 13092
13065 13093 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13066 13094 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13067 13095 0);
13068 13096 } else {
13069 13097 rval = DDI_FAILURE;
13070 13098 }
13071 13099
13072 13100 kmem_free(sd_inq, SUN_INQSIZE);
13073 13101 return (rval);
13074 13102 }
13075 13103
13076 13104 /*
13077 13105 * configure all RAID volumes for virtual iport
13078 13106 */
13079 13107 static void
13080 13108 mptsas_config_all_viport(dev_info_t *pdip)
13081 13109 {
13082 13110 mptsas_t *mpt = DIP2MPT(pdip);
13083 13111 int config, vol;
13084 13112 int target;
13085 13113 dev_info_t *lundip = NULL;
13086 13114 mptsas_slots_t *slots = mpt->m_active;
13087 13115
13088 13116 /*
13089 13117 * Get latest RAID info and search for any Volume DevHandles. If any
13090 13118 * are found, configure the volume.
13091 13119 */
13092 13120 mutex_enter(&mpt->m_mutex);
13093 13121 for (config = 0; config < slots->m_num_raid_configs; config++) {
13094 13122 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13095 13123 if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13096 13124 == 1) {
13097 13125 target = slots->m_raidconfig[config].
13098 13126 m_raidvol[vol].m_raidhandle;
13099 13127 mutex_exit(&mpt->m_mutex);
13100 13128 (void) mptsas_config_raid(pdip, target,
13101 13129 &lundip);
13102 13130 mutex_enter(&mpt->m_mutex);
13103 13131 }
13104 13132 }
13105 13133 }
13106 13134 mutex_exit(&mpt->m_mutex);
13107 13135 }
13108 13136
/*
 * Offline every child node and mdi_pathinfo node of this target whose
 * LUN no longer appears in the REPORT LUNS response.  'repluns' holds
 * the lun_cnt LUN numbers the target just reported; anything attached
 * under pdip with a matching WWN but an unlisted LUN is removed.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		/*
		 * Advance to the next sibling before possibly removing
		 * savechild, so the iteration survives the removal.
		 */
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP (expander) children are not LUNs; skip them. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only consider children belonging to this target's WWN. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/* Repeat the same scan for multipath (mdi_pathinfo) children. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		/* Advance before possibly offlining savepip. */
		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
13205 13233
/*
 * Walk the controller's configuration pages and (re)populate the SMP
 * and target hash tables: first the expander (SMP) pages, then the
 * SAS device pages, each via the firmware's GetNextHandle iteration
 * starting from the saved m_smp_devhdl / m_dev_handle cursors.
 *
 * NOTE(review): the loops terminate either on a page-read failure or
 * when m_done_traverse_smp / m_done_traverse_dev becomes nonzero —
 * presumably set inside the page-fetch helpers once the last handle
 * is reached; confirm against those helpers.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Resume the expander walk from the saved handle cursor. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Persist the cursor so a restart continues from here. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
13259 13287
13260 13288 void
13261 13289 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
13262 13290 {
13263 13291 mptsas_hash_data_t *data;
13264 13292 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13265 13293 while (data != NULL) {
13266 13294 data->devhdl = MPTSAS_INVALID_DEVHDL;
13267 13295 data->device_info = 0;
13268 13296 /*
13269 13297 * For tgttbl, clear dr_flag.
13270 13298 */
13271 13299 data->dr_flag = MPTSAS_DR_INACTIVE;
13272 13300 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13273 13301 }
13274 13302 }
13275 13303
/*
 * Rebuild the driver's topology state after a hard reset of the
 * controller: refresh the port/phymask mapping, invalidate every
 * target and SMP hash entry, and re-walk the configuration pages.
 * The steps must run in this order — invalidation before the
 * re-traversal that repopulates the tables.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalid the existing entries
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* Reset the traversal flags and handle cursors for a full walk. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
13297 13325
/*
 * BUS_CONFIG_ALL/DRIVER: enumerate every SMP and target device whose
 * phymask matches this iport.  The virtual iport (phymask == 0)
 * carries only RAID volumes and is handled by
 * mptsas_config_all_viport().
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Make sure the hash tables reflect the current topology. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/* Online each SMP node on this iport's physical port. */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			/*
			 * Drop m_mutex around node creation, which may
			 * block; reacquire before continuing traversal.
			 */
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* Configure each target on this iport's physical port. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			/* Same lock-drop pattern as the SMP loop above. */
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
13357 13385
13358 13386 static int
13359 13387 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13360 13388 {
13361 13389 int rval = DDI_FAILURE;
13362 13390 dev_info_t *tdip;
13363 13391
13364 13392 rval = mptsas_config_luns(pdip, ptgt);
13365 13393 if (rval != DDI_SUCCESS) {
13366 13394 /*
13367 13395 * The return value means the SCMD_REPORT_LUNS
13368 13396 * did not execute successfully. The target maybe
13369 13397 * doesn't support such command.
13370 13398 */
13371 13399 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13372 13400 }
13373 13401 return (rval);
13374 13402 }
13375 13403
13376 13404 /*
13377 13405 * Return fail if not all the childs/paths are freed.
13378 13406 * if there is any path under the HBA, the return value will be always fail
13379 13407 * because we didn't call mdi_pi_free for path
13380 13408 */
13381 13409 static int
13382 13410 mptsas_offline_target(dev_info_t *pdip, char *name)
13383 13411 {
13384 13412 dev_info_t *child = NULL, *prechild = NULL;
13385 13413 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
13386 13414 int tmp_rval, rval = DDI_SUCCESS;
13387 13415 char *addr, *cp;
13388 13416 size_t s;
13389 13417 mptsas_t *mpt = DIP2MPT(pdip);
13390 13418
13391 13419 child = ddi_get_child(pdip);
13392 13420 while (child) {
13393 13421 addr = ddi_get_name_addr(child);
13394 13422 prechild = child;
13395 13423 child = ddi_get_next_sibling(child);
13396 13424
13397 13425 if (addr == NULL) {
13398 13426 continue;
13399 13427 }
13400 13428 if ((cp = strchr(addr, ',')) == NULL) {
13401 13429 continue;
13402 13430 }
13403 13431
13404 13432 s = (uintptr_t)cp - (uintptr_t)addr;
13405 13433
13406 13434 if (strncmp(addr, name, s) != 0) {
13407 13435 continue;
13408 13436 }
13409 13437
13410 13438 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
13411 13439 NDI_DEVI_REMOVE);
13412 13440 if (tmp_rval != DDI_SUCCESS) {
13413 13441 rval = DDI_FAILURE;
13414 13442 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
13415 13443 prechild, MPTSAS_DEV_GONE) !=
13416 13444 DDI_PROP_SUCCESS) {
13417 13445 mptsas_log(mpt, CE_WARN, "mptsas driver "
13418 13446 "unable to create property for "
13419 13447 "SAS %s (MPTSAS_DEV_GONE)", addr);
13420 13448 }
13421 13449 }
13422 13450 }
13423 13451
13424 13452 pip = mdi_get_next_client_path(pdip, NULL);
13425 13453 while (pip) {
13426 13454 addr = MDI_PI(pip)->pi_addr;
13427 13455 savepip = pip;
13428 13456 pip = mdi_get_next_client_path(pdip, pip);
13429 13457 if (addr == NULL) {
13430 13458 continue;
13431 13459 }
13432 13460
13433 13461 if ((cp = strchr(addr, ',')) == NULL) {
13434 13462 continue;
13435 13463 }
13436 13464
13437 13465 s = (uintptr_t)cp - (uintptr_t)addr;
13438 13466
13439 13467 if (strncmp(addr, name, s) != 0) {
13440 13468 continue;
13441 13469 }
13442 13470
13443 13471 (void) mptsas_offline_lun(pdip, NULL, savepip,
13444 13472 NDI_DEVI_REMOVE);
13445 13473 /*
13446 13474 * driver will not invoke mdi_pi_free, so path will not
13447 13475 * be freed forever, return DDI_FAILURE.
13448 13476 */
13449 13477 rval = DDI_FAILURE;
13450 13478 }
13451 13479 return (rval);
13452 13480 }
13453 13481
13454 13482 static int
13455 13483 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13456 13484 mdi_pathinfo_t *rpip, uint_t flags)
13457 13485 {
13458 13486 int rval = DDI_FAILURE;
13459 13487 char *devname;
13460 13488 dev_info_t *cdip, *parent;
13461 13489
13462 13490 if (rpip != NULL) {
13463 13491 parent = scsi_vhci_dip;
13464 13492 cdip = mdi_pi_get_client(rpip);
13465 13493 } else if (rdip != NULL) {
13466 13494 parent = pdip;
13467 13495 cdip = rdip;
13468 13496 } else {
13469 13497 return (DDI_FAILURE);
13470 13498 }
13471 13499
13472 13500 /*
13473 13501 * Make sure node is attached otherwise
13474 13502 * it won't have related cache nodes to
13475 13503 * clean up. i_ddi_devi_attached is
13476 13504 * similiar to i_ddi_node_state(cdip) >=
13477 13505 * DS_ATTACHED.
13478 13506 */
13479 13507 if (i_ddi_devi_attached(cdip)) {
13480 13508
13481 13509 /* Get full devname */
13482 13510 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13483 13511 (void) ddi_deviname(cdip, devname);
13484 13512 /* Clean cache */
13485 13513 (void) devfs_clean(parent, devname + 1,
13486 13514 DV_CLEAN_FORCE);
13487 13515 kmem_free(devname, MAXNAMELEN + 1);
13488 13516 }
13489 13517 if (rpip != NULL) {
13490 13518 if (MDI_PI_IS_OFFLINE(rpip)) {
13491 13519 rval = DDI_SUCCESS;
13492 13520 } else {
13493 13521 rval = mdi_pi_offline(rpip, 0);
13494 13522 }
13495 13523 } else {
13496 13524 rval = ndi_devi_offline(cdip, flags);
13497 13525 }
13498 13526
13499 13527 return (rval);
13500 13528 }
13501 13529
13502 13530 static dev_info_t *
13503 13531 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13504 13532 {
13505 13533 dev_info_t *child = NULL;
13506 13534 char *smp_wwn = NULL;
13507 13535
13508 13536 child = ddi_get_child(parent);
13509 13537 while (child) {
13510 13538 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13511 13539 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13512 13540 != DDI_SUCCESS) {
13513 13541 child = ddi_get_next_sibling(child);
13514 13542 continue;
13515 13543 }
13516 13544
13517 13545 if (strcmp(smp_wwn, str_wwn) == 0) {
13518 13546 ddi_prop_free(smp_wwn);
13519 13547 break;
13520 13548 }
13521 13549 child = ddi_get_next_sibling(child);
13522 13550 ddi_prop_free(smp_wwn);
13523 13551 }
13524 13552 return (child);
13525 13553 }
13526 13554
13527 13555 static int
13528 13556 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13529 13557 {
13530 13558 int rval = DDI_FAILURE;
13531 13559 char *devname;
13532 13560 char wwn_str[MPTSAS_WWN_STRLEN];
13533 13561 dev_info_t *cdip;
13534 13562
13535 13563 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
13536 13564
13537 13565 cdip = mptsas_find_smp_child(pdip, wwn_str);
13538 13566
13539 13567 if (cdip == NULL)
13540 13568 return (DDI_SUCCESS);
13541 13569
13542 13570 /*
13543 13571 * Make sure node is attached otherwise
13544 13572 * it won't have related cache nodes to
13545 13573 * clean up. i_ddi_devi_attached is
13546 13574 * similiar to i_ddi_node_state(cdip) >=
13547 13575 * DS_ATTACHED.
13548 13576 */
13549 13577 if (i_ddi_devi_attached(cdip)) {
13550 13578
13551 13579 /* Get full devname */
13552 13580 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13553 13581 (void) ddi_deviname(cdip, devname);
13554 13582 /* Clean cache */
13555 13583 (void) devfs_clean(pdip, devname + 1,
13556 13584 DV_CLEAN_FORCE);
13557 13585 kmem_free(devname, MAXNAMELEN + 1);
13558 13586 }
13559 13587
13560 13588 rval = ndi_devi_offline(cdip, flags);
13561 13589
13562 13590 return (rval);
13563 13591 }
13564 13592
13565 13593 static dev_info_t *
13566 13594 mptsas_find_child(dev_info_t *pdip, char *name)
13567 13595 {
13568 13596 dev_info_t *child = NULL;
13569 13597 char *rname = NULL;
13570 13598 int rval = DDI_FAILURE;
13571 13599
13572 13600 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13573 13601
13574 13602 child = ddi_get_child(pdip);
13575 13603 while (child) {
13576 13604 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13577 13605 if (rval != DDI_SUCCESS) {
13578 13606 child = ddi_get_next_sibling(child);
13579 13607 bzero(rname, SCSI_MAXNAMELEN);
13580 13608 continue;
13581 13609 }
13582 13610
13583 13611 if (strcmp(rname, name) == 0) {
13584 13612 break;
13585 13613 }
13586 13614 child = ddi_get_next_sibling(child);
13587 13615 bzero(rname, SCSI_MAXNAMELEN);
13588 13616 }
13589 13617
13590 13618 kmem_free(rname, SCSI_MAXNAMELEN);
13591 13619
13592 13620 return (child);
13593 13621 }
13594 13622
13595 13623
13596 13624 static dev_info_t *
13597 13625 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13598 13626 {
13599 13627 dev_info_t *child = NULL;
13600 13628 char *name = NULL;
13601 13629 char *addr = NULL;
13602 13630
13603 13631 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13604 13632 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13605 13633 (void) sprintf(name, "%016"PRIx64, sasaddr);
13606 13634 (void) sprintf(addr, "w%s,%x", name, lun);
13607 13635 child = mptsas_find_child(pdip, addr);
13608 13636 kmem_free(name, SCSI_MAXNAMELEN);
13609 13637 kmem_free(addr, SCSI_MAXNAMELEN);
13610 13638 return (child);
13611 13639 }
13612 13640
13613 13641 static dev_info_t *
13614 13642 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13615 13643 {
13616 13644 dev_info_t *child;
13617 13645 char *addr;
13618 13646
13619 13647 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13620 13648 (void) sprintf(addr, "p%x,0", phy);
13621 13649 child = mptsas_find_child(pdip, addr);
13622 13650 kmem_free(addr, SCSI_MAXNAMELEN);
13623 13651 return (child);
13624 13652 }
13625 13653
13626 13654 static mdi_pathinfo_t *
13627 13655 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13628 13656 {
13629 13657 mdi_pathinfo_t *path;
13630 13658 char *addr = NULL;
13631 13659
13632 13660 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13633 13661 (void) sprintf(addr, "p%x,0", phy);
13634 13662 path = mdi_pi_find(pdip, NULL, addr);
13635 13663 kmem_free(addr, SCSI_MAXNAMELEN);
13636 13664 return (path);
13637 13665 }
13638 13666
13639 13667 static mdi_pathinfo_t *
13640 13668 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13641 13669 {
13642 13670 mdi_pathinfo_t *path;
13643 13671 char *name = NULL;
13644 13672 char *addr = NULL;
13645 13673
13646 13674 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13647 13675 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13648 13676 (void) sprintf(name, "%016"PRIx64, sasaddr);
13649 13677 (void) sprintf(addr, "w%s,%x", name, lun);
13650 13678 path = mdi_pi_find(parent, NULL, addr);
13651 13679 kmem_free(name, SCSI_MAXNAMELEN);
13652 13680 kmem_free(addr, SCSI_MAXNAMELEN);
13653 13681
13654 13682 return (path);
13655 13683 }
13656 13684
/*
 * Enumerate one target/LUN: obtain a devid GUID from INQUIRY VPD page 0x83
 * (with retries for firmware that returns corrupt data at first), then create
 * the LUN either as an MPxIO virtual path (when MPxIO is enabled and a valid
 * GUID was obtained) or as a physical child node.
 *
 * Returns DDI_SUCCESS when either form of node creation succeeds, otherwise
 * DDI_FAILURE.  *lun_dip is filled in by the create helpers on success.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			i = 0;
	uchar_t			*inq83 = NULL;
	int			inq83_len1 = 0xFF;	/* VPD page buffer size */
	int			inq83_len = 0;		/* actual returned length */
	int			rval = DDI_FAILURE;
	ddi_devid_t		devid;
	char			*guid = NULL;
	int			target = ptgt->m_devhdl;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);

	/* One-second delay per iteration, so the loop bound is in seconds. */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall through and
			 * bind a physical node even when page 0x83 fails.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/*
	 * Reset rval before node creation; the create helpers below
	 * determine the final result.
	 */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer an MPxIO virtual path when we have a GUID and MPxIO is on. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical child node if the virtual path failed. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
13780 13808
13781 13809 static int
13782 13810 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13783 13811 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13784 13812 {
13785 13813 int target;
13786 13814 char *nodename = NULL;
13787 13815 char **compatible = NULL;
13788 13816 int ncompatible = 0;
13789 13817 int mdi_rtn = MDI_FAILURE;
13790 13818 int rval = DDI_FAILURE;
13791 13819 char *old_guid = NULL;
13792 13820 mptsas_t *mpt = DIP2MPT(pdip);
13793 13821 char *lun_addr = NULL;
13794 13822 char *wwn_str = NULL;
13795 13823 char *attached_wwn_str = NULL;
13796 13824 char *component = NULL;
13797 13825 uint8_t phy = 0xFF;
13798 13826 uint64_t sas_wwn;
13799 13827 int64_t lun64 = 0;
13800 13828 uint32_t devinfo;
13801 13829 uint16_t dev_hdl;
13802 13830 uint16_t pdev_hdl;
13803 13831 uint64_t dev_sas_wwn;
13804 13832 uint64_t pdev_sas_wwn;
13805 13833 uint32_t pdev_info;
13806 13834 uint8_t physport;
13807 13835 uint8_t phy_id;
13808 13836 uint32_t page_address;
13809 13837 uint16_t bay_num, enclosure;
13810 13838 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
13811 13839 uint32_t dev_info;
13812 13840
13813 13841 mutex_enter(&mpt->m_mutex);
13814 13842 target = ptgt->m_devhdl;
13815 13843 sas_wwn = ptgt->m_sas_wwn;
13816 13844 devinfo = ptgt->m_deviceinfo;
13817 13845 phy = ptgt->m_phynum;
13818 13846 mutex_exit(&mpt->m_mutex);
13819 13847
13820 13848 if (sas_wwn) {
13821 13849 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13822 13850 } else {
13823 13851 *pip = mptsas_find_path_phy(pdip, phy);
13824 13852 }
13825 13853
13826 13854 if (*pip != NULL) {
13827 13855 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13828 13856 ASSERT(*lun_dip != NULL);
13829 13857 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
13830 13858 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
|
↓ open down ↓ |
2434 lines elided |
↑ open up ↑ |
13831 13859 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
13832 13860 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
13833 13861 /*
13834 13862 * Same path back online again.
13835 13863 */
13836 13864 (void) ddi_prop_free(old_guid);
13837 13865 if ((!MDI_PI_IS_ONLINE(*pip)) &&
13838 13866 (!MDI_PI_IS_STANDBY(*pip)) &&
13839 13867 (ptgt->m_tgt_unconfigured == 0)) {
13840 13868 rval = mdi_pi_online(*pip, 0);
13841 - mutex_enter(&mpt->m_mutex);
13842 - (void) mptsas_set_led_status(mpt, ptgt,
13843 - 0);
13844 - mutex_exit(&mpt->m_mutex);
13845 13869 } else {
13846 13870 rval = DDI_SUCCESS;
13847 13871 }
13848 13872 if (rval != DDI_SUCCESS) {
13849 13873 mptsas_log(mpt, CE_WARN, "path:target: "
13850 13874 "%x, lun:%x online failed!", target,
13851 13875 lun);
13852 13876 *pip = NULL;
13853 13877 *lun_dip = NULL;
13854 13878 }
13855 13879 return (rval);
13856 13880 } else {
13857 13881 /*
13858 13882 * The GUID of the LUN has changed which maybe
13859 13883 * because customer mapped another volume to the
13860 13884 * same LUN.
13861 13885 */
13862 13886 mptsas_log(mpt, CE_WARN, "The GUID of the "
13863 13887 "target:%x, lun:%x was changed, maybe "
13864 13888 "because someone mapped another volume "
13865 13889 "to the same LUN", target, lun);
13866 13890 (void) ddi_prop_free(old_guid);
13867 13891 if (!MDI_PI_IS_OFFLINE(*pip)) {
13868 13892 rval = mdi_pi_offline(*pip, 0);
13869 13893 if (rval != MDI_SUCCESS) {
13870 13894 mptsas_log(mpt, CE_WARN, "path:"
13871 13895 "target:%x, lun:%x offline "
13872 13896 "failed!", target, lun);
13873 13897 *pip = NULL;
13874 13898 *lun_dip = NULL;
13875 13899 return (DDI_FAILURE);
13876 13900 }
13877 13901 }
13878 13902 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
13879 13903 mptsas_log(mpt, CE_WARN, "path:target:"
13880 13904 "%x, lun:%x free failed!", target,
13881 13905 lun);
13882 13906 *pip = NULL;
13883 13907 *lun_dip = NULL;
13884 13908 return (DDI_FAILURE);
13885 13909 }
13886 13910 }
13887 13911 } else {
13888 13912 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
13889 13913 "property for path:target:%x, lun:%x", target, lun);
13890 13914 *pip = NULL;
13891 13915 *lun_dip = NULL;
13892 13916 return (DDI_FAILURE);
13893 13917 }
13894 13918 }
13895 13919 scsi_hba_nodename_compatible_get(inq, NULL,
13896 13920 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
13897 13921
13898 13922 /*
13899 13923 * if nodename can't be determined then print a message and skip it
13900 13924 */
13901 13925 if (nodename == NULL) {
13902 13926 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
13903 13927 "driver for target%d lun %d dtype:0x%02x", target, lun,
13904 13928 inq->inq_dtype);
13905 13929 return (DDI_FAILURE);
13906 13930 }
13907 13931
13908 13932 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
13909 13933 /* The property is needed by MPAPI */
13910 13934 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
13911 13935
13912 13936 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13913 13937 if (guid) {
13914 13938 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
13915 13939 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
13916 13940 } else {
13917 13941 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
13918 13942 (void) sprintf(wwn_str, "p%x", phy);
13919 13943 }
13920 13944
13921 13945 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
13922 13946 guid, lun_addr, compatible, ncompatible,
13923 13947 0, pip);
13924 13948 if (mdi_rtn == MDI_SUCCESS) {
13925 13949
13926 13950 if (mdi_prop_update_string(*pip, MDI_GUID,
13927 13951 guid) != DDI_SUCCESS) {
13928 13952 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13929 13953 "create prop for target %d lun %d (MDI_GUID)",
13930 13954 target, lun);
13931 13955 mdi_rtn = MDI_FAILURE;
13932 13956 goto virt_create_done;
13933 13957 }
13934 13958
13935 13959 if (mdi_prop_update_int(*pip, LUN_PROP,
13936 13960 lun) != DDI_SUCCESS) {
13937 13961 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13938 13962 "create prop for target %d lun %d (LUN_PROP)",
13939 13963 target, lun);
13940 13964 mdi_rtn = MDI_FAILURE;
13941 13965 goto virt_create_done;
13942 13966 }
13943 13967 lun64 = (int64_t)lun;
13944 13968 if (mdi_prop_update_int64(*pip, LUN64_PROP,
13945 13969 lun64) != DDI_SUCCESS) {
13946 13970 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13947 13971 "create prop for target %d (LUN64_PROP)",
13948 13972 target);
13949 13973 mdi_rtn = MDI_FAILURE;
13950 13974 goto virt_create_done;
13951 13975 }
13952 13976 if (mdi_prop_update_string_array(*pip, "compatible",
13953 13977 compatible, ncompatible) !=
13954 13978 DDI_PROP_SUCCESS) {
13955 13979 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13956 13980 "create prop for target %d lun %d (COMPATIBLE)",
13957 13981 target, lun);
13958 13982 mdi_rtn = MDI_FAILURE;
13959 13983 goto virt_create_done;
13960 13984 }
13961 13985 if (sas_wwn && (mdi_prop_update_string(*pip,
13962 13986 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
13963 13987 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13964 13988 "create prop for target %d lun %d "
13965 13989 "(target-port)", target, lun);
13966 13990 mdi_rtn = MDI_FAILURE;
13967 13991 goto virt_create_done;
13968 13992 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
13969 13993 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
13970 13994 /*
13971 13995 * Direct attached SATA device without DeviceName
13972 13996 */
13973 13997 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13974 13998 "create prop for SAS target %d lun %d "
13975 13999 "(sata-phy)", target, lun);
13976 14000 mdi_rtn = MDI_FAILURE;
13977 14001 goto virt_create_done;
13978 14002 }
13979 14003 mutex_enter(&mpt->m_mutex);
13980 14004
13981 14005 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
13982 14006 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13983 14007 (uint32_t)ptgt->m_devhdl;
13984 14008 rval = mptsas_get_sas_device_page0(mpt, page_address,
13985 14009 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
13986 14010 &phy_id, &pdev_hdl, &bay_num, &enclosure);
13987 14011 if (rval != DDI_SUCCESS) {
13988 14012 mutex_exit(&mpt->m_mutex);
13989 14013 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
13990 14014 "parent device for handle %d", page_address);
13991 14015 mdi_rtn = MDI_FAILURE;
13992 14016 goto virt_create_done;
13993 14017 }
13994 14018
13995 14019 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
13996 14020 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
13997 14021 rval = mptsas_get_sas_device_page0(mpt, page_address,
13998 14022 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
13999 14023 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14000 14024 if (rval != DDI_SUCCESS) {
14001 14025 mutex_exit(&mpt->m_mutex);
14002 14026 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14003 14027 "device info for handle %d", page_address);
14004 14028 mdi_rtn = MDI_FAILURE;
14005 14029 goto virt_create_done;
14006 14030 }
14007 14031
14008 14032 mutex_exit(&mpt->m_mutex);
14009 14033
14010 14034 /*
14011 14035 * If this device direct attached to the controller
14012 14036 * set the attached-port to the base wwid
14013 14037 */
14014 14038 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14015 14039 != DEVINFO_DIRECT_ATTACHED) {
14016 14040 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14017 14041 pdev_sas_wwn);
14018 14042 } else {
14019 14043 /*
14020 14044 * Update the iport's attached-port to guid
14021 14045 */
14022 14046 if (sas_wwn == 0) {
14023 14047 (void) sprintf(wwn_str, "p%x", phy);
14024 14048 } else {
14025 14049 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14026 14050 }
14027 14051 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14028 14052 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14029 14053 DDI_PROP_SUCCESS) {
14030 14054 mptsas_log(mpt, CE_WARN,
14031 14055 "mptsas unable to create "
14032 14056 "property for iport target-port"
14033 14057 " %s (sas_wwn)",
14034 14058 wwn_str);
14035 14059 mdi_rtn = MDI_FAILURE;
14036 14060 goto virt_create_done;
14037 14061 }
14038 14062
14039 14063 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14040 14064 mpt->un.m_base_wwid);
14041 14065 }
14042 14066
14043 14067 if (mdi_prop_update_string(*pip,
14044 14068 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14045 14069 DDI_PROP_SUCCESS) {
14046 14070 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14047 14071 "property for iport attached-port %s (sas_wwn)",
14048 14072 attached_wwn_str);
14049 14073 mdi_rtn = MDI_FAILURE;
14050 14074 goto virt_create_done;
14051 14075 }
14052 14076
14053 14077
14054 14078 if (inq->inq_dtype == 0) {
14055 14079 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14056 14080 /*
14057 14081 * set obp path for pathinfo
14058 14082 */
14059 14083 (void) snprintf(component, MAXPATHLEN,
14060 14084 "disk@%s", lun_addr);
14061 14085
14062 14086 if (mdi_pi_pathname_obp_set(*pip, component) !=
14063 14087 DDI_SUCCESS) {
14064 14088 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14065 14089 "unable to set obp-path for object %s",
14066 14090 component);
14067 14091 mdi_rtn = MDI_FAILURE;
14068 14092 goto virt_create_done;
14069 14093 }
14070 14094 }
14071 14095
14072 14096 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14073 14097 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14074 14098 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14075 14099 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14076 14100 "pm-capable", 1)) !=
14077 14101 DDI_PROP_SUCCESS) {
14078 14102 mptsas_log(mpt, CE_WARN, "mptsas driver"
14079 14103 "failed to create pm-capable "
14080 14104 "property, target %d", target);
14081 14105 mdi_rtn = MDI_FAILURE;
14082 14106 goto virt_create_done;
14083 14107 }
14084 14108 }
14085 14109 /*
14086 14110 * Create the phy-num property
14087 14111 */
|
↓ open down ↓ |
233 lines elided |
↑ open up ↑ |
14088 14112 if (mdi_prop_update_int(*pip, "phy-num",
14089 14113 ptgt->m_phynum) != DDI_SUCCESS) {
14090 14114 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14091 14115 "create phy-num property for target %d lun %d",
14092 14116 target, lun);
14093 14117 mdi_rtn = MDI_FAILURE;
14094 14118 goto virt_create_done;
14095 14119 }
14096 14120 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14097 14121 mdi_rtn = mdi_pi_online(*pip, 0);
14098 - if (mdi_rtn == MDI_SUCCESS) {
14099 - mutex_enter(&mpt->m_mutex);
14100 - if (mptsas_set_led_status(mpt, ptgt, 0) !=
14101 - DDI_SUCCESS) {
14102 - NDBG14(("mptsas: clear LED for slot %x "
14103 - "failed", ptgt->m_slot_num));
14104 - }
14105 - mutex_exit(&mpt->m_mutex);
14106 - }
14107 14122 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14108 14123 mdi_rtn = MDI_FAILURE;
14109 14124 }
14110 14125 virt_create_done:
14111 14126 if (*pip && mdi_rtn != MDI_SUCCESS) {
14112 14127 (void) mdi_pi_free(*pip, 0);
14113 14128 *pip = NULL;
14114 14129 *lun_dip = NULL;
14115 14130 }
14116 14131 }
14117 14132
14118 14133 scsi_hba_nodename_compatible_free(nodename, compatible);
14119 14134 if (lun_addr != NULL) {
14120 14135 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14121 14136 }
14122 14137 if (wwn_str != NULL) {
14123 14138 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14124 14139 }
14125 14140 if (component != NULL) {
14126 14141 kmem_free(component, MAXPATHLEN);
14127 14142 }
14128 14143
14129 14144 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14130 14145 }
14131 14146
14132 14147 static int
14133 14148 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14134 14149 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14135 14150 {
14136 14151 int target;
14137 14152 int rval;
14138 14153 int ndi_rtn = NDI_FAILURE;
14139 14154 uint64_t be_sas_wwn;
14140 14155 char *nodename = NULL;
14141 14156 char **compatible = NULL;
14142 14157 int ncompatible = 0;
14143 14158 int instance = 0;
14144 14159 mptsas_t *mpt = DIP2MPT(pdip);
14145 14160 char *wwn_str = NULL;
14146 14161 char *component = NULL;
14147 14162 char *attached_wwn_str = NULL;
14148 14163 uint8_t phy = 0xFF;
14149 14164 uint64_t sas_wwn;
14150 14165 uint32_t devinfo;
14151 14166 uint16_t dev_hdl;
14152 14167 uint16_t pdev_hdl;
14153 14168 uint64_t pdev_sas_wwn;
14154 14169 uint64_t dev_sas_wwn;
14155 14170 uint32_t pdev_info;
14156 14171 uint8_t physport;
14157 14172 uint8_t phy_id;
14158 14173 uint32_t page_address;
14159 14174 uint16_t bay_num, enclosure;
14160 14175 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14161 14176 uint32_t dev_info;
14162 14177 int64_t lun64 = 0;
14163 14178
14164 14179 mutex_enter(&mpt->m_mutex);
14165 14180 target = ptgt->m_devhdl;
14166 14181 sas_wwn = ptgt->m_sas_wwn;
14167 14182 devinfo = ptgt->m_deviceinfo;
14168 14183 phy = ptgt->m_phynum;
14169 14184 mutex_exit(&mpt->m_mutex);
14170 14185
14171 14186 /*
14172 14187 * generate compatible property with binding-set "mpt"
14173 14188 */
14174 14189 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14175 14190 &nodename, &compatible, &ncompatible);
14176 14191
14177 14192 /*
14178 14193 * if nodename can't be determined then print a message and skip it
14179 14194 */
14180 14195 if (nodename == NULL) {
14181 14196 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14182 14197 "for target %d lun %d", target, lun);
14183 14198 return (DDI_FAILURE);
14184 14199 }
14185 14200
14186 14201 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14187 14202 DEVI_SID_NODEID, lun_dip);
14188 14203
14189 14204 /*
14190 14205 * if lun alloc success, set props
14191 14206 */
14192 14207 if (ndi_rtn == NDI_SUCCESS) {
14193 14208
14194 14209 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14195 14210 *lun_dip, LUN_PROP, lun) !=
14196 14211 DDI_PROP_SUCCESS) {
14197 14212 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14198 14213 "property for target %d lun %d (LUN_PROP)",
14199 14214 target, lun);
14200 14215 ndi_rtn = NDI_FAILURE;
14201 14216 goto phys_create_done;
14202 14217 }
14203 14218
14204 14219 lun64 = (int64_t)lun;
14205 14220 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14206 14221 *lun_dip, LUN64_PROP, lun64) !=
14207 14222 DDI_PROP_SUCCESS) {
14208 14223 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14209 14224 "property for target %d lun64 %d (LUN64_PROP)",
14210 14225 target, lun);
14211 14226 ndi_rtn = NDI_FAILURE;
14212 14227 goto phys_create_done;
14213 14228 }
14214 14229 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14215 14230 *lun_dip, "compatible", compatible, ncompatible)
14216 14231 != DDI_PROP_SUCCESS) {
14217 14232 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14218 14233 "property for target %d lun %d (COMPATIBLE)",
14219 14234 target, lun);
14220 14235 ndi_rtn = NDI_FAILURE;
14221 14236 goto phys_create_done;
14222 14237 }
14223 14238
14224 14239 /*
14225 14240 * We need the SAS WWN for non-multipath devices, so
14226 14241 * we'll use the same property as that multipathing
14227 14242 * devices need to present for MPAPI. If we don't have
14228 14243 * a WWN (e.g. parallel SCSI), don't create the prop.
14229 14244 */
14230 14245 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14231 14246 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14232 14247 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14233 14248 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14234 14249 != DDI_PROP_SUCCESS) {
14235 14250 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14236 14251 "create property for SAS target %d lun %d "
14237 14252 "(target-port)", target, lun);
14238 14253 ndi_rtn = NDI_FAILURE;
14239 14254 goto phys_create_done;
14240 14255 }
14241 14256
14242 14257 be_sas_wwn = BE_64(sas_wwn);
14243 14258 if (sas_wwn && ndi_prop_update_byte_array(
14244 14259 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14245 14260 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14246 14261 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14247 14262 "create property for SAS target %d lun %d "
14248 14263 "(port-wwn)", target, lun);
14249 14264 ndi_rtn = NDI_FAILURE;
14250 14265 goto phys_create_done;
14251 14266 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
14252 14267 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14253 14268 DDI_PROP_SUCCESS)) {
14254 14269 /*
14255 14270 * Direct attached SATA device without DeviceName
14256 14271 */
14257 14272 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14258 14273 "create property for SAS target %d lun %d "
14259 14274 "(sata-phy)", target, lun);
14260 14275 ndi_rtn = NDI_FAILURE;
14261 14276 goto phys_create_done;
14262 14277 }
14263 14278
14264 14279 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14265 14280 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14266 14281 mptsas_log(mpt, CE_WARN, "mptsas unable to"
14267 14282 "create property for SAS target %d lun %d"
14268 14283 " (SAS_PROP)", target, lun);
14269 14284 ndi_rtn = NDI_FAILURE;
14270 14285 goto phys_create_done;
14271 14286 }
14272 14287 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14273 14288 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14274 14289 mptsas_log(mpt, CE_WARN, "mptsas unable "
14275 14290 "to create guid property for target %d "
14276 14291 "lun %d", target, lun);
14277 14292 ndi_rtn = NDI_FAILURE;
14278 14293 goto phys_create_done;
14279 14294 }
14280 14295
14281 14296 /*
14282 14297 * The following code is to set properties for SM-HBA support,
14283 14298 * it doesn't apply to RAID volumes
14284 14299 */
14285 14300 if (ptgt->m_phymask == 0)
14286 14301 goto phys_raid_lun;
14287 14302
14288 14303 mutex_enter(&mpt->m_mutex);
14289 14304
14290 14305 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14291 14306 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14292 14307 (uint32_t)ptgt->m_devhdl;
14293 14308 rval = mptsas_get_sas_device_page0(mpt, page_address,
14294 14309 &dev_hdl, &dev_sas_wwn, &dev_info,
14295 14310 &physport, &phy_id, &pdev_hdl,
14296 14311 &bay_num, &enclosure);
14297 14312 if (rval != DDI_SUCCESS) {
14298 14313 mutex_exit(&mpt->m_mutex);
14299 14314 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14300 14315 "parent device for handle %d.", page_address);
14301 14316 ndi_rtn = NDI_FAILURE;
14302 14317 goto phys_create_done;
14303 14318 }
14304 14319
14305 14320 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14306 14321 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14307 14322 rval = mptsas_get_sas_device_page0(mpt, page_address,
14308 14323 &dev_hdl, &pdev_sas_wwn, &pdev_info,
14309 14324 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14310 14325 if (rval != DDI_SUCCESS) {
14311 14326 mutex_exit(&mpt->m_mutex);
14312 14327 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14313 14328 "device for handle %d.", page_address);
14314 14329 ndi_rtn = NDI_FAILURE;
14315 14330 goto phys_create_done;
14316 14331 }
14317 14332
14318 14333 mutex_exit(&mpt->m_mutex);
14319 14334
14320 14335 /*
14321 14336 * If this device direct attached to the controller
14322 14337 * set the attached-port to the base wwid
14323 14338 */
14324 14339 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14325 14340 != DEVINFO_DIRECT_ATTACHED) {
14326 14341 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14327 14342 pdev_sas_wwn);
14328 14343 } else {
14329 14344 /*
14330 14345 * Update the iport's attached-port to guid
14331 14346 */
14332 14347 if (sas_wwn == 0) {
14333 14348 (void) sprintf(wwn_str, "p%x", phy);
14334 14349 } else {
14335 14350 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14336 14351 }
14337 14352 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14338 14353 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14339 14354 DDI_PROP_SUCCESS) {
14340 14355 mptsas_log(mpt, CE_WARN,
14341 14356 "mptsas unable to create "
14342 14357 "property for iport target-port"
14343 14358 " %s (sas_wwn)",
14344 14359 wwn_str);
14345 14360 ndi_rtn = NDI_FAILURE;
14346 14361 goto phys_create_done;
14347 14362 }
14348 14363
14349 14364 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14350 14365 mpt->un.m_base_wwid);
14351 14366 }
14352 14367
14353 14368 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14354 14369 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14355 14370 DDI_PROP_SUCCESS) {
14356 14371 mptsas_log(mpt, CE_WARN,
14357 14372 "mptsas unable to create "
14358 14373 "property for iport attached-port %s (sas_wwn)",
14359 14374 attached_wwn_str);
14360 14375 ndi_rtn = NDI_FAILURE;
14361 14376 goto phys_create_done;
14362 14377 }
14363 14378
14364 14379 if (IS_SATA_DEVICE(dev_info)) {
14365 14380 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14366 14381 *lun_dip, MPTSAS_VARIANT, "sata") !=
14367 14382 DDI_PROP_SUCCESS) {
14368 14383 mptsas_log(mpt, CE_WARN,
14369 14384 "mptsas unable to create "
14370 14385 "property for device variant ");
14371 14386 ndi_rtn = NDI_FAILURE;
14372 14387 goto phys_create_done;
14373 14388 }
14374 14389 }
14375 14390
14376 14391 if (IS_ATAPI_DEVICE(dev_info)) {
14377 14392 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14378 14393 *lun_dip, MPTSAS_VARIANT, "atapi") !=
14379 14394 DDI_PROP_SUCCESS) {
14380 14395 mptsas_log(mpt, CE_WARN,
14381 14396 "mptsas unable to create "
14382 14397 "property for device variant ");
14383 14398 ndi_rtn = NDI_FAILURE;
14384 14399 goto phys_create_done;
14385 14400 }
14386 14401 }
14387 14402
14388 14403 phys_raid_lun:
14389 14404 /*
14390 14405 * if this is a SAS controller, and the target is a SATA
14391 14406 * drive, set the 'pm-capable' property for sd and if on
14392 14407 * an OPL platform, also check if this is an ATAPI
14393 14408 * device.
14394 14409 */
14395 14410 instance = ddi_get_instance(mpt->m_dip);
14396 14411 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14397 14412 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14398 14413 NDBG2(("mptsas%d: creating pm-capable property, "
14399 14414 "target %d", instance, target));
14400 14415
14401 14416 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
14402 14417 *lun_dip, "pm-capable", 1)) !=
14403 14418 DDI_PROP_SUCCESS) {
14404 14419 mptsas_log(mpt, CE_WARN, "mptsas "
14405 14420 "failed to create pm-capable "
14406 14421 "property, target %d", target);
14407 14422 ndi_rtn = NDI_FAILURE;
14408 14423 goto phys_create_done;
14409 14424 }
14410 14425
14411 14426 }
14412 14427
14413 14428 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
14414 14429 /*
14415 14430 * add 'obp-path' properties for devinfo
14416 14431 */
14417 14432 bzero(wwn_str, sizeof (wwn_str));
14418 14433 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14419 14434 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14420 14435 if (guid) {
14421 14436 (void) snprintf(component, MAXPATHLEN,
14422 14437 "disk@w%s,%x", wwn_str, lun);
14423 14438 } else {
14424 14439 (void) snprintf(component, MAXPATHLEN,
14425 14440 "disk@p%x,%x", phy, lun);
14426 14441 }
14427 14442 if (ddi_pathname_obp_set(*lun_dip, component)
14428 14443 != DDI_SUCCESS) {
14429 14444 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14430 14445 "unable to set obp-path for SAS "
14431 14446 "object %s", component);
14432 14447 ndi_rtn = NDI_FAILURE;
14433 14448 goto phys_create_done;
14434 14449 }
14435 14450 }
14436 14451 /*
14437 14452 * Create the phy-num property for non-raid disk
14438 14453 */
14439 14454 if (ptgt->m_phymask != 0) {
14440 14455 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14441 14456 *lun_dip, "phy-num", ptgt->m_phynum) !=
14442 14457 DDI_PROP_SUCCESS) {
14443 14458 mptsas_log(mpt, CE_WARN, "mptsas driver "
14444 14459 "failed to create phy-num property for "
14445 14460 "target %d", target);
14446 14461 ndi_rtn = NDI_FAILURE;
14447 14462 goto phys_create_done;
14448 14463 }
14449 14464 }
|
↓ open down ↓ |
333 lines elided |
↑ open up ↑ |
14450 14465 phys_create_done:
14451 14466 /*
14452 14467 * If props were setup ok, online the lun
14453 14468 */
14454 14469 if (ndi_rtn == NDI_SUCCESS) {
14455 14470 /*
14456 14471 * Try to online the new node
14457 14472 */
14458 14473 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14459 14474 }
14460 - if (ndi_rtn == NDI_SUCCESS) {
14461 - mutex_enter(&mpt->m_mutex);
14462 - if (mptsas_set_led_status(mpt, ptgt, 0) !=
14463 - DDI_SUCCESS) {
14464 - NDBG14(("mptsas: clear LED for tgt %x "
14465 - "failed", ptgt->m_slot_num));
14466 - }
14467 - mutex_exit(&mpt->m_mutex);
14468 - }
14469 14475
14470 14476 /*
14471 14477 * If success set rtn flag, else unwire alloc'd lun
14472 14478 */
14473 14479 if (ndi_rtn != NDI_SUCCESS) {
14474 14480 NDBG12(("mptsas driver unable to online "
14475 14481 "target %d lun %d", target, lun));
14476 14482 ndi_prop_remove_all(*lun_dip);
14477 14483 (void) ndi_devi_free(*lun_dip);
14478 14484 *lun_dip = NULL;
14479 14485 }
14480 14486 }
14481 14487
14482 14488 scsi_hba_nodename_compatible_free(nodename, compatible);
14483 14489
14484 14490 if (wwn_str != NULL) {
14485 14491 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14486 14492 }
14487 14493 if (component != NULL) {
14488 14494 kmem_free(component, MAXPATHLEN);
14489 14495 }
14490 14496
14491 14497
14492 14498 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14493 14499 }
14494 14500
/*
 * mptsas_probe_smp: probe an SMP (expander) device by SAS WWN to confirm it
 * still responds before a child node is configured for it.  Builds a
 * temporary smp_device on the stack, points it at this HBA's SMP transport,
 * and issues smp_probe().  Returns NDI_SUCCESS if the probe succeeds,
 * NDI_FAILURE otherwise.
 */
14495 14501 static int
14496 14502 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14497 14503 {
14498 14504 mptsas_t *mpt = DIP2MPT(pdip);
14499 14505 struct smp_device smp_sd;
14500 14506 
14501 14507 /* XXX An HBA driver should not be allocating an smp_device. */
14502 14508 bzero(&smp_sd, sizeof (struct smp_device));
14503 14509 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14504 14510 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14505 14511 
14506 14512 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14507 14513 return (NDI_FAILURE);
14508 14514 return (NDI_SUCCESS);
14509 14515 }
14510 14516
/*
 * mptsas_config_smp: configure a child node for the SMP device with SAS
 * address sas_wwn under iport pdip.  Looks up the device's phymask from the
 * iport's "phymask" property, finds the matching mptsas_smp_t in the SMP
 * hash table (mptsas_wwid_to_psmp may walk expander page0 to populate it),
 * then onlines the node via mptsas_online_smp().  On success *smp_dip is the
 * configured node.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
14511 14517 static int
14512 14518 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14513 14519 {
14514 14520 mptsas_t *mpt = DIP2MPT(pdip);
14515 14521 mptsas_smp_t *psmp = NULL;
14516 14522 int rval;
14517 14523 int phymask;
14518 14524 
14519 14525 /*
14520 14526 * Get the physical port associated to the iport
14521 14527 * PHYMASK TODO
14522 14528 */
14523 14529 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14524 14530 "phymask", 0);
14525 14531 /*
14526 14532 * Find the smp node in hash table with specified sas address and
14527 14533 * physical port
14528 14534 */
14529 14535 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14530 14536 if (psmp == NULL) {
14531 14537 return (DDI_FAILURE);
14532 14538 }
14533 14539 
14534 14540 rval = mptsas_online_smp(pdip, psmp, smp_dip);
14535 14541 
14536 14542 return (rval);
14537 14543 }
14538 14544
/*
 * mptsas_online_smp: allocate, decorate, and online a devinfo node for the
 * SMP expander described by smp_node under iport pdip.
 *
 * Steps:
 *  1. Re-probe the expander so a node is never configured for a device that
 *     has just been removed.
 *  2. If a child with this WWN already exists, return it (idempotent).
 *  3. Allocate an "smp" child node, set the SMP flavor, and create the
 *     smp-wwn / target-port / attached-port / smp-device properties.  The
 *     parent and self device-page0 reads are done under m_mutex.
 *  4. For a direct-attached expander on a not-yet-configured ("old") iport,
 *     rebuild the iport's virtual-port, num-phys, phy and attached-port
 *     properties for SMHBA support.
 *  5. Online the node; on any failure, strip properties and free the node.
 *
 * Returns DDI_SUCCESS when the node is online, DDI_FAILURE otherwise.
 */
14539 14545 static int
14540 14546 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
14541 14547 dev_info_t **smp_dip)
14542 14548 {
14543 14549 char wwn_str[MPTSAS_WWN_STRLEN];
14544 14550 char attached_wwn_str[MPTSAS_WWN_STRLEN];
14545 14551 int ndi_rtn = NDI_FAILURE;
14546 14552 int rval = 0;
14547 14553 mptsas_smp_t dev_info;
14548 14554 uint32_t page_address;
14549 14555 mptsas_t *mpt = DIP2MPT(pdip);
14550 14556 uint16_t dev_hdl;
14551 14557 uint64_t sas_wwn;
14552 14558 uint64_t smp_sas_wwn;
14553 14559 uint8_t physport;
14554 14560 uint8_t phy_id;
14555 14561 uint16_t pdev_hdl;
14556 14562 uint8_t numphys = 0;
14557 14563 uint16_t i = 0;
14558 14564 char phymask[MPTSAS_MAX_PHYS];
14559 14565 char *iport = NULL;
14560 14566 mptsas_phymask_t phy_mask = 0;
14561 14567 uint16_t attached_devhdl;
14562 14568 uint16_t bay_num, enclosure;
14563 14569 
14564 14570 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
14565 14571 
14566 14572 /*
14567 14573 * Probe smp device, prevent the node of removed device from being
14568 14574 * configured successfully
14569 14575 */
14570 14576 if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
14571 14577 return (DDI_FAILURE);
14572 14578 }
14573 14579 
14574 14580 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
14575 14581 return (DDI_SUCCESS);
14576 14582 }
14577 14583 
14578 14584 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
14579 14585 
14580 14586 /*
14581 14587 * if lun alloc success, set props
14582 14588 */
14583 14589 if (ndi_rtn == NDI_SUCCESS) {
14584 14590 /*
14585 14591 * Set the flavor of the child to be SMP flavored
14586 14592 */
14587 14593 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
14588 14594 
14589 14595 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14590 14596 *smp_dip, SMP_WWN, wwn_str) !=
14591 14597 DDI_PROP_SUCCESS) {
14592 14598 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14593 14599 "property for smp device %s (sas_wwn)",
14594 14600 wwn_str);
14595 14601 ndi_rtn = NDI_FAILURE;
14596 14602 goto smp_create_done;
14597 14603 }
/* wwn_str is reformatted with the "w" unit-address prefix from here on */
14598 14604 (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
14599 14605 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14600 14606 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
14601 14607 DDI_PROP_SUCCESS) {
14602 14608 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14603 14609 "property for iport target-port %s (sas_wwn)",
14604 14610 wwn_str);
14605 14611 ndi_rtn = NDI_FAILURE;
14606 14612 goto smp_create_done;
14607 14613 }
14608 14614 
/* Firmware config-page reads below require m_mutex */
14609 14615 mutex_enter(&mpt->m_mutex);
14610 14616 
14611 14617 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
14612 14618 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
14613 14619 rval = mptsas_get_sas_expander_page0(mpt, page_address,
14614 14620 &dev_info);
14615 14621 if (rval != DDI_SUCCESS) {
14616 14622 mutex_exit(&mpt->m_mutex);
14617 14623 mptsas_log(mpt, CE_WARN,
14618 14624 "mptsas unable to get expander "
14619 14625 "parent device info for %x", page_address);
14620 14626 ndi_rtn = NDI_FAILURE;
14621 14627 goto smp_create_done;
14622 14628 }
14623 14629 
/* Device page0 of the expander's parent: yields sas_wwn (attached port) */
14624 14630 smp_node->m_pdevhdl = dev_info.m_pdevhdl;
14625 14631 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14626 14632 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14627 14633 (uint32_t)dev_info.m_pdevhdl;
14628 14634 rval = mptsas_get_sas_device_page0(mpt, page_address,
14629 14635 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
14630 14636 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14631 14637 if (rval != DDI_SUCCESS) {
14632 14638 mutex_exit(&mpt->m_mutex);
14633 14639 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14634 14640 "device info for %x", page_address);
14635 14641 ndi_rtn = NDI_FAILURE;
14636 14642 goto smp_create_done;
14637 14643 }
14638 14644 
/* Device page0 of the expander itself: refreshes m_deviceinfo */
14639 14645 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14640 14646 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14641 14647 (uint32_t)dev_info.m_devhdl;
14642 14648 rval = mptsas_get_sas_device_page0(mpt, page_address,
14643 14649 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
14644 14650 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14645 14651 if (rval != DDI_SUCCESS) {
14646 14652 mutex_exit(&mpt->m_mutex);
14647 14653 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14648 14654 "device info for %x", page_address);
14649 14655 ndi_rtn = NDI_FAILURE;
14650 14656 goto smp_create_done;
14651 14657 }
14652 14658 mutex_exit(&mpt->m_mutex);
14653 14659 
14654 14660 /*
14655 14661 * If this smp direct attached to the controller
14656 14662 * set the attached-port to the base wwid
14657 14663 */
14658 14664 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14659 14665 != DEVINFO_DIRECT_ATTACHED) {
14660 14666 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
14661 14667 sas_wwn);
14662 14668 } else {
14663 14669 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
14664 14670 mpt->un.m_base_wwid);
14665 14671 }
14666 14672 
14667 14673 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14668 14674 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
14669 14675 DDI_PROP_SUCCESS) {
14670 14676 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14671 14677 "property for smp attached-port %s (sas_wwn)",
14672 14678 attached_wwn_str);
14673 14679 ndi_rtn = NDI_FAILURE;
14674 14680 goto smp_create_done;
14675 14681 }
14676 14682 
14677 14683 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14678 14684 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
14679 14685 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14680 14686 "create property for SMP %s (SMP_PROP) ",
14681 14687 wwn_str);
14682 14688 ndi_rtn = NDI_FAILURE;
14683 14689 goto smp_create_done;
14684 14690 }
14685 14691 
14686 14692 /*
14687 14693 * check the smp to see whether it direct
14688 14694 * attached to the controller
14689 14695 */
14690 14696 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14691 14697 != DEVINFO_DIRECT_ATTACHED) {
14692 14698 goto smp_create_done;
14693 14699 }
/* An iport that already has num-phys set needs no reconfiguration */
14694 14700 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
14695 14701 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
14696 14702 if (numphys > 0) {
14697 14703 goto smp_create_done;
14698 14704 }
14699 14705 /*
14700 14706 * this iport is an old iport, we need to
14701 14707 * reconfig the props for it.
14702 14708 */
14703 14709 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14704 14710 MPTSAS_VIRTUAL_PORT, 0) !=
14705 14711 DDI_PROP_SUCCESS) {
14706 14712 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14707 14713 MPTSAS_VIRTUAL_PORT);
14708 14714 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
14709 14715 "prop update failed");
14710 14716 goto smp_create_done;
14711 14717 }
14712 14718 
/*
 * Recover this iport's phy_mask by matching its unit address (a hex
 * phymask string) against the per-phy masks, then count its phys.
 */
14713 14719 mutex_enter(&mpt->m_mutex);
14714 14720 numphys = 0;
14715 14721 iport = ddi_get_name_addr(pdip);
14716 14722 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14717 14723 bzero(phymask, sizeof (phymask));
14718 14724 (void) sprintf(phymask,
14719 14725 "%x", mpt->m_phy_info[i].phy_mask);
14720 14726 if (strcmp(phymask, iport) == 0) {
14721 14727 phy_mask = mpt->m_phy_info[i].phy_mask;
14722 14728 break;
14723 14729 }
14724 14730 }
14725 14731 
14726 14732 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14727 14733 if ((phy_mask >> i) & 0x01) {
14728 14734 numphys++;
14729 14735 }
14730 14736 }
14731 14737 /*
14732 14738 * Update PHY info for smhba
14733 14739 */
14734 14740 if (mptsas_smhba_phy_init(mpt)) {
14735 14741 mutex_exit(&mpt->m_mutex);
14736 14742 mptsas_log(mpt, CE_WARN, "mptsas phy update "
14737 14743 "failed");
14738 14744 goto smp_create_done;
14739 14745 }
14740 14746 mutex_exit(&mpt->m_mutex);
14741 14747 
14742 14748 mptsas_smhba_set_phy_props(mpt, iport, pdip,
14743 14749 numphys, &attached_devhdl);
14744 14750 
14745 14751 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14746 14752 MPTSAS_NUM_PHYS, numphys) !=
14747 14753 DDI_PROP_SUCCESS) {
14748 14754 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14749 14755 MPTSAS_NUM_PHYS);
14750 14756 mptsas_log(mpt, CE_WARN, "mptsas update "
14751 14757 "num phys props failed");
14752 14758 goto smp_create_done;
14753 14759 }
14754 14760 /*
14755 14761 * Add parent's props for SMHBA support
14756 14762 */
14757 14763 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
14758 14764 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14759 14765 DDI_PROP_SUCCESS) {
14760 14766 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14761 14767 SCSI_ADDR_PROP_ATTACHED_PORT);
14762 14768 mptsas_log(mpt, CE_WARN, "mptsas update iport"
14763 14769 "attached-port failed");
14764 14770 goto smp_create_done;
14765 14771 }
14766 14772 
14767 14773 smp_create_done:
14768 14774 /*
14769 14775 * If props were setup ok, online the lun
14770 14776 */
14771 14777 if (ndi_rtn == NDI_SUCCESS) {
14772 14778 /*
14773 14779 * Try to online the new node
14774 14780 */
14775 14781 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
14776 14782 }
14777 14783 
14778 14784 /*
14779 14785 * If success set rtn flag, else unwire alloc'd lun
14780 14786 */
14781 14787 if (ndi_rtn != NDI_SUCCESS) {
14782 14788 NDBG12(("mptsas unable to online "
14783 14789 "SMP target %s", wwn_str));
14784 14790 ndi_prop_remove_all(*smp_dip);
14785 14791 (void) ndi_devi_free(*smp_dip);
/*
 * NOTE(review): unlike the LUN failure path earlier in this file,
 * *smp_dip is not reset to NULL after ndi_devi_free(), so the caller
 * may be left holding a dangling pointer — confirm callers re-check
 * the return value before using *smp_dip.
 */
14786 14792 }
14787 14793 }
14788 14794 
14789 14795 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14790 14796 }
14791 14797
/*
 * smp transport routine: entry point for SMP frames from the SMP framework.
 * Builds an MPI2 SMP Passthrough request addressed by the target's SAS WWN,
 * runs it through mptsas_do_passthru() under m_mutex, and maps IOC/SAS
 * status codes to errno values in smp_pkt->smp_pkt_reason.
 * Returns DDI_SUCCESS only when both the IOC status and the SAS status
 * report success.
 */
14792 14798 /* smp transport routine */
14793 14799 static int mptsas_smp_start(struct smp_pkt *smp_pkt)
14794 14800 {
14795 14801 uint64_t wwn;
14796 14802 Mpi2SmpPassthroughRequest_t req;
14797 14803 Mpi2SmpPassthroughReply_t rep;
14798 14804 uint32_t direction = 0;
14799 14805 mptsas_t *mpt;
14800 14806 int ret;
14801 14807 uint64_t tmp64;
14802 14808 
14803 14809 mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
14804 14810 smp_a_hba_tran->smp_tran_hba_private;
14805 14811 
14806 14812 bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
14807 14813 /*
14808 14814 * Need to compose a SMP request message
14809 14815 * and call mptsas_do_passthru() function
14810 14816 */
14811 14817 bzero(&req, sizeof (req));
14812 14818 bzero(&rep, sizeof (rep));
14813 14819 req.PassthroughFlags = 0;
14814 14820 req.PhysicalPort = 0xff;
14815 14821 req.ChainOffset = 0;
14816 14822 req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
14817 14823 
/* RequestDataLength is a 16-bit field; reject oversized requests */
14818 14824 if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
14819 14825 smp_pkt->smp_pkt_reason = ERANGE;
14820 14826 return (DDI_FAILURE);
14821 14827 }
/*
 * Lengths passed to the IOC exclude 4 bytes of the framework sizes —
 * presumably the SMP CRC handled by hardware; TODO confirm against the
 * MPI2 SMP Passthrough specification.
 */
14822 14828 req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));
14823 14829 
14824 14830 req.MsgFlags = 0;
14825 14831 tmp64 = LE_64(wwn);
14826 14832 bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
14827 14833 if (smp_pkt->smp_pkt_rspsize > 0) {
14828 14834 direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
14829 14835 }
14830 14836 if (smp_pkt->smp_pkt_reqsize > 0) {
14831 14837 direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
14832 14838 }
14833 14839 
14834 14840 mutex_enter(&mpt->m_mutex);
14835 14841 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
14836 14842 (uint8_t *)smp_pkt->smp_pkt_rsp,
14837 14843 offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
14838 14844 smp_pkt->smp_pkt_rspsize - 4, direction,
14839 14845 (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
14840 14846 smp_pkt->smp_pkt_timeout, FKIOCTL);
14841 14847 mutex_exit(&mpt->m_mutex);
14842 14848 if (ret != 0) {
14843 14849 cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
14844 14850 smp_pkt->smp_pkt_reason = (uchar_t)(ret);
14845 14851 return (DDI_FAILURE);
14846 14852 }
14847 14853 /* do passthrough success, check the smp status */
14848 14854 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
14849 14855 switch (LE_16(rep.IOCStatus)) {
14850 14856 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
14851 14857 smp_pkt->smp_pkt_reason = ENODEV;
14852 14858 break;
14853 14859 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
14854 14860 smp_pkt->smp_pkt_reason = EOVERFLOW;
14855 14861 break;
14856 14862 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
14857 14863 smp_pkt->smp_pkt_reason = EIO;
14858 14864 break;
14859 14865 default:
14860 14866 mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
14861 14867 "status:%x", LE_16(rep.IOCStatus));
14862 14868 smp_pkt->smp_pkt_reason = EIO;
14863 14869 break;
14864 14870 }
14865 14871 return (DDI_FAILURE);
14866 14872 }
14867 14873 if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
14868 14874 mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
14869 14875 rep.SASStatus);
14870 14876 smp_pkt->smp_pkt_reason = EIO;
14871 14877 return (DDI_FAILURE);
14872 14878 }
14873 14879 
14874 14880 return (DDI_SUCCESS);
14875 14881 }
14876 14882
14877 14883 /*
14878 14884 * If we didn't get a match, we need to read SAS device page0 for each
14879 14885 * device until we get a match. If that fails, return NULL.
14880 14886 */
/*
 * mptsas_phy_to_tgt: translate a PHY number on a narrow (single-phy) iport
 * into its mptsas_target_t.  Only valid when the phymask names exactly one
 * phy and that phy's mask matches; otherwise NULL.  First scans the target
 * hash table for a phy-addressed target (m_sas_wwn == 0, m_phynum == phy);
 * if the initial device traversal is incomplete, walks device page0 by
 * handle to discover the target.  Returns the target or NULL.
 */
14881 14887 static mptsas_target_t *
14882 14888 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
14883 14889 {
14884 14890 int i, j = 0;
14885 14891 int rval = 0;
14886 14892 uint16_t cur_handle;
14887 14893 uint32_t page_address;
14888 14894 mptsas_target_t *ptgt = NULL;
14889 14895 
14890 14896 /*
14891 14897 * PHY named device must be direct attached and attaches to
14892 14898 * narrow port, if the iport is not parent of the device which
14893 14899 * we are looking for.
14894 14900 */
14895 14901 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14896 14902 if ((1 << i) & phymask)
14897 14903 j++;
14898 14904 }
14899 14905 
/* More than one phy in the mask => wide port, not addressable by phy */
14900 14906 if (j > 1)
14901 14907 return (NULL);
14902 14908 
14903 14909 /*
14904 14910 * Must be a narrow port and single device attached to the narrow port
14905 14911 * So the physical port num of device which is equal to the iport's
14906 14912 * port num is the device what we are looking for.
14907 14913 */
14908 14914 
14909 14915 if (mpt->m_phy_info[phy].phy_mask != phymask)
14910 14916 return (NULL);
14911 14917 
14912 14918 mutex_enter(&mpt->m_mutex);
14913 14919 
/* Fast path: target already in the hash table */
14914 14920 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
14915 14921 MPTSAS_HASH_FIRST);
14916 14922 while (ptgt != NULL) {
14917 14923 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14918 14924 mutex_exit(&mpt->m_mutex);
14919 14925 return (ptgt);
14920 14926 }
14921 14927 
14922 14928 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
14923 14929 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
14924 14930 }
14925 14931 
/* If the full device traversal already completed, there is nothing new */
14926 14932 if (mpt->m_done_traverse_dev) {
14927 14933 mutex_exit(&mpt->m_mutex);
14928 14934 return (NULL);
14929 14935 }
14930 14936 
14931 14937 /* If didn't get a match, come here */
14932 14938 cur_handle = mpt->m_dev_handle;
14933 14939 for (; ; ) {
14934 14940 ptgt = NULL;
14935 14941 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14936 14942 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
14937 14943 rval = mptsas_get_target_device_info(mpt, page_address,
14938 14944 &cur_handle, &ptgt);
14939 14945 if ((rval == DEV_INFO_FAIL_PAGE0) ||
14940 14946 (rval == DEV_INFO_FAIL_ALLOC)) {
14941 14947 break;
14942 14948 }
14943 14949 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
14944 14950 (rval == DEV_INFO_PHYS_DISK)) {
14945 14951 continue;
14946 14952 }
/* Remember where the incremental traversal stopped */
14947 14953 mpt->m_dev_handle = cur_handle;
14948 14954 
14949 14955 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14950 14956 break;
14951 14957 }
14952 14958 }
14953 14959 
14954 14960 mutex_exit(&mpt->m_mutex);
14955 14961 return (ptgt);
14956 14962 }
14957 14963
14958 14964 /*
14959 14965 * The ptgt->m_sas_wwn contains the wwid for each disk.
14960 14966 * For RAID volumes, we need to check m_raidvol[x].m_raidwwid instead.
14961 14967 * If we didn't get a match, we need to read SAS device page0 for each
14962 14968 * device until we get a match.
14963 14969 * If that fails, return NULL.
14964 14970 */
/*
 * mptsas_wwid_to_ptgt: look up the target with the given WWID on the given
 * phymask.  phymask == 0 denotes an IR (RAID) volume, handled by refreshing
 * RAID info and retrying the hash lookup.  Otherwise, if the hash miss
 * happens while the initial device traversal is still incomplete, walk
 * device page0 by handle until the WWID/phymask pair is found.
 * Returns the target, or NULL if it cannot be found.
 */
14965 14971 static mptsas_target_t *
14966 14972 mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
14967 14973 {
14968 14974 int rval = 0;
14969 14975 uint16_t cur_handle;
14970 14976 uint32_t page_address;
14971 14977 mptsas_target_t *tmp_tgt = NULL;
14972 14978 
14973 14979 mutex_enter(&mpt->m_mutex);
/* Fast path: already in the target hash table */
14974 14980 tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
14975 14981 &mpt->m_active->m_tgttbl, wwid, phymask);
14976 14982 if (tmp_tgt != NULL) {
14977 14983 mutex_exit(&mpt->m_mutex);
14978 14984 return (tmp_tgt);
14979 14985 }
14980 14986 
14981 14987 if (phymask == 0) {
14982 14988 /*
14983 14989 * It's IR volume
14984 14990 */
14985 14991 rval = mptsas_get_raid_info(mpt);
14986 14992 if (rval) {
14987 14993 tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
14988 14994 &mpt->m_active->m_tgttbl, wwid, phymask);
14989 14995 }
14990 14996 mutex_exit(&mpt->m_mutex);
14991 14997 return (tmp_tgt);
14992 14998 }
14993 14999 
14994 15000 if (mpt->m_done_traverse_dev) {
14995 15001 mutex_exit(&mpt->m_mutex);
14996 15002 return (NULL);
14997 15003 }
14998 15004 
14999 15005 /* If didn't get a match, come here */
15000 15006 cur_handle = mpt->m_dev_handle;
15001 15007 for (; ; ) {
15002 15008 tmp_tgt = NULL;
15003 15009 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15004 15010 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
15005 15011 rval = mptsas_get_target_device_info(mpt, page_address,
15006 15012 &cur_handle, &tmp_tgt);
15007 15013 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15008 15014 (rval == DEV_INFO_FAIL_ALLOC)) {
15009 15015 tmp_tgt = NULL;
15010 15016 break;
15011 15017 }
15012 15018 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15013 15019 (rval == DEV_INFO_PHYS_DISK)) {
15014 15020 continue;
15015 15021 }
/* Remember where the incremental traversal stopped */
15016 15022 mpt->m_dev_handle = cur_handle;
15017 15023 if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
15018 15024 (tmp_tgt->m_phymask == phymask)) {
15019 15025 break;
15020 15026 }
15021 15027 }
15022 15028 
15023 15029 mutex_exit(&mpt->m_mutex);
15024 15030 return (tmp_tgt);
15025 15031 }
15026 15032
/*
 * mptsas_wwid_to_psmp: look up the SMP (expander) node with the given WWID
 * and phymask.  Checks the SMP hash table first; on a miss, and while the
 * initial expander traversal is incomplete, walks expander page0 by handle,
 * adding each discovered expander to the table, until the requested
 * WWID/phymask pair is found.  Returns the node or NULL.
 */
15027 15033 static mptsas_smp_t *
15028 15034 mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
15029 15035 {
15030 15036 int rval = 0;
15031 15037 uint16_t cur_handle;
15032 15038 uint32_t page_address;
15033 15039 mptsas_smp_t smp_node, *psmp = NULL;
15034 15040 
15035 15041 mutex_enter(&mpt->m_mutex);
15036 15042 psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
15037 15043 wwid, phymask);
15038 15044 if (psmp != NULL) {
15039 15045 mutex_exit(&mpt->m_mutex);
15040 15046 return (psmp);
15041 15047 }
15042 15048 
15043 15049 if (mpt->m_done_traverse_smp) {
15044 15050 mutex_exit(&mpt->m_mutex);
15045 15051 return (NULL);
15046 15052 }
15047 15053 
15048 15054 /* If didn't get a match, come here */
15049 15055 cur_handle = mpt->m_smp_devhdl;
15050 15056 for (; ; ) {
15051 15057 psmp = NULL;
15052 15058 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
15053 15059 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15054 15060 rval = mptsas_get_sas_expander_page0(mpt, page_address,
15055 15061 &smp_node);
15056 15062 if (rval != DDI_SUCCESS) {
/* End of expander list (or page0 failure): psmp stays NULL */
15057 15063 break;
15058 15064 }
15059 15065 mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
15060 15066 psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
15061 15067 ASSERT(psmp);
15062 15068 if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
15063 15069 (psmp->m_phymask == phymask)) {
15064 15070 break;
15065 15071 }
15066 15072 }
15067 15073 
15068 15074 mutex_exit(&mpt->m_mutex);
15069 15075 return (psmp);
15070 15076 }
15071 15077
15072 15078 /* helper functions using hash */
15073 15079
15074 15080 /*
15075 15081 * Can't have duplicate entries for same devhdl,
15076 15082 * if there are invalid entries, the devhdl should be set to 0xffff
15077 15083 */
/*
 * mptsas_search_by_devhdl: linear scan of a hash table for the entry whose
 * devhdl matches.  Uses the table's single traversal cursor, so callers must
 * not interleave this with another traversal.  Returns the entry's data
 * (mptsas_hash_data_t-compatible) or NULL if no entry matches.
 */
15078 15084 static void *
15079 15085 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15080 15086 {
15081 15087 mptsas_hash_data_t *data;
15082 15088 
15083 15089 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15084 15090 while (data != NULL) {
15085 15091 if (data->devhdl == devhdl) {
15086 15092 break;
15087 15093 }
15088 15094 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15089 15095 }
15090 15096 return (data);
15091 15097 }
15092 15098
/*
 * mptsas_tgt_alloc: return the target for (wwid, phymask), creating it if
 * absent.  An existing entry has its devinfo and devhdl refreshed in place.
 * A new entry is zero-allocated, initialized with default queue-full/
 * throttle settings, and inserted into the hash table.  The table owns the
 * returned entry; it is freed via mptsas_tgt_free().
 */
15093 15099 mptsas_target_t *
15094 15100 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15095 15101 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15096 15102 {
15097 15103 mptsas_target_t *tmp_tgt = NULL;
15098 15104 
15099 15105 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15100 15106 if (tmp_tgt != NULL) {
15101 15107 NDBG20(("Hash item already exist"));
15102 15108 tmp_tgt->m_deviceinfo = devinfo;
15103 15109 tmp_tgt->m_devhdl = devhdl;
15104 15110 return (tmp_tgt);
15105 15111 }
15106 15112 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
/* NOTE(review): kmem_zalloc(KM_SLEEP) cannot return NULL; this check is dead code */
15107 15113 if (tmp_tgt == NULL) {
15108 15114 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15109 15115 return (NULL);
15110 15116 }
15111 15117 tmp_tgt->m_devhdl = devhdl;
15112 15118 tmp_tgt->m_sas_wwn = wwid;
15113 15119 tmp_tgt->m_deviceinfo = devinfo;
15114 15120 tmp_tgt->m_phymask = phymask;
15115 15121 tmp_tgt->m_phynum = phynum;
15116 15122 /* Initialize the tgt structure */
15117 15123 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15118 15124 tmp_tgt->m_qfull_retry_interval =
15119 15125 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15120 15126 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15121 15127 
15122 15128 mptsas_hash_add(hashtab, tmp_tgt);
15123 15129 
15124 15130 return (tmp_tgt);
15125 15131 }
15126 15132
/*
 * mptsas_tgt_free: remove the target keyed by (wwid, phymask) from the hash
 * table and free it.  Logs a warning (but is otherwise harmless) if the
 * target is not present.
 */
15127 15133 static void
15128 15134 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15129 15135 mptsas_phymask_t phymask)
15130 15136 {
15131 15137 mptsas_target_t *tmp_tgt;
15132 15138 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15133 15139 if (tmp_tgt == NULL) {
15134 15140 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15135 15141 } else {
15136 15142 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15137 15143 }
15138 15144 }
15139 15145
15140 15146 /*
15141 15147 * Return the entry in the hash table
15142 15148 */
/*
 * mptsas_smp_alloc: return the table-owned SMP entry for data's
 * (sasaddr, phymask) key, creating it if absent.  In both cases the caller's
 * data is copied over the table entry, so the entry always reflects the
 * latest snapshot.  The table owns the returned entry; it is freed via
 * mptsas_smp_free().
 */
15143 15149 static mptsas_smp_t *
15144 15150 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15145 15151 {
15146 15152 uint64_t key1 = data->m_sasaddr;
15147 15153 mptsas_phymask_t key2 = data->m_phymask;
15148 15154 mptsas_smp_t *ret_data;
15149 15155 
15150 15156 ret_data = mptsas_hash_search(hashtab, key1, key2);
15151 15157 if (ret_data != NULL) {
/* Existing entry: refresh it in place with the caller's snapshot */
15152 15158 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15153 15159 return (ret_data);
15154 15160 }
15155 15161 
15156 15162 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15157 15163 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15158 15164 mptsas_hash_add(hashtab, ret_data);
15159 15165 return (ret_data);
15160 15166 }
15161 15167
/*
 * mptsas_smp_free: remove the SMP entry keyed by (wwid, phymask) from the
 * hash table and free it.  Logs a warning (but is otherwise harmless) if the
 * entry is not present.
 */
15162 15168 static void
15163 15169 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15164 15170 mptsas_phymask_t phymask)
15165 15171 {
15166 15172 mptsas_smp_t *tmp_smp;
15167 15173 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15168 15174 if (tmp_smp == NULL) {
15169 15175 cmn_err(CE_WARN, "Smp element not found, nothing to free");
15170 15176 } else {
15171 15177 kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15172 15178 }
15173 15179 }
15174 15180
15175 15181 /*
15176 15182 * Hash operation functions
15177 15183 * key1 is the sas_wwn, key2 is the phymask
15178 15184 */
/*
 * mptsas_hash_init: reset a hash table to empty — zero all bucket heads and
 * clear the traversal cursor.  Does NOT free any existing nodes; callers
 * must run mptsas_hash_uninit() first if the table was populated.
 */
15179 15185 static void
15180 15186 mptsas_hash_init(mptsas_hash_table_t *hashtab)
15181 15187 {
15182 15188 if (hashtab == NULL) {
15183 15189 return;
15184 15190 }
15185 15191 bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
15186 15192 MPTSAS_HASH_ARRAY_SIZE);
15187 15193 hashtab->cur = NULL;
15188 15194 hashtab->line = 0;
15189 15195 }
15190 15196
/*
 * mptsas_hash_uninit: free every node and its payload (each payload is
 * datalen bytes) in the table.
 * NOTE(review): bucket head pointers are left dangling afterwards; the
 * table must not be used again without mptsas_hash_init() — confirm all
 * callers re-init or discard the table.
 */
15191 15197 static void
15192 15198 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15193 15199 {
15194 15200 uint16_t line = 0;
15195 15201 mptsas_hash_node_t *cur = NULL, *last = NULL;
15196 15202 
15197 15203 if (hashtab == NULL) {
15198 15204 return;
15199 15205 }
15200 15206 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15201 15207 cur = hashtab->head[line];
15202 15208 while (cur != NULL) {
15203 15209 last = cur;
15204 15210 cur = cur->next;
15205 15211 kmem_free(last->data, datalen);
15206 15212 kmem_free(last, sizeof (mptsas_hash_node_t));
15207 15213 }
15208 15214 }
15209 15215 }
15210 15216
15211 15217 /*
15212 15218 * You must guarantee the element doesn't exist in the hash table
15213 15219 * before you call mptsas_hash_add()
15214 15220 */
/*
 * mptsas_hash_add: insert data (whose first fields must match
 * mptsas_hash_data_t: key1 = sas_wwn, key2 = phymask) at the head of its
 * bucket.  The caller must guarantee the key pair is not already present
 * (DEBUG builds assert this).
 *
 * NOTE(review): the node is allocated with KM_NOSLEEP but the result is
 * dereferenced without a NULL check — under memory pressure this is a
 * NULL-pointer panic.  Should either use KM_SLEEP (callers appear to run
 * in sleepable context — verify) or check and propagate failure.
 */
15215 15221 static void
15216 15222 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
15217 15223 {
15218 15224 uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
15219 15225 mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
15220 15226 mptsas_hash_node_t **head = NULL;
15221 15227 mptsas_hash_node_t *node = NULL;
15222 15228 
15223 15229 if (hashtab == NULL) {
15224 15230 return;
15225 15231 }
15226 15232 ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
15227 15233 node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
15228 15234 node->data = data;
15229 15235 
15230 15236 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15231 15237 if (*head == NULL) {
15232 15238 *head = node;
15233 15239 } else {
15234 15240 node->next = *head;
15235 15241 *head = node;
15236 15242 }
15237 15243 }
15238 15244
/*
 * mptsas_hash_rem: unlink the node keyed by (key1, key2) from its bucket and
 * free the node structure.  Returns the node's data payload — ownership of
 * the payload transfers to the caller, who is responsible for freeing it —
 * or NULL if the key pair is not in the table.
 */
15239 15245 static void *
15240 15246 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
15241 15247 mptsas_phymask_t key2)
15242 15248 {
15243 15249 mptsas_hash_node_t **head = NULL;
15244 15250 mptsas_hash_node_t *last = NULL, *cur = NULL;
15245 15251 mptsas_hash_data_t *data;
15246 15252 if (hashtab == NULL) {
15247 15253 return (NULL);
15248 15254 }
15249 15255 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15250 15256 cur = *head;
15251 15257 while (cur != NULL) {
15252 15258 data = cur->data;
15253 15259 if ((data->key1 == key1) && (data->key2 == key2)) {
15254 15260 if (last == NULL) {
/* Match at bucket head */
15255 15261 (*head) = cur->next;
15256 15262 } else {
15257 15263 last->next = cur->next;
15258 15264 }
15259 15265 kmem_free(cur, sizeof (mptsas_hash_node_t));
15260 15266 return (data);
15261 15267 } else {
15262 15268 last = cur;
15263 15269 cur = cur->next;
15264 15270 }
15265 15271 }
15266 15272 return (NULL);
15267 15273 }
15268 15274
/*
 * mptsas_hash_search: find the entry keyed by (key1, key2) and return its
 * data payload, or NULL if absent.  Read-only; does not touch the traversal
 * cursor.
 */
15269 15275 static void *
15270 15276 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
15271 15277 mptsas_phymask_t key2)
15272 15278 {
15273 15279 mptsas_hash_node_t *cur = NULL;
15274 15280 mptsas_hash_data_t *data;
15275 15281 if (hashtab == NULL) {
15276 15282 return (NULL);
15277 15283 }
15278 15284 cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
15279 15285 while (cur != NULL) {
15280 15286 data = cur->data;
15281 15287 if ((data->key1 == key1) && (data->key2 == key2)) {
15282 15288 return (data);
15283 15289 } else {
15284 15290 cur = cur->next;
15285 15291 }
15286 15292 }
15287 15293 return (NULL);
15288 15294 }
15289 15295
/*
 * mptsas_hash_traverse: stateful iteration over every entry in the table.
 * pos == MPTSAS_HASH_FIRST restarts from bucket 0; MPTSAS_HASH_NEXT resumes
 * from the cursor.  Returns the next entry's data, or NULL at the end.
 * The cursor (cur/line) lives in the table itself, so only one traversal
 * may be active per table at a time, and entries must not be added or
 * removed mid-traversal.
 */
15290 15296 static void *
15291 15297 mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
15292 15298 {
15293 15299 mptsas_hash_node_t *this = NULL;
15294 15300 
15295 15301 if (hashtab == NULL) {
15296 15302 return (NULL);
15297 15303 }
15298 15304 
15299 15305 if (pos == MPTSAS_HASH_FIRST) {
15300 15306 hashtab->line = 0;
15301 15307 hashtab->cur = NULL;
15302 15308 this = hashtab->head[0];
15303 15309 } else {
15304 15310 if (hashtab->cur == NULL) {
15305 15311 return (NULL);
15306 15312 } else {
15307 15313 this = hashtab->cur->next;
15308 15314 }
15309 15315 }
15310 15316 
/* Skip over empty buckets until an entry or the end of the array */
15311 15317 while (this == NULL) {
15312 15318 hashtab->line++;
15313 15319 if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
15314 15320 /* the traverse reaches the end */
15315 15321 hashtab->cur = NULL;
15316 15322 return (NULL);
15317 15323 } else {
15318 15324 this = hashtab->head[hashtab->line];
15319 15325 }
15320 15326 }
15321 15327 hashtab->cur = this;
15322 15328 return (this->data);
15323 15329 }
15324 15330
15325 15331 /*
15326 15332 * Functions for SGPIO LED support
15327 15333 */
/*
 * mptsas_get_dip_from_dev: resolve a dev_t to its devinfo node and read the
 * node's "phymask" property into *phymask.  Returns NULL if the dev_t has
 * no node.
 * NOTE(review): the hold from e_ddi_hold_devi_by_dev() is released before
 * the dip is returned, so the caller receives an unheld pointer — confirm
 * callers only use it while the device cannot detach.
 */
15328 15334 static dev_info_t *
15329 15335 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
15330 15336 {
15331 15337 dev_info_t *dip;
15332 15338 int prop;
15333 15339 dip = e_ddi_hold_devi_by_dev(dev, 0);
15334 15340 if (dip == NULL)
15335 15341 return (dip);
15336 15342 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
15337 15343 "phymask", 0);
15338 15344 *phymask = (mptsas_phymask_t)prop;
15339 15345 ddi_release_devi(dip);
15340 15346 return (dip);
15341 15347 }
/*
 * mptsas_addr_to_ptgt: resolve a SCSA unit address string to its target.
 * Addresses beginning with 'w' carry a WWN ("w<wwn>,<lun>") and are looked
 * up by WWID; otherwise the address names a PHY ("p<phy>,<lun>" — phy
 * addressing, see mptsas_parse_address) and is resolved via the narrow-port
 * phy lookup.  Returns the target or NULL if parsing or lookup fails.
 */
15342 15348 static mptsas_target_t *
15343 15349 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
15344 15350 {
15345 15351 uint8_t phynum;
15346 15352 uint64_t wwn;
15347 15353 int lun;
15348 15354 mptsas_target_t *ptgt = NULL;
15349 15355 
15350 15356 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
|
↓ open down ↓ |
872 lines elided |
↑ open up ↑ |
15351 15357 return (NULL);
15352 15358 }
15353 15359 if (addr[0] == 'w') {
15354 15360 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
15355 15361 } else {
15356 15362 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
15357 15363 }
15358 15364 return (ptgt);
15359 15365 }
15360 15366
15361 -#ifdef MPTSAS_GET_LED
15362 -static int
15363 -mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
15364 - uint32_t *slotstatus)
15365 -{
15366 - return (mptsas_send_sep(mpt, ptgt, slotstatus,
15367 - MPI2_SEP_REQ_ACTION_READ_STATUS));
15368 -}
15369 -#endif
15370 -static int
15371 -mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
15372 -{
15373 - NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
15374 - slotstatus, ptgt->m_slot_num));
15375 - return (mptsas_send_sep(mpt, ptgt, &slotstatus,
15376 - MPI2_SEP_REQ_ACTION_WRITE_STATUS));
15377 -}
15378 -/*
15379 - * send sep request, use enclosure/slot addressing
15380 - */
15381 -static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
15382 - uint32_t *status, uint8_t act)
15383 -{
15384 - Mpi2SepRequest_t req;
15385 - Mpi2SepReply_t rep;
15386 - int ret;
15387 -
15388 - ASSERT(mutex_owned(&mpt->m_mutex));
15389 -
15390 - bzero(&req, sizeof (req));
15391 - bzero(&rep, sizeof (rep));
15392 -
15393 - /* Do nothing for RAID volumes */
15394 - if (ptgt->m_phymask == 0) {
15395 - NDBG14(("mptsas_send_sep: Skip RAID volumes"));
15396 - return (DDI_FAILURE);
15397 - }
15398 -
15399 - req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
15400 - req.Action = act;
15401 - req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
15402 - req.EnclosureHandle = LE_16(ptgt->m_enclosure);
15403 - req.Slot = LE_16(ptgt->m_slot_num);
15404 - if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15405 - req.SlotStatus = LE_32(*status);
15406 - }
15407 - ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
15408 - sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
15409 - if (ret != 0) {
15410 - mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
15411 - "Processor Request message error %d", ret);
15412 - return (DDI_FAILURE);
15413 - }
15414 - /* do passthrough success, check the ioc status */
15415 - if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
15416 - if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
15417 - MPI2_IOCSTATUS_INVALID_FIELD) {
15418 - mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
15419 - "supported action, loginfo %x", act,
15420 - LE_32(rep.IOCLogInfo));
15421 - return (DDI_FAILURE);
15422 - }
15423 - mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
15424 - "status:%x", act, LE_16(rep.IOCStatus));
15425 - return (DDI_FAILURE);
15426 - }
15427 - if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15428 - *status = LE_32(rep.SlotStatus);
15429 - }
15430 -
15431 - return (DDI_SUCCESS);
15432 -}
15433 -
15434 15367 int
15435 15368 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15436 15369 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15437 15370 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15438 15371 {
15439 15372 ddi_dma_cookie_t new_cookie;
15440 15373 size_t alloc_len;
15441 15374 uint_t ncookie;
15442 15375
15443 15376 if (cookiep == NULL)
15444 15377 cookiep = &new_cookie;
15445 15378
15446 15379 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15447 15380 NULL, dma_hdp) != DDI_SUCCESS) {
15448 15381 dma_hdp = NULL;
15449 15382 return (FALSE);
15450 15383 }
15451 15384
15452 15385 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15453 15386 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
15454 15387 acc_hdp) != DDI_SUCCESS) {
15455 15388 ddi_dma_free_handle(dma_hdp);
15456 15389 dma_hdp = NULL;
15457 15390 return (FALSE);
15458 15391 }
15459 15392
15460 15393 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
15461 15394 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
15462 15395 cookiep, &ncookie) != DDI_DMA_MAPPED) {
15463 15396 (void) ddi_dma_mem_free(acc_hdp);
15464 15397 ddi_dma_free_handle(dma_hdp);
15465 15398 dma_hdp = NULL;
15466 15399 return (FALSE);
15467 15400 }
15468 15401
15469 15402 return (TRUE);
15470 15403 }
15471 15404
15472 15405 void
15473 15406 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15474 15407 {
15475 15408 if (*dma_hdp == NULL)
15476 15409 return;
15477 15410
15478 15411 (void) ddi_dma_unbind_handle(*dma_hdp);
15479 15412 (void) ddi_dma_mem_free(acc_hdp);
15480 15413 ddi_dma_free_handle(dma_hdp);
15481 15414 dma_hdp = NULL;
15482 15415 }
|
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX