1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 /*
28 * Copyright (c) 2000 to 2010, LSI Corporation.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms of all code within
32 * this file that is exclusively owned by LSI, with or without
33 * modification, is permitted provided that, in addition to the CDDL 1.0
34 * License requirements, the following conditions are met:
35 *
36 * Neither the name of the author nor the names of its contributors may be
37 * used to endorse or promote products derived from this software without
38 * specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
46 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
47 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
51 * DAMAGE.
52 */
53
54 /*
55 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
56 *
57 */
58
59 #if defined(lint) || defined(DEBUG)
60 #define MPTSAS_DEBUG
61 #endif
62
63 /*
64 * standard header files.
65 */
66 #include <sys/note.h>
67 #include <sys/scsi/scsi.h>
68 #include <sys/pci.h>
69 #include <sys/file.h>
70 #include <sys/policy.h>
71 #include <sys/sysevent.h>
72 #include <sys/sysevent/eventdefs.h>
73 #include <sys/sysevent/dr.h>
74 #include <sys/sata/sata_defs.h>
75 #include <sys/scsi/generic/sas.h>
76 #include <sys/scsi/impl/scsi_sas.h>
77
78 #pragma pack(1)
79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
87 #pragma pack()
88
89 /*
90 * private header files.
91 *
92 */
93 #include <sys/scsi/impl/scsi_reset_notify.h>
94 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
95 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
96 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
97 #include <sys/raidioctl.h>
98
99 #include <sys/fs/dv_node.h> /* devfs_clean */
100
101 /*
102 * FMA header files
103 */
104 #include <sys/ddifm.h>
105 #include <sys/fm/protocol.h>
106 #include <sys/fm/util.h>
107 #include <sys/fm/io/ddi.h>
108
109 /*
110 * autoconfiguration data and routines.
111 */
112 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
113 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
114 static int mptsas_power(dev_info_t *dip, int component, int level);
115
116 /*
117 * cb_ops function
118 */
119 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
120 cred_t *credp, int *rval);
121 #ifdef __sparc
122 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
123 #else /* __sparc */
124 static int mptsas_quiesce(dev_info_t *devi);
125 #endif /* __sparc */
126
127 /*
128 * Resource initialization for hardware
129 */
130 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
131 static void mptsas_disable_bus_master(mptsas_t *mpt);
132 static void mptsas_hba_fini(mptsas_t *mpt);
133 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
134 static int mptsas_hba_setup(mptsas_t *mpt);
135 static void mptsas_hba_teardown(mptsas_t *mpt);
136 static int mptsas_config_space_init(mptsas_t *mpt);
137 static void mptsas_config_space_fini(mptsas_t *mpt);
138 static void mptsas_iport_register(mptsas_t *mpt);
139 static int mptsas_smp_setup(mptsas_t *mpt);
140 static void mptsas_smp_teardown(mptsas_t *mpt);
141 static int mptsas_cache_create(mptsas_t *mpt);
142 static void mptsas_cache_destroy(mptsas_t *mpt);
143 static int mptsas_alloc_request_frames(mptsas_t *mpt);
144 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
145 static int mptsas_alloc_free_queue(mptsas_t *mpt);
146 static int mptsas_alloc_post_queue(mptsas_t *mpt);
147 static void mptsas_alloc_reply_args(mptsas_t *mpt);
148 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
149 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
150 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
151
152 /*
153 * SCSA function prototypes
154 */
155 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
156 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
157 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
158 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
159 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
160 int tgtonly);
161 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
162 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
163 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
164 int tgtlen, int flags, int (*callback)(), caddr_t arg);
165 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
166 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
167 struct scsi_pkt *pkt);
168 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
169 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
170 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
171 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
172 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
173 void (*callback)(caddr_t), caddr_t arg);
174 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
175 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
176 static int mptsas_scsi_quiesce(dev_info_t *dip);
177 static int mptsas_scsi_unquiesce(dev_info_t *dip);
178 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
179 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
180
181 /*
182 * SMP functions
183 */
184 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
185
186 /*
187 * internal function prototypes.
188 */
189 static void mptsas_list_add(mptsas_t *mpt);
190 static void mptsas_list_del(mptsas_t *mpt);
191
192 static int mptsas_quiesce_bus(mptsas_t *mpt);
193 static int mptsas_unquiesce_bus(mptsas_t *mpt);
194
195 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
196 static void mptsas_free_handshake_msg(mptsas_t *mpt);
197
198 static void mptsas_ncmds_checkdrain(void *arg);
199
200 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
201 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
202 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
203 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
204
205 static int mptsas_do_detach(dev_info_t *dev);
206 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
207 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
208 struct scsi_pkt *pkt);
209 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
210
211 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
212 static void mptsas_handle_event(void *args);
213 static int mptsas_handle_event_sync(void *args);
214 static void mptsas_handle_dr(void *args);
215 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
216 dev_info_t *pdip);
217
218 static void mptsas_restart_cmd(void *);
219
220 static void mptsas_flush_hba(mptsas_t *mpt);
221 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
222 uint8_t tasktype);
223 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
224 uchar_t reason, uint_t stat);
225
226 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
227 static void mptsas_process_intr(mptsas_t *mpt,
228 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
229 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
230 pMpi2ReplyDescriptorsUnion_t reply_desc);
231 static void mptsas_handle_address_reply(mptsas_t *mpt,
232 pMpi2ReplyDescriptorsUnion_t reply_desc);
233 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
234 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
235 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
236
237 static void mptsas_watch(void *arg);
238 static void mptsas_watchsubr(mptsas_t *mpt);
239 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
240 static void mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt);
241
242 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
243 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
244 uint8_t *data, uint32_t request_size, uint32_t reply_size,
245 uint32_t data_size, uint32_t direction, uint8_t *dataout,
246 uint32_t dataout_size, short timeout, int mode);
247 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
248
249 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
250 uint32_t unique_id);
251 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
252 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
253 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
254 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
255 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
256 uint32_t diag_type);
257 static int mptsas_diag_register(mptsas_t *mpt,
258 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
259 static int mptsas_diag_unregister(mptsas_t *mpt,
260 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
261 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
262 uint32_t *return_code);
263 static int mptsas_diag_read_buffer(mptsas_t *mpt,
264 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
265 uint32_t *return_code, int ioctl_mode);
266 static int mptsas_diag_release(mptsas_t *mpt,
267 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
268 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
269 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
270 int ioctl_mode);
271 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
272 int mode);
273
274 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
275 int cmdlen, int tgtlen, int statuslen, int kf);
276 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
277
278 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
279 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
280
281 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
282 int kmflags);
283 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
284
285 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
286 mptsas_cmd_t *cmd);
287 static void mptsas_check_task_mgt(mptsas_t *mpt,
288 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
289 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
290 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
291 int *resid);
292
293 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
294 static void mptsas_free_active_slots(mptsas_t *mpt);
295 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
296
297 static void mptsas_restart_hba(mptsas_t *mpt);
298 static void mptsas_restart_waitq(mptsas_t *mpt);
299
300 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
301 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
302 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
303
304 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
305 static void mptsas_doneq_empty(mptsas_t *mpt);
306 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
307
308 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
309 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
310 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
311 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
312
313
314 static void mptsas_start_watch_reset_delay();
315 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
316 static void mptsas_watch_reset_delay(void *arg);
317 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
318
319 /*
320 * helper functions
321 */
322 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
323
324 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
325 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
326 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
327 int lun);
328 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
329 int lun);
330 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
331 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
332
333 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
334 int *lun);
335 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
336
337 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
338 uint8_t phy);
339 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
340 uint64_t wwid);
341 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
342 uint64_t wwid);
343
344 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
345 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
346
347 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
348 uint16_t *handle, mptsas_target_t **pptgt);
349 static void mptsas_update_phymask(mptsas_t *mpt);
350
351 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
352 mptsas_phymask_t *phymask);
353 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
354 mptsas_phymask_t phymask);
355
356
357 /*
358 * Enumeration / DR functions
359 */
360 static void mptsas_config_all(dev_info_t *pdip);
361 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
362 dev_info_t **lundip);
363 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
364 dev_info_t **lundip);
365
366 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
367 static int mptsas_offline_target(dev_info_t *pdip, char *name);
368
369 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
370 dev_info_t **dip);
371
372 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
373 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
374 dev_info_t **dip, mptsas_target_t *ptgt);
375
376 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
377 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
378
379 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
380 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
381 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
382 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
383 int lun);
384
385 static void mptsas_offline_missed_luns(dev_info_t *pdip,
386 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
387 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
388 mdi_pathinfo_t *rpip, uint_t flags);
389
390 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
391 dev_info_t **smp_dip);
392 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
393 uint_t flags);
394
395 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
396 int mode, int *rval);
397 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
398 int mode, int *rval);
399 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
400 int mode, int *rval);
401 static void mptsas_record_event(void *args);
402 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
403 int mode);
404
405 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
406 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
407 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
408 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
409 mptsas_phymask_t key2);
410 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
411 mptsas_phymask_t key2);
412 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
413
414 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
415 uint32_t, mptsas_phymask_t, uint8_t);
416 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
417 mptsas_smp_t *data);
418 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
419 mptsas_phymask_t phymask);
420 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
421 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
422 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
423 dev_info_t **smp_dip);
424
425 /*
426 * Power management functions
427 */
428 static int mptsas_get_pci_cap(mptsas_t *mpt);
429 static int mptsas_init_pm(mptsas_t *mpt);
430
431 /*
432 * MPT MSI tunable:
433 *
434 * By default MSI is enabled on all supported platforms.
435 */
436 boolean_t mptsas_enable_msi = B_TRUE;
437 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
438
439 static int mptsas_register_intrs(mptsas_t *);
440 static void mptsas_unregister_intrs(mptsas_t *);
441 static int mptsas_add_intrs(mptsas_t *, int);
442 static void mptsas_rem_intrs(mptsas_t *);
443
444 /*
445 * FMA Prototypes
446 */
447 static void mptsas_fm_init(mptsas_t *mpt);
448 static void mptsas_fm_fini(mptsas_t *mpt);
449 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
450
451 extern pri_t minclsyspri, maxclsyspri;
452
453 /*
454 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
455 * under this device that the paths to a physical device are created when
456 * MPxIO is used.
457 */
458 extern dev_info_t *scsi_vhci_dip;
459
460 /*
461 * Tunable timeout value for Inquiry VPD page 0x83
462 * By default the value is 30 seconds.
463 */
464 int mptsas_inq83_retry_timeout = 30;
465 /*
466 * Maximum number of command timeouts (0 - 255) considered acceptable.
467 */
468 int mptsas_timeout_threshold = 2;
469 /*
470 * Timeouts exceeding threshold within this period are considered excessive.
471 */
472 int mptsas_timeout_interval = 30;
473
474 /*
475 * This is used to allocate memory for message frame storage, not for
476 * data I/O DMA. All message frames must be stored in the first 4G of
477 * physical memory.
478 */
479 ddi_dma_attr_t mptsas_dma_attrs = {
480 DMA_ATTR_V0, /* attribute layout version */
481 0x0ull, /* address low - should be 0 (longlong) */
482 0xffffffffull, /* address high - 32-bit max range */
483 0x00ffffffull, /* count max - max DMA object size */
484 4, /* allocation alignment requirements */
485 0x78, /* burstsizes - binary encoded values */
486 1, /* minxfer - gran. of DMA engine */
487 0x00ffffffull, /* maxxfer - gran. of DMA engine */
488 0xffffffffull, /* max segment size (DMA boundary) */
489 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
490 512, /* granularity - device transfer size */
491 0 /* flags, set to 0 */
492 };
493
494 /*
495 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
496 * physical addresses are supported.)
497 */
498 ddi_dma_attr_t mptsas_dma_attrs64 = {
499 DMA_ATTR_V0, /* attribute layout version */
500 0x0ull, /* address low - should be 0 (longlong) */
501 0xffffffffffffffffull, /* address high - 64-bit max */
502 0x00ffffffull, /* count max - max DMA object size */
503 4, /* allocation alignment requirements */
504 0x78, /* burstsizes - binary encoded values */
505 1, /* minxfer - gran. of DMA engine */
506 0x00ffffffull, /* maxxfer - gran. of DMA engine */
507 0xffffffffull, /* max segment size (DMA boundary) */
508 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
509 512, /* granularity - device transfer size */
510 DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */
511 };
512
513 ddi_device_acc_attr_t mptsas_dev_attr = {
514 DDI_DEVICE_ATTR_V1,
515 DDI_STRUCTURE_LE_ACC,
516 DDI_STRICTORDER_ACC,
517 DDI_DEFAULT_ACC
518 };
519
520 static struct cb_ops mptsas_cb_ops = {
521 scsi_hba_open, /* open */
522 scsi_hba_close, /* close */
523 nodev, /* strategy */
524 nodev, /* print */
525 nodev, /* dump */
526 nodev, /* read */
527 nodev, /* write */
528 mptsas_ioctl, /* ioctl */
529 nodev, /* devmap */
530 nodev, /* mmap */
531 nodev, /* segmap */
532 nochpoll, /* chpoll */
533 ddi_prop_op, /* cb_prop_op */
534 NULL, /* streamtab */
535 D_MP, /* cb_flag */
536 CB_REV, /* rev */
537 nodev, /* aread */
538 nodev /* awrite */
539 };
540
541 static struct dev_ops mptsas_ops = {
542 DEVO_REV, /* devo_rev, */
543 0, /* refcnt */
544 ddi_no_info, /* info */
545 nulldev, /* identify */
546 nulldev, /* probe */
547 mptsas_attach, /* attach */
548 mptsas_detach, /* detach */
549 #ifdef __sparc
550 mptsas_reset,
551 #else
552 nodev, /* reset */
553 #endif /* __sparc */
554 &mptsas_cb_ops, /* driver operations */
555 NULL, /* bus operations */
556 mptsas_power, /* power management */
557 #ifdef __sparc
558 ddi_quiesce_not_needed
559 #else
560 mptsas_quiesce /* quiesce */
561 #endif /* __sparc */
562 };
563
564
565 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
566
567 static struct modldrv modldrv = {
568 &mod_driverops, /* Type of module. This one is a driver */
569 MPTSAS_MOD_STRING, /* Name of the module. */
570 &mptsas_ops, /* driver ops */
571 };
572
573 static struct modlinkage modlinkage = {
574 MODREV_1, &modldrv, NULL
575 };
576 #define TARGET_PROP "target"
577 #define LUN_PROP "lun"
578 #define LUN64_PROP "lun64"
579 #define SAS_PROP "sas-mpt"
580 #define MDI_GUID "wwn"
581 #define NDI_GUID "guid"
582 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
583
584 /*
585 * Local static data
586 */
587 #if defined(MPTSAS_DEBUG)
588 uint32_t mptsas_debug_flags = 0;
589 #endif /* defined(MPTSAS_DEBUG) */
590 uint32_t mptsas_debug_resets = 0;
591
592 static kmutex_t mptsas_global_mutex;
593 static void *mptsas_state; /* soft state ptr */
594 static krwlock_t mptsas_global_rwlock;
595
596 static kmutex_t mptsas_log_mutex;
597 static char mptsas_log_buf[256];
598 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
599
600 static mptsas_t *mptsas_head, *mptsas_tail;
601 static clock_t mptsas_scsi_watchdog_tick;
602 static clock_t mptsas_tick;
603 static timeout_id_t mptsas_reset_watch;
604 static timeout_id_t mptsas_timeout_id;
605 static int mptsas_timeouts_enabled = 0;
606 /*
607 * warlock directives
608 */
609 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
610 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
611 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
612 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
613 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
614 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
615
616 /*
617 * SM - HBA statics
618 */
619 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
620
621 #ifdef MPTSAS_DEBUG
622 void debug_enter(char *);
623 #endif
624
625 /*
626 * Notes:
627 * - scsi_hba_init(9F) initializes SCSI HBA modules
628 * - must call scsi_hba_fini(9F) if modload() fails
629 */
630 int
631 _init(void)
632 {
633 int status;
634 /* CONSTCOND */
635 ASSERT(NO_COMPETING_THREADS);
636
637 NDBG0(("_init"));
638
639 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
640 MPTSAS_INITIAL_SOFT_SPACE);
641 if (status != 0) {
642 return (status);
643 }
644
645 if ((status = scsi_hba_init(&modlinkage)) != 0) {
646 ddi_soft_state_fini(&mptsas_state);
647 return (status);
648 }
649
650 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
651 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
652 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
653
654 if ((status = mod_install(&modlinkage)) != 0) {
655 mutex_destroy(&mptsas_log_mutex);
656 rw_destroy(&mptsas_global_rwlock);
657 mutex_destroy(&mptsas_global_mutex);
658 ddi_soft_state_fini(&mptsas_state);
659 scsi_hba_fini(&modlinkage);
660 }
661
662 return (status);
663 }
664
665 /*
666 * Notes:
667 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
668 */
669 int
670 _fini(void)
671 {
672 int status;
673 /* CONSTCOND */
674 ASSERT(NO_COMPETING_THREADS);
675
676 NDBG0(("_fini"));
677
678 if ((status = mod_remove(&modlinkage)) == 0) {
679 ddi_soft_state_fini(&mptsas_state);
680 scsi_hba_fini(&modlinkage);
681 mutex_destroy(&mptsas_global_mutex);
682 rw_destroy(&mptsas_global_rwlock);
683 mutex_destroy(&mptsas_log_mutex);
684 }
685 return (status);
686 }
687
688 /*
689 * The loadable-module _info(9E) entry point
690 */
691 int
692 _info(struct modinfo *modinfop)
693 {
694 /* CONSTCOND */
695 ASSERT(NO_COMPETING_THREADS);
696 NDBG0(("mptsas _info"));
697
698 return (mod_info(&modlinkage, modinfop));
699 }
700
701
702 static int
703 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
704 {
705 dev_info_t *pdip;
706 mptsas_t *mpt;
707 scsi_hba_tran_t *hba_tran;
708 char *iport = NULL;
709 char phymask[MPTSAS_MAX_PHYS];
710 mptsas_phymask_t phy_mask = 0;
711 int dynamic_port = 0;
712 uint32_t page_address;
713 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
714 int rval = DDI_FAILURE;
715 int i = 0;
716 uint8_t numphys = 0;
717 uint8_t phy_id;
718 uint8_t phy_port = 0;
719 uint16_t attached_devhdl = 0;
720 uint32_t dev_info;
721 uint64_t attached_sas_wwn;
722 uint16_t dev_hdl;
723 uint16_t pdev_hdl;
724 uint16_t bay_num, enclosure;
725 char attached_wwnstr[MPTSAS_WWN_STRLEN];
726
727 /* CONSTCOND */
728 ASSERT(NO_COMPETING_THREADS);
729
730 switch (cmd) {
731 case DDI_ATTACH:
732 break;
733
734 case DDI_RESUME:
735 /*
736 * If this a scsi-iport node, nothing to do here.
737 */
738 return (DDI_SUCCESS);
739
740 default:
741 return (DDI_FAILURE);
742 }
743
744 pdip = ddi_get_parent(dip);
745
746 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
747 NULL) {
748 cmn_err(CE_WARN, "Failed attach iport because fail to "
749 "get tran vector for the HBA node");
750 return (DDI_FAILURE);
751 }
752
753 mpt = TRAN2MPT(hba_tran);
754 ASSERT(mpt != NULL);
755 if (mpt == NULL)
756 return (DDI_FAILURE);
757
758 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
759 NULL) {
760 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
761 "get tran vector for the iport node");
762 return (DDI_FAILURE);
763 }
764
765 /*
766 * Overwrite parent's tran_hba_private to iport's tran vector
767 */
768 hba_tran->tran_hba_private = mpt;
769
770 ddi_report_dev(dip);
771
772 /*
773 * Get SAS address for initiator port according dev_handle
774 */
775 iport = ddi_get_name_addr(dip);
776 if (iport && strncmp(iport, "v0", 2) == 0) {
777 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
778 MPTSAS_VIRTUAL_PORT, 1) !=
779 DDI_PROP_SUCCESS) {
780 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
781 MPTSAS_VIRTUAL_PORT);
782 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
783 "prop update failed");
784 return (DDI_FAILURE);
785 }
786 return (DDI_SUCCESS);
787 }
788
789 mutex_enter(&mpt->m_mutex);
790 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
791 bzero(phymask, sizeof (phymask));
792 (void) sprintf(phymask,
793 "%x", mpt->m_phy_info[i].phy_mask);
794 if (strcmp(phymask, iport) == 0) {
795 break;
796 }
797 }
798
799 if (i == MPTSAS_MAX_PHYS) {
800 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
801 "seems not exist", iport);
802 mutex_exit(&mpt->m_mutex);
803 return (DDI_FAILURE);
804 }
805
806 phy_mask = mpt->m_phy_info[i].phy_mask;
807
808 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
809 dynamic_port = 1;
810 else
811 dynamic_port = 0;
812
813 /*
814 * Update PHY info for smhba
815 */
816 if (mptsas_smhba_phy_init(mpt)) {
817 mutex_exit(&mpt->m_mutex);
818 mptsas_log(mpt, CE_WARN, "mptsas phy update "
819 "failed");
820 return (DDI_FAILURE);
821 }
822
823 mutex_exit(&mpt->m_mutex);
824
825 numphys = 0;
826 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
827 if ((phy_mask >> i) & 0x01) {
828 numphys++;
829 }
830 }
831
832 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
833 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
834 mpt->un.m_base_wwid);
835
836 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
837 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
838 DDI_PROP_SUCCESS) {
839 (void) ddi_prop_remove(DDI_DEV_T_NONE,
840 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
841 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
842 "prop update failed");
843 return (DDI_FAILURE);
844 }
845 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
846 MPTSAS_NUM_PHYS, numphys) !=
847 DDI_PROP_SUCCESS) {
848 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
849 return (DDI_FAILURE);
850 }
851
852 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
853 "phymask", phy_mask) !=
854 DDI_PROP_SUCCESS) {
855 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
856 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
857 "prop update failed");
858 return (DDI_FAILURE);
859 }
860
861 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
862 "dynamic-port", dynamic_port) !=
863 DDI_PROP_SUCCESS) {
864 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
865 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
866 "prop update failed");
867 return (DDI_FAILURE);
868 }
869 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
870 MPTSAS_VIRTUAL_PORT, 0) !=
871 DDI_PROP_SUCCESS) {
872 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
873 MPTSAS_VIRTUAL_PORT);
874 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
875 "prop update failed");
876 return (DDI_FAILURE);
877 }
878 mptsas_smhba_set_phy_props(mpt,
879 iport, dip, numphys, &attached_devhdl);
880
881 mutex_enter(&mpt->m_mutex);
882 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
883 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
884 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
885 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
886 &pdev_hdl, &bay_num, &enclosure);
887 if (rval != DDI_SUCCESS) {
888 mptsas_log(mpt, CE_WARN,
889 "Failed to get device page0 for handle:%d",
890 attached_devhdl);
891 mutex_exit(&mpt->m_mutex);
892 return (DDI_FAILURE);
893 }
894
895 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
896 bzero(phymask, sizeof (phymask));
897 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
898 if (strcmp(phymask, iport) == 0) {
899 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
900 "%x",
901 mpt->m_phy_info[i].phy_mask);
902 }
903 }
904 mutex_exit(&mpt->m_mutex);
905
906 bzero(attached_wwnstr, sizeof (attached_wwnstr));
907 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
908 attached_sas_wwn);
909 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
910 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
911 DDI_PROP_SUCCESS) {
912 (void) ddi_prop_remove(DDI_DEV_T_NONE,
913 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
914 return (DDI_FAILURE);
915 }
916
917 /* Create kstats for each phy on this iport */
918
919 mptsas_create_phy_stats(mpt, iport, dip);
920
921 /*
922 * register sas hba iport with mdi (MPxIO/vhci)
923 */
924 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
925 dip, 0) == MDI_SUCCESS) {
926 mpt->m_mpxio_enable = TRUE;
927 }
928 return (DDI_SUCCESS);
929 }
930
931 /*
932 * Notes:
933 * Set up all device state and allocate data structures,
934 * mutexes, condition variables, etc. for device operation.
935 * Add interrupts needed.
936 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
937 */
938 static int
939 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
940 {
941 mptsas_t *mpt = NULL;
942 int instance, i, j;
943 int doneq_thread_num;
944 char intr_added = 0;
945 char map_setup = 0;
946 char config_setup = 0;
947 char hba_attach_setup = 0;
948 char smp_attach_setup = 0;
949 char mutex_init_done = 0;
950 char event_taskq_create = 0;
951 char dr_taskq_create = 0;
952 char doneq_thread_create = 0;
953 scsi_hba_tran_t *hba_tran;
954 uint_t mem_bar = MEM_SPACE;
955 int rval = DDI_FAILURE;
956
957 /* CONSTCOND */
958 ASSERT(NO_COMPETING_THREADS);
959
960 if (scsi_hba_iport_unit_address(dip)) {
961 return (mptsas_iport_attach(dip, cmd));
962 }
963
964 switch (cmd) {
965 case DDI_ATTACH:
966 break;
967
968 case DDI_RESUME:
969 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
970 return (DDI_FAILURE);
971
972 mpt = TRAN2MPT(hba_tran);
973
974 if (!mpt) {
975 return (DDI_FAILURE);
976 }
977
978 /*
979 * Reset hardware and softc to "no outstanding commands"
980 * Note that a check condition can result on first command
981 * to a target.
982 */
983 mutex_enter(&mpt->m_mutex);
984
985 /*
986 * raise power.
987 */
988 if (mpt->m_options & MPTSAS_OPT_PM) {
989 mutex_exit(&mpt->m_mutex);
990 (void) pm_busy_component(dip, 0);
991 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
992 if (rval == DDI_SUCCESS) {
993 mutex_enter(&mpt->m_mutex);
994 } else {
995 /*
996 * The pm_raise_power() call above failed,
997 * and that can only occur if we were unable
998 * to reset the hardware. This is probably
999 * due to unhealty hardware, and because
1000 * important filesystems(such as the root
1001 * filesystem) could be on the attached disks,
1002 * it would not be a good idea to continue,
1003 * as we won't be entirely certain we are
1004 * writing correct data. So we panic() here
1005 * to not only prevent possible data corruption,
1006 * but to give developers or end users a hope
1007 * of identifying and correcting any problems.
1008 */
1009 fm_panic("mptsas could not reset hardware "
1010 "during resume");
1011 }
1012 }
1013
1014 mpt->m_suspended = 0;
1015
1016 /*
1017 * Reinitialize ioc
1018 */
1019 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1020 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1021 mutex_exit(&mpt->m_mutex);
1022 if (mpt->m_options & MPTSAS_OPT_PM) {
1023 (void) pm_idle_component(dip, 0);
1024 }
1025 fm_panic("mptsas init chip fail during resume");
1026 }
1027 /*
1028 * mptsas_update_driver_data needs interrupts so enable them
1029 * first.
1030 */
1031 MPTSAS_ENABLE_INTR(mpt);
1032 mptsas_update_driver_data(mpt);
1033
1034 /* start requests, if possible */
1035 mptsas_restart_hba(mpt);
1036
1037 mutex_exit(&mpt->m_mutex);
1038
1039 /*
1040 * Restart watch thread
1041 */
1042 mutex_enter(&mptsas_global_mutex);
1043 if (mptsas_timeout_id == 0) {
1044 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1045 mptsas_tick);
1046 mptsas_timeouts_enabled = 1;
1047 }
1048 mutex_exit(&mptsas_global_mutex);
1049
1050 /* report idle status to pm framework */
1051 if (mpt->m_options & MPTSAS_OPT_PM) {
1052 (void) pm_idle_component(dip, 0);
1053 }
1054
1055 return (DDI_SUCCESS);
1056
1057 default:
1058 return (DDI_FAILURE);
1059
1060 }
1061
1062 instance = ddi_get_instance(dip);
1063
1064 /*
1065 * Allocate softc information.
1066 */
1067 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1068 mptsas_log(NULL, CE_WARN,
1069 "mptsas%d: cannot allocate soft state", instance);
1070 goto fail;
1071 }
1072
1073 mpt = ddi_get_soft_state(mptsas_state, instance);
1074
1075 if (mpt == NULL) {
1076 mptsas_log(NULL, CE_WARN,
1077 "mptsas%d: cannot get soft state", instance);
1078 goto fail;
1079 }
1080
1081 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1082 scsi_size_clean(dip);
1083
1084 mpt->m_dip = dip;
1085 mpt->m_instance = instance;
1086
1087 /* Make a per-instance copy of the structures */
1088 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1089 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1090 mpt->m_reg_acc_attr = mptsas_dev_attr;
1091 mpt->m_dev_acc_attr = mptsas_dev_attr;
1092
1093 /*
1094 * Initialize FMA
1095 */
1096 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1097 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1098 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1099 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1100
1101 mptsas_fm_init(mpt);
1102
1103 if (mptsas_alloc_handshake_msg(mpt,
1104 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1105 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1106 goto fail;
1107 }
1108
1109 /*
1110 * Setup configuration space
1111 */
1112 if (mptsas_config_space_init(mpt) == FALSE) {
1113 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1114 goto fail;
1115 }
1116 config_setup++;
1117
1118 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1119 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1120 mptsas_log(mpt, CE_WARN, "map setup failed");
1121 goto fail;
1122 }
1123 map_setup++;
1124
1125 /*
1126 * A taskq is created for dealing with the event handler
1127 */
1128 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1129 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1130 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1131 goto fail;
1132 }
1133 event_taskq_create++;
1134
1135 /*
1136 * A taskq is created for dealing with dr events
1137 */
1138 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1139 "mptsas_dr_taskq",
1140 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1141 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1142 "failed");
1143 goto fail;
1144 }
1145 dr_taskq_create++;
1146
1147 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1148 0, "mptsas_doneq_thread_threshold_prop", 10);
1149 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1150 0, "mptsas_doneq_length_threshold_prop", 8);
1151 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1152 0, "mptsas_doneq_thread_n_prop", 8);
1153
1154 if (mpt->m_doneq_thread_n) {
1155 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1156 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1157
1158 mutex_enter(&mpt->m_doneq_mutex);
1159 mpt->m_doneq_thread_id =
1160 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1161 * mpt->m_doneq_thread_n, KM_SLEEP);
1162
1163 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1164 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1165 CV_DRIVER, NULL);
1166 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1167 MUTEX_DRIVER, NULL);
1168 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1169 mpt->m_doneq_thread_id[j].flag |=
1170 MPTSAS_DONEQ_THREAD_ACTIVE;
1171 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1172 mpt->m_doneq_thread_id[j].arg.t = j;
1173 mpt->m_doneq_thread_id[j].threadp =
1174 thread_create(NULL, 0, mptsas_doneq_thread,
1175 &mpt->m_doneq_thread_id[j].arg,
1176 0, &p0, TS_RUN, minclsyspri);
1177 mpt->m_doneq_thread_id[j].donetail =
1178 &mpt->m_doneq_thread_id[j].doneq;
1179 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1180 }
1181 mutex_exit(&mpt->m_doneq_mutex);
1182 doneq_thread_create++;
1183 }
1184
1185 /* Initialize mutex used in interrupt handler */
1186 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1187 DDI_INTR_PRI(mpt->m_intr_pri));
1188 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1189 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1190 DDI_INTR_PRI(mpt->m_intr_pri));
1191 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1192 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1193 NULL, MUTEX_DRIVER,
1194 DDI_INTR_PRI(mpt->m_intr_pri));
1195 }
1196
1197 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1198 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1199 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1200 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1201 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1202 mutex_init_done++;
1203
1204 /*
1205 * Disable hardware interrupt since we're not ready to
1206 * handle it yet.
1207 */
1208 MPTSAS_DISABLE_INTR(mpt);
1209 if (mptsas_register_intrs(mpt) == FALSE)
1210 goto fail;
1211 intr_added++;
1212
1213 mutex_enter(&mpt->m_mutex);
1214 /*
1215 * Initialize power management component
1216 */
1217 if (mpt->m_options & MPTSAS_OPT_PM) {
1218 if (mptsas_init_pm(mpt)) {
1219 mutex_exit(&mpt->m_mutex);
1220 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1221 "failed");
1222 goto fail;
1223 }
1224 }
1225
1226 /*
1227 * Initialize chip using Message Unit Reset, if allowed
1228 */
1229 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1230 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1231 mutex_exit(&mpt->m_mutex);
1232 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1233 goto fail;
1234 }
1235
1236 /*
1237 * Fill in the phy_info structure and get the base WWID
1238 */
1239 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1240 mptsas_log(mpt, CE_WARN,
1241 "mptsas_get_manufacture_page5 failed!");
1242 goto fail;
1243 }
1244
1245 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1246 mptsas_log(mpt, CE_WARN,
1247 "mptsas_get_sas_io_unit_page_hndshk failed!");
1248 goto fail;
1249 }
1250
1251 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1252 mptsas_log(mpt, CE_WARN,
1253 "mptsas_get_manufacture_page0 failed!");
1254 goto fail;
1255 }
1256
1257 mutex_exit(&mpt->m_mutex);
1258
1259 /*
1260 * Register the iport for multiple port HBA
1261 */
1262 mptsas_iport_register(mpt);
1263
1264 /*
1265 * initialize SCSI HBA transport structure
1266 */
1267 if (mptsas_hba_setup(mpt) == FALSE)
1268 goto fail;
1269 hba_attach_setup++;
1270
1271 if (mptsas_smp_setup(mpt) == FALSE)
1272 goto fail;
1273 smp_attach_setup++;
1274
1275 if (mptsas_cache_create(mpt) == FALSE)
1276 goto fail;
1277
1278 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1279 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1280 if (mpt->m_scsi_reset_delay == 0) {
1281 mptsas_log(mpt, CE_NOTE,
1282 "scsi_reset_delay of 0 is not recommended,"
1283 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1284 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1285 }
1286
1287 /*
1288 * Initialize the wait and done FIFO queue
1289 */
1290 mpt->m_donetail = &mpt->m_doneq;
1291 mpt->m_waitqtail = &mpt->m_waitq;
1292 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1293 mpt->m_tx_draining = 0;
1294
1295 /*
1296 * ioc cmd queue initialize
1297 */
1298 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1299 mpt->m_dev_handle = 0xFFFF;
1300
1301 MPTSAS_ENABLE_INTR(mpt);
1302
1303 /*
1304 * enable event notification
1305 */
1306 mutex_enter(&mpt->m_mutex);
1307 if (mptsas_ioc_enable_event_notification(mpt)) {
1308 mutex_exit(&mpt->m_mutex);
1309 goto fail;
1310 }
1311 mutex_exit(&mpt->m_mutex);
1312
1313 /*
1314 * Initialize PHY info for smhba
1315 */
1316 if (mptsas_smhba_setup(mpt)) {
1317 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1318 "failed");
1319 goto fail;
1320 }
1321
1322 /* Check all dma handles allocated in attach */
1323 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1324 != DDI_SUCCESS) ||
1325 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1326 != DDI_SUCCESS) ||
1327 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1328 != DDI_SUCCESS) ||
1329 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1330 != DDI_SUCCESS) ||
1331 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1332 != DDI_SUCCESS)) {
1333 goto fail;
1334 }
1335
1336 /* Check all acc handles allocated in attach */
1337 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1338 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1339 != DDI_SUCCESS) ||
1340 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1341 != DDI_SUCCESS) ||
1342 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1343 != DDI_SUCCESS) ||
1344 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1345 != DDI_SUCCESS) ||
1346 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1347 != DDI_SUCCESS) ||
1348 (mptsas_check_acc_handle(mpt->m_config_handle)
1349 != DDI_SUCCESS)) {
1350 goto fail;
1351 }
1352
1353 /*
1354 * After this point, we are not going to fail the attach.
1355 */
1356 /*
1357 * used for mptsas_watch
1358 */
1359 mptsas_list_add(mpt);
1360
1361 mutex_enter(&mptsas_global_mutex);
1362 if (mptsas_timeouts_enabled == 0) {
1363 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1364 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1365
1366 mptsas_tick = mptsas_scsi_watchdog_tick *
1367 drv_usectohz((clock_t)1000000);
1368
1369 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1370 mptsas_timeouts_enabled = 1;
1371 }
1372 mutex_exit(&mptsas_global_mutex);
1373
1374 /* Print message of HBA present */
1375 ddi_report_dev(dip);
1376
1377 /* report idle status to pm framework */
1378 if (mpt->m_options & MPTSAS_OPT_PM) {
1379 (void) pm_idle_component(dip, 0);
1380 }
1381
1382 return (DDI_SUCCESS);
1383
1384 fail:
1385 mptsas_log(mpt, CE_WARN, "attach failed");
1386 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1387 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1388 if (mpt) {
1389 mutex_enter(&mptsas_global_mutex);
1390
1391 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1392 timeout_id_t tid = mptsas_timeout_id;
1393 mptsas_timeouts_enabled = 0;
1394 mptsas_timeout_id = 0;
1395 mutex_exit(&mptsas_global_mutex);
1396 (void) untimeout(tid);
1397 mutex_enter(&mptsas_global_mutex);
1398 }
1399 mutex_exit(&mptsas_global_mutex);
1400 /* deallocate in reverse order */
1401 mptsas_cache_destroy(mpt);
1402
1403 if (smp_attach_setup) {
1404 mptsas_smp_teardown(mpt);
1405 }
1406 if (hba_attach_setup) {
1407 mptsas_hba_teardown(mpt);
1408 }
1409
1410 if (mpt->m_active) {
1411 mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1412 sizeof (mptsas_smp_t));
1413 mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1414 sizeof (mptsas_target_t));
1415 mptsas_free_active_slots(mpt);
1416 }
1417 if (intr_added) {
1418 mptsas_unregister_intrs(mpt);
1419 }
1420
1421 if (doneq_thread_create) {
1422 mutex_enter(&mpt->m_doneq_mutex);
1423 doneq_thread_num = mpt->m_doneq_thread_n;
1424 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1425 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1426 mpt->m_doneq_thread_id[j].flag &=
1427 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1428 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1429 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1430 }
1431 while (mpt->m_doneq_thread_n) {
1432 cv_wait(&mpt->m_doneq_thread_cv,
1433 &mpt->m_doneq_mutex);
1434 }
1435 for (j = 0; j < doneq_thread_num; j++) {
1436 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1437 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1438 }
1439 kmem_free(mpt->m_doneq_thread_id,
1440 sizeof (mptsas_doneq_thread_list_t)
1441 * doneq_thread_num);
1442 mutex_exit(&mpt->m_doneq_mutex);
1443 cv_destroy(&mpt->m_doneq_thread_cv);
1444 mutex_destroy(&mpt->m_doneq_mutex);
1445 }
1446 if (event_taskq_create) {
1447 ddi_taskq_destroy(mpt->m_event_taskq);
1448 }
1449 if (dr_taskq_create) {
1450 ddi_taskq_destroy(mpt->m_dr_taskq);
1451 }
1452 if (mutex_init_done) {
1453 mutex_destroy(&mpt->m_tx_waitq_mutex);
1454 mutex_destroy(&mpt->m_passthru_mutex);
1455 mutex_destroy(&mpt->m_mutex);
1456 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1457 mutex_destroy(
1458 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1459 }
1460 cv_destroy(&mpt->m_cv);
1461 cv_destroy(&mpt->m_passthru_cv);
1462 cv_destroy(&mpt->m_fw_cv);
1463 cv_destroy(&mpt->m_config_cv);
1464 cv_destroy(&mpt->m_fw_diag_cv);
1465 }
1466
1467 if (map_setup) {
1468 mptsas_cfg_fini(mpt);
1469 }
1470 if (config_setup) {
1471 mptsas_config_space_fini(mpt);
1472 }
1473 mptsas_free_handshake_msg(mpt);
1474 mptsas_hba_fini(mpt);
1475
1476 mptsas_fm_fini(mpt);
1477 ddi_soft_state_free(mptsas_state, instance);
1478 ddi_prop_remove_all(dip);
1479 }
1480 return (DDI_FAILURE);
1481 }
1482
/*
 * DDI_SUSPEND handler for a single HBA instance: cancel its timeouts,
 * stop the global watch threads once every instance is suspended, sync
 * Integrated RAID state, and drain the task queues.  Always returns
 * DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes carry no per-instance state to suspend. */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Nested suspend: just bump the count and return. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * m_mutex is dropped around untimeout() because the callback
	 * itself may be blocked trying to acquire it.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	/* g remains NULL only when no instance is still active. */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			/* Drop the global mutex across untimeout(), as above. */
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1589
1590 #ifdef __sparc
1591 /*ARGSUSED*/
1592 static int
1593 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1594 {
1595 mptsas_t *mpt;
1596 scsi_hba_tran_t *tran;
1597
1598 /*
1599 * If this call is for iport, just return.
1600 */
1601 if (scsi_hba_iport_unit_address(devi))
1602 return (DDI_SUCCESS);
1603
1604 if ((tran = ddi_get_driver_private(devi)) == NULL)
1605 return (DDI_SUCCESS);
1606
1607 if ((mpt = TRAN2MPT(tran)) == NULL)
1608 return (DDI_SUCCESS);
1609
1610 /*
1611 * Send RAID action system shutdown to sync IR. Disable HBA
1612 * interrupts in hardware first.
1613 */
1614 MPTSAS_DISABLE_INTR(mpt);
1615 mptsas_raid_action_system_shutdown(mpt);
1616
1617 return (DDI_SUCCESS);
1618 }
1619 #else /* __sparc */
1620 /*
1621 * quiesce(9E) entry point.
1622 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
1626 *
1627 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1628 * DDI_FAILURE indicates an error condition and should almost never happen.
1629 */
1630 static int
1631 mptsas_quiesce(dev_info_t *devi)
1632 {
1633 mptsas_t *mpt;
1634 scsi_hba_tran_t *tran;
1635
1636 /*
1637 * If this call is for iport, just return.
1638 */
1639 if (scsi_hba_iport_unit_address(devi))
1640 return (DDI_SUCCESS);
1641
1642 if ((tran = ddi_get_driver_private(devi)) == NULL)
1643 return (DDI_SUCCESS);
1644
1645 if ((mpt = TRAN2MPT(tran)) == NULL)
1646 return (DDI_SUCCESS);
1647
1648 /* Disable HBA interrupts in hardware */
1649 MPTSAS_DISABLE_INTR(mpt);
1650 /* Send RAID action system shutdonw to sync IR */
1651 mptsas_raid_action_system_shutdown(mpt);
1652
1653 return (DDI_SUCCESS);
1654 }
1655 #endif /* __sparc */
1656
1657 /*
1658 * detach(9E). Remove all device allocations and system resources;
1659 * disable device interrupts.
1660 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1661 */
1662 static int
1663 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1664 {
1665 /* CONSTCOND */
1666 ASSERT(NO_COMPETING_THREADS);
1667 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1668
1669 switch (cmd) {
1670 case DDI_DETACH:
1671 return (mptsas_do_detach(devi));
1672
1673 case DDI_SUSPEND:
1674 return (mptsas_suspend(devi));
1675
1676 default:
1677 return (DDI_FAILURE);
1678 }
1679 /* NOTREACHED */
1680 }
1681
/*
 * Core DDI_DETACH work: undo everything mptsas_attach() set up, in
 * roughly reverse order.  For iport nodes, only the MPxIO path info
 * and properties are torn down.  Returns DDI_SUCCESS when all
 * resources were released, DDI_FAILURE otherwise.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* Free each client path; fail if any is still busy. */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Stop the doneq helper threads: clear ACTIVE, wake each thread,
	 * wait for them all to exit, then free their state.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		/* Each exiting thread decrements m_doneq_thread_n. */
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	/* m_mutex is dropped around untimeout() to avoid deadlock. */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
	mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1897
1898 static void
1899 mptsas_list_add(mptsas_t *mpt)
1900 {
1901 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1902
1903 if (mptsas_head == NULL) {
1904 mptsas_head = mpt;
1905 } else {
1906 mptsas_tail->m_next = mpt;
1907 }
1908 mptsas_tail = mpt;
1909 rw_exit(&mptsas_global_rwlock);
1910 }
1911
1912 static void
1913 mptsas_list_del(mptsas_t *mpt)
1914 {
1915 mptsas_t *m;
1916 /*
1917 * Remove device instance from the global linked list
1918 */
1919 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1920 if (mptsas_head == mpt) {
1921 m = mptsas_head = mpt->m_next;
1922 } else {
1923 for (m = mptsas_head; m != NULL; m = m->m_next) {
1924 if (m->m_next == mpt) {
1925 m->m_next = mpt->m_next;
1926 break;
1927 }
1928 }
1929 if (m == NULL) {
1930 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1931 }
1932 }
1933
1934 if (mptsas_tail == mpt) {
1935 mptsas_tail = m;
1936 }
1937 rw_exit(&mptsas_global_rwlock);
1938 }
1939
1940 static int
1941 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1942 {
1943 ddi_dma_attr_t task_dma_attrs;
1944
1945 task_dma_attrs = mpt->m_msg_dma_attr;
1946 task_dma_attrs.dma_attr_sgllen = 1;
1947 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1948
1949 /* allocate Task Management ddi_dma resources */
1950 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
1951 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
1952 alloc_size, NULL) == FALSE) {
1953 return (DDI_FAILURE);
1954 }
1955 mpt->m_hshk_dma_size = alloc_size;
1956
1957 return (DDI_SUCCESS);
1958 }
1959
1960 static void
1961 mptsas_free_handshake_msg(mptsas_t *mpt)
1962 {
1963 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
1964 mpt->m_hshk_dma_size = 0;
1965 }
1966
1967 static int
1968 mptsas_hba_setup(mptsas_t *mpt)
1969 {
1970 scsi_hba_tran_t *hba_tran;
1971 int tran_flags;
1972
1973 /* Allocate a transport structure */
1974 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
1975 SCSI_HBA_CANSLEEP);
1976 ASSERT(mpt->m_tran != NULL);
1977
1978 hba_tran->tran_hba_private = mpt;
1979 hba_tran->tran_tgt_private = NULL;
1980
1981 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
1982 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;
1983
1984 hba_tran->tran_start = mptsas_scsi_start;
1985 hba_tran->tran_reset = mptsas_scsi_reset;
1986 hba_tran->tran_abort = mptsas_scsi_abort;
1987 hba_tran->tran_getcap = mptsas_scsi_getcap;
1988 hba_tran->tran_setcap = mptsas_scsi_setcap;
1989 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
1990 hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;
1991
1992 hba_tran->tran_dmafree = mptsas_scsi_dmafree;
1993 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
1994 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;
1995
1996 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
1997 hba_tran->tran_get_name = mptsas_get_name;
1998
1999 hba_tran->tran_quiesce = mptsas_scsi_quiesce;
2000 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
2001 hba_tran->tran_bus_reset = NULL;
2002
2003 hba_tran->tran_add_eventcall = NULL;
2004 hba_tran->tran_get_eventcookie = NULL;
2005 hba_tran->tran_post_event = NULL;
2006 hba_tran->tran_remove_eventcall = NULL;
2007
2008 hba_tran->tran_bus_config = mptsas_bus_config;
2009
2010 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2011
2012 /*
2013 * All children of the HBA are iports. We need tran was cloned.
2014 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2015 * inherited to iport's tran vector.
2016 */
2017 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2018
2019 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2020 hba_tran, tran_flags) != DDI_SUCCESS) {
2021 mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2022 scsi_hba_tran_free(hba_tran);
2023 mpt->m_tran = NULL;
2024 return (FALSE);
2025 }
2026 return (TRUE);
2027 }
2028
2029 static void
2030 mptsas_hba_teardown(mptsas_t *mpt)
2031 {
2032 (void) scsi_hba_detach(mpt->m_dip);
2033 if (mpt->m_tran != NULL) {
2034 scsi_hba_tran_free(mpt->m_tran);
2035 mpt->m_tran = NULL;
2036 }
2037 }
2038
/*
 * Register one SCSA iport per discovered SAS port, plus a fixed virtual
 * iport "v0" for RAID volumes.  A port is the set of phys that share the
 * same port_num; its unit address is the port's phymask printed in hex.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int		i, j;
	mptsas_phymask_t	mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip phys with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Skip phys already covered by an earlier port's mask. */
		if ((mask & (1 << i)) != 0)
			continue;

		/* Gather every attached phy belonging to this port. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the port's phymask on each member phy. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* The iport unit address is the phymask in hex. */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/* Drop the instance mutex across the DDI registration. */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2093
2094 static int
2095 mptsas_smp_setup(mptsas_t *mpt)
2096 {
2097 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2098 ASSERT(mpt->m_smptran != NULL);
2099 mpt->m_smptran->smp_tran_hba_private = mpt;
2100 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2101 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2102 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2103 smp_hba_tran_free(mpt->m_smptran);
2104 mpt->m_smptran = NULL;
2105 return (FALSE);
2106 }
2107 /*
2108 * Initialize smp hash table
2109 */
2110 mptsas_hash_init(&mpt->m_active->m_smptbl);
2111 mpt->m_smp_devhdl = 0xFFFF;
2112
2113 return (TRUE);
2114 }
2115
2116 static void
2117 mptsas_smp_teardown(mptsas_t *mpt)
2118 {
2119 (void) smp_hba_detach(mpt->m_dip);
2120 if (mpt->m_smptran != NULL) {
2121 smp_hba_tran_free(mpt->m_smptran);
2122 mpt->m_smptran = NULL;
2123 }
2124 mpt->m_smp_devhdl = 0;
2125 }
2126
2127 static int
2128 mptsas_cache_create(mptsas_t *mpt)
2129 {
2130 int instance = mpt->m_instance;
2131 char buf[64];
2132
2133 /*
2134 * create kmem cache for packets
2135 */
2136 (void) sprintf(buf, "mptsas%d_cache", instance);
2137 mpt->m_kmem_cache = kmem_cache_create(buf,
2138 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2139 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2140 NULL, (void *)mpt, NULL, 0);
2141
2142 if (mpt->m_kmem_cache == NULL) {
2143 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2144 return (FALSE);
2145 }
2146
2147 /*
2148 * create kmem cache for extra SGL frames if SGL cannot
2149 * be accomodated into main request frame.
2150 */
2151 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2152 mpt->m_cache_frames = kmem_cache_create(buf,
2153 sizeof (mptsas_cache_frames_t), 8,
2154 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2155 NULL, (void *)mpt, NULL, 0);
2156
2157 if (mpt->m_cache_frames == NULL) {
2158 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2159 return (FALSE);
2160 }
2161
2162 return (TRUE);
2163 }
2164
2165 static void
2166 mptsas_cache_destroy(mptsas_t *mpt)
2167 {
2168 /* deallocate in reverse order */
2169 if (mpt->m_cache_frames) {
2170 kmem_cache_destroy(mpt->m_cache_frames);
2171 mpt->m_cache_frames = NULL;
2172 }
2173 if (mpt->m_kmem_cache) {
2174 kmem_cache_destroy(mpt->m_kmem_cache);
2175 mpt->m_kmem_cache = NULL;
2176 }
2177 }
2178
/*
 * power(9E) entry point.
 *
 * PM_LEVEL_D0: power the chip on, wait up to 30 seconds for the IOC to
 * leave the reset state, and hard-reset it if it is still not in the
 * operational state.  PM_LEVEL_D3: power the chip off.  The request is
 * rejected when the HBA is busy and the new level is lower than the
 * current one.  Iport children are a no-op; only the physical HBA node
 * is handled.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* Power management applies to the HBA node only, not iports. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls of 10 ms each).
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2250
2251 /*
2252 * Initialize configuration space and figure out which
2253 * chip and revison of the chip the mpt driver is using.
2254 */
2255 static int
2256 mptsas_config_space_init(mptsas_t *mpt)
2257 {
2258 NDBG0(("mptsas_config_space_init"));
2259
2260 if (mpt->m_config_handle != NULL)
2261 return (TRUE);
2262
2263 if (pci_config_setup(mpt->m_dip,
2264 &mpt->m_config_handle) != DDI_SUCCESS) {
2265 mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2266 return (FALSE);
2267 }
2268
2269 /*
2270 * This is a workaround for a XMITS ASIC bug which does not
2271 * drive the CBE upper bits.
2272 */
2273 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2274 PCI_STAT_PERROR) {
2275 pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2276 PCI_STAT_PERROR);
2277 }
2278
2279 mptsas_setup_cmd_reg(mpt);
2280
2281 /*
2282 * Get the chip device id:
2283 */
2284 mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2285
2286 /*
2287 * Save the revision.
2288 */
2289 mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2290
2291 /*
2292 * Save the SubSystem Vendor and Device IDs
2293 */
2294 mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2295 mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2296
2297 /*
2298 * Set the latency timer to 0x40 as specified by the upa -> pci
2299 * bridge chip design team. This may be done by the sparc pci
2300 * bus nexus driver, but the driver should make sure the latency
2301 * timer is correct for performance reasons.
2302 */
2303 pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2304 MPTSAS_LATENCY_TIMER);
2305
2306 (void) mptsas_get_pci_cap(mpt);
2307 return (TRUE);
2308 }
2309
2310 static void
2311 mptsas_config_space_fini(mptsas_t *mpt)
2312 {
2313 if (mpt->m_config_handle != NULL) {
2314 mptsas_disable_bus_master(mpt);
2315 pci_config_teardown(&mpt->m_config_handle);
2316 mpt->m_config_handle = NULL;
2317 }
2318 }
2319
2320 static void
2321 mptsas_setup_cmd_reg(mptsas_t *mpt)
2322 {
2323 ushort_t cmdreg;
2324
2325 /*
2326 * Set the command register to the needed values.
2327 */
2328 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2329 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2330 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2331 cmdreg &= ~PCI_COMM_IO;
2332 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2333 }
2334
2335 static void
2336 mptsas_disable_bus_master(mptsas_t *mpt)
2337 {
2338 ushort_t cmdreg;
2339
2340 /*
2341 * Clear the master enable bit in the PCI command register.
2342 * This prevents any bus mastering activity like DMA.
2343 */
2344 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2345 cmdreg &= ~PCI_COMM_ME;
2346 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2347 }
2348
2349 int
2350 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2351 {
2352 ddi_dma_attr_t attrs;
2353
2354 attrs = mpt->m_io_dma_attr;
2355 attrs.dma_attr_sgllen = 1;
2356
2357 ASSERT(dma_statep != NULL);
2358
2359 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2360 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2361 &dma_statep->cookie) == FALSE) {
2362 return (DDI_FAILURE);
2363 }
2364
2365 return (DDI_SUCCESS);
2366 }
2367
2368 void
2369 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2370 {
2371 ASSERT(dma_statep != NULL);
2372 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2373 dma_statep->size = 0;
2374 }
2375
2376 int
2377 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2378 {
2379 ddi_dma_attr_t attrs;
2380 ddi_dma_handle_t dma_handle;
2381 caddr_t memp;
2382 ddi_acc_handle_t accessp;
2383 int rval;
2384
2385 ASSERT(mutex_owned(&mpt->m_mutex));
2386
2387 attrs = mpt->m_msg_dma_attr;
2388 attrs.dma_attr_sgllen = 1;
2389 attrs.dma_attr_granular = size;
2390
2391 if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2392 &accessp, &memp, size, NULL) == FALSE) {
2393 return (DDI_FAILURE);
2394 }
2395
2396 rval = (*callback) (mpt, memp, var, accessp);
2397
2398 if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2399 (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2400 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2401 rval = DDI_FAILURE;
2402 }
2403
2404 mptsas_dma_addr_destroy(&dma_handle, &accessp);
2405 return (rval);
2406
2407 }
2408
/*
 * Allocate (or re-allocate) the DMA-able system request frame pool.
 * On success m_req_frame/m_req_frame_dma_addr point at a zeroed pool of
 * m_max_requests frames of m_req_frame_size bytes each.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t	frame_dma_attrs;
	caddr_t		memp;
	ddi_dma_cookie_t cookie;
	size_t		mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *	Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}
2461
/*
 * Allocate (or re-allocate) the DMA-able system reply frame pool.
 * On success m_reply_frame/m_reply_frame_dma_addr point at a zeroed
 * pool of m_max_replies frames of m_reply_frame_size bytes each.
 */
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t	frame_dma_attrs;
	caddr_t		memp;
	ddi_dma_cookie_t cookie;
	size_t		mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	/*
	 * The size of the reply frame pool is:
	 *	Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.  System Reply Message Frames must be
	 * aligned on a 4-byte boundary.  This is the default.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}
2513
/*
 * Allocate (or re-allocate) the DMA-able reply free queue, the queue of
 * 32-bit reply frame addresses the driver hands back to the chip.
 * On success m_free_queue/m_free_queue_dma_addr point at the zeroed
 * queue of m_free_queue_depth entries.
 */
static int
mptsas_alloc_free_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t	frame_dma_attrs;
	caddr_t		memp;
	ddi_dma_cookie_t cookie;
	size_t		mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	/*
	 * The reply free queue size is:
	 *	Reply Free Queue Depth * 4
	 * The "4" is the size of one 32 bit address (low part of 64-bit
	 * address)
	 */
	mem_size = mpt->m_free_queue_depth * 4;

	/*
	 * set the DMA attributes.  The Reply Free Queue must be aligned on
	 * a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply free queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply free queue memory address.  This chip uses this
	 * address to read from the reply free queue.  The second address
	 * is the address mpt uses to manage the queue.
	 */
	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_free_queue = memp;

	/*
	 * Clear the reply free queue memory.
	 */
	bzero(mpt->m_free_queue, mem_size);

	return (DDI_SUCCESS);
}
2568
/*
 * Allocate (or re-allocate) the DMA-able reply descriptor post queue,
 * the queue of 64-bit descriptors the chip writes completions into.
 * On success m_post_queue/m_post_queue_dma_addr point at the zeroed
 * queue of m_post_queue_depth entries.
 */
static int
mptsas_alloc_post_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t	frame_dma_attrs;
	caddr_t		memp;
	ddi_dma_cookie_t cookie;
	size_t		mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/*
	 * The reply descriptor post queue size is:
	 *	Reply Descriptor Post Queue Depth * 8
	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
	 */
	mem_size = mpt->m_post_queue_depth * 8;

	/*
	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply post queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply descriptor post queue memory address.  This chip
	 * uses this address to write to the reply descriptor post queue.  The
	 * second address is the address mpt uses to manage the queue.
	 */
	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_post_queue = memp;

	/*
	 * Clear the reply post queue memory.
	 */
	bzero(mpt->m_post_queue, mem_size);

	return (DDI_SUCCESS);
}
2622
2623 static void
2624 mptsas_alloc_reply_args(mptsas_t *mpt)
2625 {
2626 if (mpt->m_replyh_args == NULL) {
2627 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2628 mpt->m_max_replies, KM_SLEEP);
2629 }
2630 }
2631
2632 static int
2633 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2634 {
2635 mptsas_cache_frames_t *frames = NULL;
2636 if (cmd->cmd_extra_frames == NULL) {
2637 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2638 if (frames == NULL) {
2639 return (DDI_FAILURE);
2640 }
2641 cmd->cmd_extra_frames = frames;
2642 }
2643 return (DDI_SUCCESS);
2644 }
2645
2646 static void
2647 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2648 {
2649 if (cmd->cmd_extra_frames) {
2650 kmem_cache_free(mpt->m_cache_frames,
2651 (void *)cmd->cmd_extra_frames);
2652 cmd->cmd_extra_frames = NULL;
2653 }
2654 }
2655
2656 static void
2657 mptsas_cfg_fini(mptsas_t *mpt)
2658 {
2659 NDBG0(("mptsas_cfg_fini"));
2660 ddi_regs_map_free(&mpt->m_datap);
2661 }
2662
2663 static void
2664 mptsas_hba_fini(mptsas_t *mpt)
2665 {
2666 NDBG0(("mptsas_hba_fini"));
2667
2668 /*
2669 * Free up any allocated memory
2670 */
2671 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2672 &mpt->m_acc_req_frame_hdl);
2673
2674 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2675 &mpt->m_acc_reply_frame_hdl);
2676
2677 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2678 &mpt->m_acc_free_queue_hdl);
2679
2680 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2681 &mpt->m_acc_post_queue_hdl);
2682
2683 if (mpt->m_replyh_args != NULL) {
2684 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2685 * mpt->m_max_replies);
2686 }
2687 }
2688
/*
 * Build the unit-address portion of a child's name into 'name' (at most
 * 'len' bytes).  SATA devices addressed by phy use the form "pPHY,LUN";
 * otherwise the target-port WWN string and LUN are used.  Returns
 * DDI_FAILURE when neither addressing property is present on the node.
 */
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int		lun = 0;
	char		*sas_wwn = NULL;
	int		phynum = -1;
	int		reallen = 0;

	/* Get the target num */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		return (DDI_FAILURE);
	}

	ASSERT(reallen < len);
	if (reallen >= len) {
		/*
		 * NOTE(review): truncation is only logged; the function
		 * still returns DDI_SUCCESS with a truncated name.
		 */
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}
2726
2727 /*
2728 * tran_tgt_init(9E) - target device instance initialization
2729 */
2730 static int
2731 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2732 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2733 {
2734 #ifndef __lock_lint
2735 _NOTE(ARGUNUSED(hba_tran))
2736 #endif
2737
2738 /*
2739 * At this point, the scsi_device structure already exists
2740 * and has been initialized.
2741 *
2742 * Use this function to allocate target-private data structures,
2743 * if needed by this HBA. Add revised flow-control and queue
2744 * properties for child here, if desired and if you can tell they
2745 * support tagged queueing by now.
2746 */
2747 mptsas_t *mpt;
2748 int lun = sd->sd_address.a_lun;
2749 mdi_pathinfo_t *pip = NULL;
2750 mptsas_tgt_private_t *tgt_private = NULL;
2751 mptsas_target_t *ptgt = NULL;
2752 char *psas_wwn = NULL;
2753 int phymask = 0;
2754 uint64_t sas_wwn = 0;
2755 mpt = SDEV2MPT(sd);
2756
2757 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
2758
2759 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2760 (void *)hba_dip, (void *)tgt_dip, lun));
2761
2762 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
2763 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
2764 ddi_set_name_addr(tgt_dip, NULL);
2765 return (DDI_FAILURE);
2766 }
2767 /*
2768 * phymask is 0 means the virtual port for RAID
2769 */
2770 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
2771 "phymask", 0);
2772 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2773 if ((pip = (void *)(sd->sd_private)) == NULL) {
2774 /*
2775 * Very bad news if this occurs. Somehow scsi_vhci has
2776 * lost the pathinfo node for this target.
2777 */
2778 return (DDI_NOT_WELL_FORMED);
2779 }
2780
2781 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
2782 DDI_PROP_SUCCESS) {
2783 mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
2784 return (DDI_FAILURE);
2785 }
2786
2787 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
2788 &psas_wwn) == MDI_SUCCESS) {
2789 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2790 sas_wwn = 0;
2791 }
2792 (void) mdi_prop_free(psas_wwn);
2793 }
2794 } else {
2795 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
2796 DDI_PROP_DONTPASS, LUN_PROP, 0);
2797 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
2798 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
2799 DDI_PROP_SUCCESS) {
2800 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2801 sas_wwn = 0;
2802 }
2803 ddi_prop_free(psas_wwn);
2804 } else {
2805 sas_wwn = 0;
2806 }
2807 }
2808 ASSERT((sas_wwn != 0) || (phymask != 0));
2809 mutex_enter(&mpt->m_mutex);
2810 ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
2811 mutex_exit(&mpt->m_mutex);
2812 if (ptgt == NULL) {
2813 mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
2814 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
2815 sas_wwn);
2816 return (DDI_FAILURE);
2817 }
2818 if (hba_tran->tran_tgt_private == NULL) {
2819 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
2820 KM_SLEEP);
2821 tgt_private->t_lun = lun;
2822 tgt_private->t_private = ptgt;
2823 hba_tran->tran_tgt_private = tgt_private;
2824 }
2825
2826 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2827 return (DDI_SUCCESS);
2828 }
2829 mutex_enter(&mpt->m_mutex);
2830
2831 if (ptgt->m_deviceinfo &
2832 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
2833 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
2834 uchar_t *inq89 = NULL;
2835 int inq89_len = 0x238;
2836 int reallen = 0;
2837 int rval = 0;
2838 struct sata_id *sid = NULL;
2839 char model[SATA_ID_MODEL_LEN + 1];
2840 char fw[SATA_ID_FW_LEN + 1];
2841 char *vid, *pid;
2842 int i;
2843
2844 mutex_exit(&mpt->m_mutex);
2845 /*
2846 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
2847 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
2848 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
2849 */
2850 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
2851 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
2852 inq89, inq89_len, &reallen, 1);
2853
2854 if (rval != 0) {
2855 if (inq89 != NULL) {
2856 kmem_free(inq89, inq89_len);
2857 }
2858
2859 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
2860 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
2861 return (DDI_SUCCESS);
2862 }
2863 sid = (void *)(&inq89[60]);
2864
2865 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
2866 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
2867
2868 model[SATA_ID_MODEL_LEN] = 0;
2869 fw[SATA_ID_FW_LEN] = 0;
2870
2871 /*
2872 * split model into into vid/pid
2873 */
2874 for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
2875 if ((*pid == ' ') || (*pid == '\t'))
2876 break;
2877 if (i < SATA_ID_MODEL_LEN) {
2878 vid = model;
2879 /*
2880 * terminate vid, establish pid
2881 */
2882 *pid++ = 0;
2883 } else {
2884 /*
2885 * vid will stay "ATA ", the rule is same
2886 * as sata framework implementation.
2887 */
2888 vid = NULL;
2889 /*
2890 * model is all pid
2891 */
2892 pid = model;
2893 }
2894
2895 /*
2896 * override SCSA "inquiry-*" properties
2897 */
2898 if (vid)
2899 (void) scsi_device_prop_update_inqstring(sd,
2900 INQUIRY_VENDOR_ID, vid, strlen(vid));
2901 if (pid)
2902 (void) scsi_device_prop_update_inqstring(sd,
2903 INQUIRY_PRODUCT_ID, pid, strlen(pid));
2904 (void) scsi_device_prop_update_inqstring(sd,
2905 INQUIRY_REVISION_ID, fw, strlen(fw));
2906
2907 if (inq89 != NULL) {
2908 kmem_free(inq89, inq89_len);
2909 }
2910 } else {
2911 mutex_exit(&mpt->m_mutex);
2912 }
2913
2914 return (DDI_SUCCESS);
2915 }
2916 /*
2917 * tran_tgt_free(9E) - target device instance deallocation
2918 */
2919 static void
2920 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2921 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2922 {
2923 #ifndef __lock_lint
2924 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2925 #endif
2926
2927 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2928
2929 if (tgt_private != NULL) {
2930 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2931 hba_tran->tran_tgt_private = NULL;
2932 }
2933 }
2934
2935 /*
2936 * scsi_pkt handling
2937 *
2938 * Visible to the external world via the transport structure.
2939 */
2940
2941 /*
2942 * Notes:
2943 * - transport the command to the addressed SCSI target/lun device
2944 * - normal operation is to schedule the command to be transported,
2945 * and return TRAN_ACCEPT if this is successful.
2946 * - if NO_INTR, tran_start must poll device for command completion
2947 */
2948 static int
2949 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
2950 {
2951 #ifndef __lock_lint
2952 _NOTE(ARGUNUSED(ap))
2953 #endif
2954 mptsas_t *mpt = PKT2MPT(pkt);
2955 mptsas_cmd_t *cmd = PKT2CMD(pkt);
2956 int rval;
2957 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
2958
2959 NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
2960 ASSERT(ptgt);
2961 if (ptgt == NULL)
2962 return (TRAN_FATAL_ERROR);
2963
2964 /*
2965 * prepare the pkt before taking mutex.
2966 */
2967 rval = mptsas_prepare_pkt(cmd);
2968 if (rval != TRAN_ACCEPT) {
2969 return (rval);
2970 }
2971
2972 /*
2973 * Send the command to target/lun, however your HBA requires it.
2974 * If busy, return TRAN_BUSY; if there's some other formatting error
2975 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
2976 * return of TRAN_ACCEPT.
2977 *
2978 * Remember that access to shared resources, including the mptsas_t
2979 * data structure and the HBA hardware registers, must be protected
2980 * with mutexes, here and everywhere.
2981 *
2982 * Also remember that at interrupt time, you'll get an argument
2983 * to the interrupt handler which is a pointer to your mptsas_t
2984 * structure; you'll have to remember which commands are outstanding
2985 * and which scsi_pkt is the currently-running command so the
2986 * interrupt handler can refer to the pkt to set completion
2987 * status, call the target driver back through pkt_comp, etc.
2988 *
2989 * If the instance lock is held by other thread, don't spin to wait
2990 * for it. Instead, queue the cmd and next time when the instance lock
2991 * is not held, accept all the queued cmd. A extra tx_waitq is
2992 * introduced to protect the queue.
2993 *
2994 * The polled cmd will not be queud and accepted as usual.
2995 *
2996 * Under the tx_waitq mutex, record whether a thread is draining
2997 * the tx_waitq. An IO requesting thread that finds the instance
2998 * mutex contended appends to the tx_waitq and while holding the
2999 * tx_wait mutex, if the draining flag is not set, sets it and then
3000 * proceeds to spin for the instance mutex. This scheme ensures that
3001 * the last cmd in a burst be processed.
3002 *
3003 * we enable this feature only when the helper threads are enabled,
3004 * at which we think the loads are heavy.
3005 *
3006 * per instance mutex m_tx_waitq_mutex is introduced to protect the
3007 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
3008 */
3009
3010 if (mpt->m_doneq_thread_n) {
3011 if (mutex_tryenter(&mpt->m_mutex) != 0) {
3012 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3013 mutex_exit(&mpt->m_mutex);
3014 } else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3015 mutex_enter(&mpt->m_mutex);
3016 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3017 mutex_exit(&mpt->m_mutex);
3018 } else {
3019 mutex_enter(&mpt->m_tx_waitq_mutex);
3020 /*
3021 * ptgt->m_dr_flag is protected by m_mutex or
3022 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3023 * is acquired.
3024 */
3025 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3026 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3027 /*
3028 * The command should be allowed to
3029 * retry by returning TRAN_BUSY to
3030 * to stall the I/O's which come from
3031 * scsi_vhci since the device/path is
3032 * in unstable state now.
3033 */
3034 mutex_exit(&mpt->m_tx_waitq_mutex);
3035 return (TRAN_BUSY);
3036 } else {
3037 /*
3038 * The device is offline, just fail the
3039 * command by returning
3040 * TRAN_FATAL_ERROR.
3041 */
3042 mutex_exit(&mpt->m_tx_waitq_mutex);
3043 return (TRAN_FATAL_ERROR);
3044 }
3045 }
3046 if (mpt->m_tx_draining) {
3047 cmd->cmd_flags |= CFLAG_TXQ;
3048 *mpt->m_tx_waitqtail = cmd;
3049 mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3050 mutex_exit(&mpt->m_tx_waitq_mutex);
3051 } else { /* drain the queue */
3052 mpt->m_tx_draining = 1;
3053 mutex_exit(&mpt->m_tx_waitq_mutex);
3054 mutex_enter(&mpt->m_mutex);
3055 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3056 mutex_exit(&mpt->m_mutex);
3057 }
3058 }
3059 } else {
3060 mutex_enter(&mpt->m_mutex);
3061 /*
3062 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3063 * in this case, m_mutex is acquired.
3064 */
3065 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3066 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3067 /*
3068 * commands should be allowed to retry by
3069 * returning TRAN_BUSY to stall the I/O's
3070 * which come from scsi_vhci since the device/
3071 * path is in unstable state now.
3072 */
3073 mutex_exit(&mpt->m_mutex);
3074 return (TRAN_BUSY);
3075 } else {
3076 /*
3077 * The device is offline, just fail the
3078 * command by returning TRAN_FATAL_ERROR.
3079 */
3080 mutex_exit(&mpt->m_mutex);
3081 return (TRAN_FATAL_ERROR);
3082 }
3083 }
3084 rval = mptsas_accept_pkt(mpt, cmd);
3085 mutex_exit(&mpt->m_mutex);
3086 }
3087
3088 return (rval);
3089 }
3090
3091 /*
3092 * Accept all the queued cmds(if any) before accept the current one.
3093 */
3094 static int
3095 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3096 {
3097 int rval;
3098 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3099
3100 ASSERT(mutex_owned(&mpt->m_mutex));
3101 /*
3102 * The call to mptsas_accept_tx_waitq() must always be performed
3103 * because that is where mpt->m_tx_draining is cleared.
3104 */
3105 mutex_enter(&mpt->m_tx_waitq_mutex);
3106 mptsas_accept_tx_waitq(mpt);
3107 mutex_exit(&mpt->m_tx_waitq_mutex);
3108 /*
3109 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3110 * in this case, m_mutex is acquired.
3111 */
3112 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3113 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3114 /*
3115 * The command should be allowed to retry by returning
3116 * TRAN_BUSY to stall the I/O's which come from
3117 * scsi_vhci since the device/path is in unstable state
3118 * now.
3119 */
3120 return (TRAN_BUSY);
3121 } else {
3122 /*
3123 * The device is offline, just fail the command by
3124 * return TRAN_FATAL_ERROR.
3125 */
3126 return (TRAN_FATAL_ERROR);
3127 }
3128 }
3129 rval = mptsas_accept_pkt(mpt, cmd);
3130
3131 return (rval);
3132 }
3133
/*
 * Accept a prepared command for execution.
 *
 * Called with mpt->m_mutex held.  Prepares the pkt if needed, handles
 * throttle reset, rejects commands during HBA reset or for invalidated
 * device handles, and either starts the command immediately or places it
 * on the wait queue (polling it to completion for FLAG_NOINTR packets).
 *
 * Returns TRAN_ACCEPT on success; TRAN_BUSY/TRAN_FATAL_ERROR on rejection.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Prepare the pkt unless it was already prepared (resubmission). */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* tx-waitq commands complete through the done queue */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command.  In theory, a command from a scsi_vhci
	 * client cannot be sent down with an invalid devhdl since
	 * the devhdl is invalidated after the path goes offline, and a
	 * target driver is not supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* no free slot; queue until one opens up */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3237
/*
 * Allocate an active-command slot (SMID) for cmd and record it in the
 * active slot table.
 *
 * Called with mpt->m_mutex held.  Slot 0 is the reserved SMID of 0 and the
 * slot past m_n_slots is reserved for task-management requests, so valid
 * slots are 1..m_n_slots.  m_tags is a rotor used to spread slot reuse.
 *
 * Returns TRUE if a slot was assigned (cmd->cmd_slot set, counters and
 * timeout initialized), FALSE if all slots are in use.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots;
	int slot;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * m_tags is equivalent to the SMID when sending requests.  Since the
	 * SMID cannot be 0, start out at one if rolling over past the size
	 * of the request queue depth.  Also, don't use the last SMID, which
	 * is reserved for TM requests.
	 */
	slot = (slots->m_tags)++;
	if (slots->m_tags > slots->m_n_slots) {
		slots->m_tags = 1;
	}

alloc_tag:
	/* Validate tag, should never fail. */
	if (slots->m_slot[slot] == NULL) {
		/*
		 * Make sure SMID is not using reserved value of 0
		 * and the TM request slot.
		 */
		ASSERT((slot > 0) && (slot <= slots->m_n_slots));
		cmd->cmd_slot = slot;
		slots->m_slot[slot] = cmd;
		mpt->m_ncmds++;

		/*
		 * only increment per target ncmds if this is not a
		 * command that has no target associated with it (i.e. an
		 * event acknowledgment)
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds++;
		}
		cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

		/*
		 * If initial timeout is less than or equal to one tick, bump
		 * the timeout by a tick so that command doesn't timeout
		 * before its allotted time.
		 */
		if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
			cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
		}
		return (TRUE);
	} else {
		int i;

		/*
		 * If slot in use, scan until a free one is found.  Don't use
		 * 0 or the final slot, which is reserved for TM requests.
		 */
		for (i = 0; i < slots->m_n_slots; i++) {
			slot = slots->m_tags;
			if (++(slots->m_tags) > slots->m_n_slots) {
				slots->m_tags = 1;
			}
			if (slots->m_slot[slot] == NULL) {
				NDBG22(("found free slot %d", slot));
				goto alloc_tag;
			}
		}
	}
	/* no free slot available */
	return (FALSE);
}
3315
3316 /*
3317 * prepare the pkt:
3318 * the pkt may have been resubmitted or just reused so
3319 * initialize some fields and do some checks.
3320 */
3321 static int
3322 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3323 {
3324 struct scsi_pkt *pkt = CMD2PKT(cmd);
3325
3326 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3327
3328 /*
3329 * Reinitialize some fields that need it; the packet may
3330 * have been resubmitted
3331 */
3332 pkt->pkt_reason = CMD_CMPLT;
3333 pkt->pkt_state = 0;
3334 pkt->pkt_statistics = 0;
3335 pkt->pkt_resid = 0;
3336 cmd->cmd_age = 0;
3337 cmd->cmd_pkt_flags = pkt->pkt_flags;
3338
3339 /*
3340 * zero status byte.
3341 */
3342 *(pkt->pkt_scbp) = 0;
3343
3344 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3345 pkt->pkt_resid = cmd->cmd_dmacount;
3346
3347 /*
3348 * consistent packets need to be sync'ed first
3349 * (only for data going out)
3350 */
3351 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3352 (cmd->cmd_flags & CFLAG_DMASEND)) {
3353 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3354 DDI_DMA_SYNC_FORDEV);
3355 }
3356 }
3357
3358 cmd->cmd_flags =
3359 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3360 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3361
3362 return (TRAN_ACCEPT);
3363 }
3364
/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *	- allocate scsi_pkt
 *	- allocate scsi_pkt and DMA resources
 *	- allocate DMA resources to an already-allocated pkt
 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			failure = 1;
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	/* Only SLEEP_FUNC callers may block for memory. */
	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	/* Refresh the address with the current devhdl/lun for this target. */
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;
		ddi_dma_handle_t	save_arq_dma_handle;
		struct buf		*save_arq_bp;
		ddi_dma_cookie_t	save_arqcookie;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * The cache constructor set up the DMA/ARQ
			 * resources; preserve them across the bzero of
			 * the cmd + embedded pkt, then restore.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			save_arq_dma_handle = cmd->cmd_arqhandle;
			save_arq_bp = cmd->cmd_arq_buf;
			save_arqcookie = cmd->cmd_arqcookie;
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;
			cmd->cmd_arqhandle = save_arq_dma_handle;
			cmd->cmd_arq_buf = save_arq_bp;
			cmd->cmd_arqcookie = save_arqcookie;

			/* The scsi_pkt lives directly after the cmd. */
			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		/*
		 * If the caller needs larger-than-embedded cdb, private
		 * or status areas, allocate them externally.
		 */
		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		/* bypass binding; only the S/G list needs rebuilding */
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation.  This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* partial mapping: set up the first window */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* map bind failure codes to bioerror values */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				/* we allocated the pkt; tear it down too */
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
		/*
		 * if we have an ENOMEM condition we need to behave
		 * the same way as the rest of this routine
		 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used).  totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
	}
	return (pkt);
}
3715
/*
 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
 *
 * Notes:
 *	- also frees DMA resources if allocated
 *	- implicit DMA synchonization
 */
static void
mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	/* Release the data DMA binding, if any. */
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/* Free the scatter/gather list allocated in init_pkt. */
	if (cmd->cmd_sg) {
		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
		cmd->cmd_sg = NULL;
	}

	mptsas_free_extra_sgl_frame(mpt, cmd);

	/*
	 * Commands with no externally-allocated areas go straight back to
	 * the kmem cache; otherwise the external areas must be torn down
	 * first (which also frees the cmd itself).
	 */
	if ((cmd->cmd_flags &
	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
	    CFLAG_SCBEXTERN)) == 0) {
		cmd->cmd_flags = CFLAG_FREE;
		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
	} else {
		mptsas_pkt_destroy_extern(mpt, cmd);
	}
}
3753
/*
 * kmem cache constructor and destructor:
 * When constructing, we bzero the cmd and allocate the dma handle
 * When destructing, just free the dma handle
 */
static int
mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cmd_t		*cmd = buf;
	mptsas_t		*mpt  = cdrarg;
	struct scsi_address	ap;
	uint_t			cookiec;
	ddi_dma_attr_t		arq_dma_attr;
	int			(*callback)(caddr_t);

	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	NDBG4(("mptsas_kmem_cache_constructor"));

	/* Dummy address used only for scsi_alloc_consistent_buf(). */
	ap.a_hba_tran = mpt->m_tran;
	ap.a_target = 0;
	ap.a_lun = 0;

	/*
	 * allocate a dma handle
	 */
	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/* Pre-allocate the auto-request-sense buffer for this command. */
	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, callback, NULL);
	if (cmd->cmd_arq_buf == NULL) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/*
	 * allocate a arq handle (single-segment attr so the sense buffer
	 * binds to exactly one cookie)
	 */
	arq_dma_attr = mpt->m_msg_dma_attr;
	arq_dma_attr.dma_attr_sgllen = 1;
	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		return (-1);
	}

	/* Bind the sense buffer; the binding lives as long as the cmd. */
	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		cmd->cmd_arq_buf = NULL;
		return (-1);
	}

	return (0);
}
3822
static void
mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cdrarg))
#endif
	mptsas_cmd_t	*cmd = buf;

	NDBG4(("mptsas_kmem_cache_destructor"));

	/*
	 * Tear down what the constructor built, in reverse order:
	 * unbind/free the ARQ handle, free the sense buffer, then
	 * free the data DMA handle.
	 */
	if (cmd->cmd_arqhandle) {
		(void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		cmd->cmd_arqhandle = NULL;
	}
	if (cmd->cmd_arq_buf) {
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_arq_buf = NULL;
	}
	if (cmd->cmd_dmahandle) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
	}
}
3847
/*
 * kmem cache constructor for the extra-SGL frame cache: allocates and
 * binds a contiguous DMA buffer large enough for the additional request
 * frames used when a command's S/G list overflows the main frame.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int			(*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/* Frames must be 16-byte aligned and physically contiguous. */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/* One frame's worth per possible chain, minus the main frame. */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3903
/*
 * kmem cache destructor for the extra-SGL frame cache: unbind and free
 * the DMA resources the constructor set up.  A NULL m_dma_hdl means the
 * constructor never completed, so there is nothing to release.
 */
static void
mptsas_cache_frames_destructor(void *buf, void *cdrarg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cdrarg))
#endif
	mptsas_cache_frames_t	*p = buf;
	if (p->m_dma_hdl != NULL) {
		(void) ddi_dma_unbind_handle(p->m_dma_hdl);
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		/*
		 * NOTE(review): m_phys_addr looks like a DMA address
		 * (integer) rather than a pointer; assigning NULL works
		 * but 0 would match the type — confirm against the
		 * struct definition.
		 */
		p->m_phys_addr = NULL;
		p->m_frames_addr = NULL;
		p->m_dma_hdl = NULL;
		p->m_acc_hdl = NULL;
	}

}
3922
/*
 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
 * for non-standard length cdb, pkt_private, status areas
 * if allocation fails, then deallocate all external space and the pkt
 *
 * Returns 0 on success, 1 on failure (in which case the cmd itself has
 * also been freed via mptsas_pkt_destroy_extern()).
 */
/* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t			cdbp, scbp, tgt;
	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	struct scsi_address	ap;
	size_t			senselength;
	ddi_dma_attr_t		ext_arq_dma_attr;
	uint_t			cookiec;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen		= statuslen;
	cmd->cmd_privlen	= (uchar_t)tgtlen;

	/* External CDB area when it exceeds the embedded cmd_cdb. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* External target-private area. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/* External status area plus a DMA-able extended sense buffer. */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */

		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
		    struct scsi_arq_status, sts_sensedata);
		cmd->cmd_rqslen = (uchar_t)senselength;

		/* Dummy address used only for scsi_alloc_consistent_buf(). */
		ap.a_hba_tran = mpt->m_tran;
		ap.a_target = 0;
		ap.a_lun = 0;

		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, senselength, B_READ,
		    callback, NULL);

		if (cmd->cmd_ext_arq_buf == NULL) {
			goto fail;
		}
		/*
		 * allocate a extern arq handle and bind the buf
		 */
		ext_arq_dma_attr = mpt->m_msg_dma_attr;
		ext_arq_dma_attr.dma_attr_sgllen = 1;
		if ((ddi_dma_alloc_handle(mpt->m_dip,
		    &ext_arq_dma_attr, callback,
		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
		    callback, NULL, &cmd->cmd_ext_arqcookie,
		    &cookiec)
		    != DDI_SUCCESS) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
	}
	return (0);
fail:
	/* Frees everything allocated so far, including the cmd itself. */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
4012
/*
 * deallocate external pkt space and deallocate the pkt
 *
 * The CFLAG_*EXTERN flags record exactly which areas were externally
 * allocated, so only those are freed here.  Panics on a double free
 * (CFLAG_FREE already set).
 */
static void
mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));

	if (cmd->cmd_flags & CFLAG_FREE) {
		mptsas_log(mpt, CE_PANIC,
		    "mptsas_pkt_destroy_extern: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
		/* Unbind before freeing the handle and sense buffer. */
		if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
			(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
		}
		if (cmd->cmd_ext_arqhandle) {
			ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
			cmd->cmd_ext_arqhandle = NULL;
		}
		if (cmd->cmd_ext_arq_buf)
			scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
	}
	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
}
4048
4049 /*
4050 * tran_sync_pkt(9E) - explicit DMA synchronization
4051 */
4052 /*ARGSUSED*/
4053 static void
4054 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4055 {
4056 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4057
4058 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4059 ap->a_target, (void *)pkt));
4060
4061 if (cmd->cmd_dmahandle) {
4062 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4063 (cmd->cmd_flags & CFLAG_DMASEND) ?
4064 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4065 }
4066 }
4067
/*
 * tran_dmafree(9E) - deallocate DMA resources allocated for command
 *
 * Unbinds the data DMA handle, the extended ARQ binding (if present),
 * and releases any extra SGL frame.  The pkt itself is not freed.
 */
/*ARGSUSED*/
static void
mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}

	if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
		cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
	}

	mptsas_free_extra_sgl_frame(mpt, cmd);
}
4093
4094 static void
4095 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4096 {
4097 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4098 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4099 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4100 DDI_DMA_SYNC_FORCPU);
4101 }
4102 (*pkt->pkt_comp)(pkt);
4103 }
4104
4105 static void
4106 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4107 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4108 {
4109 uint_t cookiec;
4110 mptti_t *dmap;
4111 uint32_t flags;
4112 pMpi2SGESimple64_t sge;
4113 pMpi2SGEChain64_t sgechain;
4114 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4115
4116 /*
4117 * Save the number of entries in the DMA
4118 * Scatter/Gather list
4119 */
4120 cookiec = cmd->cmd_cookiec;
4121
4122 NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4123
4124 /*
4125 * Set read/write bit in control.
4126 */
4127 if (cmd->cmd_flags & CFLAG_DMASEND) {
4128 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4129 } else {
4130 *control |= MPI2_SCSIIO_CONTROL_READ;
4131 }
4132
4133 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4134
4135 /*
4136 * We have 2 cases here. First where we can fit all the
4137 * SG elements into the main frame, and the case
4138 * where we can't.
4139 * If we have more cookies than we can attach to a frame
4140 * we will need to use a chain element to point
4141 * a location of memory where the rest of the S/G
4142 * elements reside.
4143 */
4144 if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4145 dmap = cmd->cmd_sg;
4146 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4147 while (cookiec--) {
4148 ddi_put32(acc_hdl,
4149 &sge->Address.Low, dmap->addr.address64.Low);
4150 ddi_put32(acc_hdl,
4151 &sge->Address.High, dmap->addr.address64.High);
4152 ddi_put32(acc_hdl, &sge->FlagsLength,
4153 dmap->count);
4154 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4155 flags |= ((uint32_t)
4156 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4157 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4158 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4159 MPI2_SGE_FLAGS_SHIFT);
4160
4161 /*
4162 * If this is the last cookie, we set the flags
4163 * to indicate so
4164 */
4165 if (cookiec == 0) {
4166 flags |=
4167 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4168 | MPI2_SGE_FLAGS_END_OF_BUFFER
4169 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4170 MPI2_SGE_FLAGS_SHIFT);
4171 }
4172 if (cmd->cmd_flags & CFLAG_DMASEND) {
4173 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4174 MPI2_SGE_FLAGS_SHIFT);
4175 } else {
4176 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4177 MPI2_SGE_FLAGS_SHIFT);
4178 }
4179 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4180 dmap++;
4181 sge++;
4182 }
4183 } else {
4184 /*
4185 * Hereby we start to deal with multiple frames.
4186 * The process is as follows:
4187 * 1. Determine how many frames are needed for SGL element
4188 * storage; Note that all frames are stored in contiguous
4189 * memory space and in 64-bit DMA mode each element is
4190 * 3 double-words (12 bytes) long.
4191 * 2. Fill up the main frame. We need to do this separately
4192 * since it contains the SCSI IO request header and needs
4193 * dedicated processing. Note that the last 4 double-words
4194 * of the SCSI IO header is for SGL element storage
4195 * (MPI2_SGE_IO_UNION).
4196 * 3. Fill the chain element in the main frame, so the DMA
4197 * engine can use the following frames.
4198 * 4. Enter a loop to fill the remaining frames. Note that the
4199 * last frame contains no chain element. The remaining
4200 * frames go into the mpt SGL buffer allocated on the fly,
4201 * not immediately following the main message frame, as in
4202 * Gen1.
4203 * Some restrictions:
4204 * 1. For 64-bit DMA, the simple element and chain element
4205 * are both of 3 double-words (12 bytes) in size, even
4206 * though all frames are stored in the first 4G of mem
4207 * range and the higher 32-bits of the address are always 0.
4208 * 2. On some controllers (like the 1064/1068), a frame can
4209 * hold SGL elements with the last 1 or 2 double-words
4210 * (4 or 8 bytes) un-used. On these controllers, we should
4211 * recognize that there's not enough room for another SGL
4212 * element and move the sge pointer to the next frame.
4213 */
4214 int i, j, k, l, frames, sgemax;
4215 int temp;
4216 uint8_t chainflags;
4217 uint16_t chainlength;
4218 mptsas_cache_frames_t *p;
4219
4220 /*
4221 * Sgemax is the number of SGE's that will fit
4222 * each extra frame and frames is total
4223 * number of frames we'll need. 1 sge entry per
4224 * frame is reseverd for the chain element thus the -1 below.
4225 */
4226 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4227 - 1);
4228 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4229
4230 /*
4231 * A little check to see if we need to round up the number
4232 * of frames we need
4233 */
4234 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4235 sgemax) > 1) {
4236 frames = (temp + 1);
4237 } else {
4238 frames = temp;
4239 }
4240 dmap = cmd->cmd_sg;
4241 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4242
4243 /*
4244 * First fill in the main frame
4245 */
4246 for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4247 ddi_put32(acc_hdl, &sge->Address.Low,
4248 dmap->addr.address64.Low);
4249 ddi_put32(acc_hdl, &sge->Address.High,
4250 dmap->addr.address64.High);
4251 ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4252 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4253 flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4254 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4255 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4256 MPI2_SGE_FLAGS_SHIFT);
4257
4258 /*
4259 * If this is the last SGE of this frame
4260 * we set the end of list flag
4261 */
4262 if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4263 flags |= ((uint32_t)
4264 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4265 MPI2_SGE_FLAGS_SHIFT);
4266 }
4267 if (cmd->cmd_flags & CFLAG_DMASEND) {
4268 flags |=
4269 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4270 MPI2_SGE_FLAGS_SHIFT);
4271 } else {
4272 flags |=
4273 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4274 MPI2_SGE_FLAGS_SHIFT);
4275 }
4276 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4277 dmap++;
4278 sge++;
4279 }
4280
4281 /*
4282 * Fill in the chain element in the main frame.
4283 * About calculation on ChainOffset:
4284 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4285 * in the end reserved for SGL element storage
4286 * (MPI2_SGE_IO_UNION); we should count it in our
4287 * calculation. See its definition in the header file.
4288 * 2. Constant j is the counter of the current SGL element
4289 * that will be processed, and (j - 1) is the number of
4290 * SGL elements that have been processed (stored in the
4291 * main frame).
4292 * 3. ChainOffset value should be in units of double-words (4
4293 * bytes) so the last value should be divided by 4.
4294 */
4295 ddi_put8(acc_hdl, &frame->ChainOffset,
4296 (sizeof (MPI2_SCSI_IO_REQUEST) -
4297 sizeof (MPI2_SGE_IO_UNION) +
4298 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4299 sgechain = (pMpi2SGEChain64_t)sge;
4300 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4301 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4302 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4303 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4304
4305 /*
4306 * The size of the next frame is the accurate size of space
4307 * (in bytes) used to store the SGL elements. j is the counter
4308 * of SGL elements. (j - 1) is the number of SGL elements that
4309 * have been processed (stored in frames).
4310 */
4311 if (frames >= 2) {
4312 chainlength = mpt->m_req_frame_size /
4313 sizeof (MPI2_SGE_SIMPLE64) *
4314 sizeof (MPI2_SGE_SIMPLE64);
4315 } else {
4316 chainlength = ((cookiec - (j - 1)) *
4317 sizeof (MPI2_SGE_SIMPLE64));
4318 }
4319
4320 p = cmd->cmd_extra_frames;
4321
4322 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4323 ddi_put32(acc_hdl, &sgechain->Address.Low,
4324 p->m_phys_addr);
4325 /* SGL is allocated in the first 4G mem range */
4326 ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4327
4328 /*
4329 * If there are more than 2 frames left we have to
4330 * fill in the next chain offset to the location of
4331 * the chain element in the next frame.
4332 * sgemax is the number of simple elements in an extra
4333 * frame. Note that the value NextChainOffset should be
4334 * in double-words (4 bytes).
4335 */
4336 if (frames >= 2) {
4337 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4338 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4339 } else {
4340 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4341 }
4342
4343 /*
4344 * Jump to next frame;
4345 * Starting here, chain buffers go into the per command SGL.
4346 * This buffer is allocated when chain buffers are needed.
4347 */
4348 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4349 i = cookiec;
4350
4351 /*
4352 * Start filling in frames with SGE's. If we
4353 * reach the end of frame and still have SGE's
4354 * to fill we need to add a chain element and
4355 * use another frame. j will be our counter
4356 * for what cookie we are at and i will be
4357 * the total cookiec. k is the current frame
4358 */
4359 for (k = 1; k <= frames; k++) {
4360 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4361
4362 /*
4363 * If we have reached the end of frame
4364 * and we have more SGE's to fill in
4365 * we have to fill the final entry
4366 * with a chain element and then
4367 * continue to the next frame
4368 */
4369 if ((l == (sgemax + 1)) && (k != frames)) {
4370 sgechain = (pMpi2SGEChain64_t)sge;
4371 j--;
4372 chainflags = (
4373 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4374 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4375 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4376 ddi_put8(p->m_acc_hdl,
4377 &sgechain->Flags, chainflags);
4378 /*
4379 * k is the frame counter and (k + 1)
4380 * is the number of the next frame.
4381 * Note that frames are in contiguous
4382 * memory space.
4383 */
4384 ddi_put32(p->m_acc_hdl,
4385 &sgechain->Address.Low,
4386 (p->m_phys_addr +
4387 (mpt->m_req_frame_size * k)));
4388 ddi_put32(p->m_acc_hdl,
4389 &sgechain->Address.High, 0);
4390
4391 /*
4392 * If there are more than 2 frames left
4393 * we have to next chain offset to
4394 * the location of the chain element
4395 * in the next frame and fill in the
4396 * length of the next chain
4397 */
4398 if ((frames - k) >= 2) {
4399 ddi_put8(p->m_acc_hdl,
4400 &sgechain->NextChainOffset,
4401 (sgemax *
4402 sizeof (MPI2_SGE_SIMPLE64))
4403 >> 2);
4404 ddi_put16(p->m_acc_hdl,
4405 &sgechain->Length,
4406 mpt->m_req_frame_size /
4407 sizeof (MPI2_SGE_SIMPLE64) *
4408 sizeof (MPI2_SGE_SIMPLE64));
4409 } else {
4410 /*
4411 * This is the last frame. Set
4412 * the NextChainOffset to 0 and
4413 * Length is the total size of
4414 * all remaining simple elements
4415 */
4416 ddi_put8(p->m_acc_hdl,
4417 &sgechain->NextChainOffset,
4418 0);
4419 ddi_put16(p->m_acc_hdl,
4420 &sgechain->Length,
4421 (cookiec - j) *
4422 sizeof (MPI2_SGE_SIMPLE64));
4423 }
4424
4425 /* Jump to the next frame */
4426 sge = (pMpi2SGESimple64_t)
4427 ((char *)p->m_frames_addr +
4428 (int)mpt->m_req_frame_size * k);
4429
4430 continue;
4431 }
4432
4433 ddi_put32(p->m_acc_hdl,
4434 &sge->Address.Low,
4435 dmap->addr.address64.Low);
4436 ddi_put32(p->m_acc_hdl,
4437 &sge->Address.High,
4438 dmap->addr.address64.High);
4439 ddi_put32(p->m_acc_hdl,
4440 &sge->FlagsLength, dmap->count);
4441 flags = ddi_get32(p->m_acc_hdl,
4442 &sge->FlagsLength);
4443 flags |= ((uint32_t)(
4444 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4445 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4446 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4447 MPI2_SGE_FLAGS_SHIFT);
4448
4449 /*
4450 * If we are at the end of the frame and
4451 * there is another frame to fill in
4452 * we set the last simple element as last
4453 * element
4454 */
4455 if ((l == sgemax) && (k != frames)) {
4456 flags |= ((uint32_t)
4457 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4458 MPI2_SGE_FLAGS_SHIFT);
4459 }
4460
4461 /*
4462 * If this is the final cookie we
4463 * indicate it by setting the flags
4464 */
4465 if (j == i) {
4466 flags |= ((uint32_t)
4467 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4468 MPI2_SGE_FLAGS_END_OF_BUFFER |
4469 MPI2_SGE_FLAGS_END_OF_LIST) <<
4470 MPI2_SGE_FLAGS_SHIFT);
4471 }
4472 if (cmd->cmd_flags & CFLAG_DMASEND) {
4473 flags |=
4474 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4475 MPI2_SGE_FLAGS_SHIFT);
4476 } else {
4477 flags |=
4478 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4479 MPI2_SGE_FLAGS_SHIFT);
4480 }
4481 ddi_put32(p->m_acc_hdl,
4482 &sge->FlagsLength, flags);
4483 dmap++;
4484 sge++;
4485 }
4486 }
4487
4488 /*
4489 * Sync DMA with the chain buffers that were just created
4490 */
4491 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4492 }
4493 }
4494
4495 /*
4496 * Interrupt handling
4497 * Utility routine. Poll for status of a command sent to HBA
4498 * without interrupts (a FLAG_NOINTR command).
4499 */
int
mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
{
	int	rval = TRUE;

	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));

	/*
	 * Kick the HBA so the command gets started; task management
	 * commands are submitted through a different path and are not
	 * restarted here.
	 */
	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
		mptsas_restart_hba(mpt);
	}

	/*
	 * Wait, using drv_usecwait(), long enough for the command to
	 * reasonably return from the target if the target isn't
	 * "dead". A polled command may well be sent from scsi_poll, and
	 * there are retries built in to scsi_poll if the transport
	 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
	 * and retries the transport up to scsi_poll_busycnt times
	 * (currently 60) if
	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
	 *
	 * limit the waiting to avoid a hang in the event that the
	 * cmd never gets started but we are still receiving interrupts
	 */
	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
			NDBG5(("mptsas_poll: command incomplete"));
			rval = FALSE;
			break;
		}
	}

	if (rval == FALSE) {

		/*
		 * this isn't supposed to happen, the hba must be wedged
		 * Mark this cmd as a timeout.
		 */
		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
		    (STAT_TIMEOUT|STAT_ABORTED));

		if (poll_cmd->cmd_queued == FALSE) {

			NDBG5(("mptsas_poll: not on waitq"));

			/*
			 * Not on the wait queue, so it was issued to the
			 * hardware; record that it at least got that far.
			 */
			poll_cmd->cmd_pkt->pkt_state |=
			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
		} else {

			/* find and remove it from the waitq */
			NDBG5(("mptsas_poll: delete from waitq"));
			mptsas_waitq_delete(mpt, poll_cmd);
		}

	}
	mptsas_fma_check(mpt, poll_cmd);
	NDBG5(("mptsas_poll: done"));
	return (rval);
}
4560
4561 /*
4562 * Used for polling cmds and TM function
4563 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int		cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uint32_t	int_mask;

	NDBG5(("mptsas_wait_intr"));

	/* Note that we're polling so the interrupt handler can tell. */
	mpt->m_polled_intr = 1;

	/*
	 * Get the current interrupt mask and disable interrupts. When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll up to polltime times; each pass that finds no valid
	 * reply descriptor waits 1000us before retrying, so this polls
	 * for roughly polltime milliseconds in total.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/*
		 * An all-ones descriptor word means this post queue slot
		 * has not been written by the IOC yet; wait and retry.
		 */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, reply_desc_union);

		/* Advance our post queue index, wrapping at the depth. */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mpt->m_polled_intr = 0;

		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Clear polling flag, re-enable interrupts and quit.
	 */
	mpt->m_polled_intr = 0;
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
4633
/*
 * Handle a SCSI IO success reply descriptor: look the command up by
 * SMID, mark the packet completed with full transfer and good status,
 * then either wake a waiting passthru thread or queue the command on
 * the done queue.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO. First, do a sanity
	 * check on the SMID. The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Success: report good status and no residual to the target layer. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * Passthru commands are completed by waking their waiter rather
	 * than going through the done queue.
	 */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
4703
/*
 * Handle an address reply descriptor: validate the reply frame address,
 * read the reply frame, and dispatch on the Function it carries (SCSI IO
 * error, task management, firmware download, event notification, or diag
 * buffer post).  For most paths the reply frame is returned to the free
 * queue before the command is completed onto the done queue.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range we should ignore this
	 * message and exit the interrupt handler.  The address must lie
	 * within the reply frame pool and be frame-aligned.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	/* Sync the frame for CPU access before reading it. */
	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		/*
		 * Passthru/config/diag commands have waiters; hand them the
		 * reply frame address and wake all three CVs (the flags tell
		 * the waiters which one is really theirs).
		 */
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		/* Each reply frame has a dedicated taskq argument slot. */
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			NDBG20(("dropping event received during reset"));
			return;
		}

		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* Firmware-internal commands are completed by their own path. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
4931
/*
 * Decode the status of a completed SCSI IO from its MPI2 reply frame and
 * translate it into scsi_pkt pkt_reason/pkt_state/pkt_resid values for
 * the target driver.  Handles check condition (including auto request
 * sense copy-out and unit-attention driven dynamic reconfiguration),
 * the various IOCStatus sub-cases of good SCSI status, queue full,
 * busy and reservation conflict.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	struct buf		*bp;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;

	/* Pick the external ARQ buffer if one was set up for this cmd. */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	/* Device reported gone with no SCSI status: drain and retry later. */
	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte of the response info is examined. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/* Build the auto-request-sense status in the packet's scb. */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state  = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* Copy at most cmd_rqslen bytes of sense data. */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			/*
			 * LUN configuration changed under us; kick off a
			 * dynamic reconfiguration of this target via the
			 * DR taskq.
			 */
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/* Good SCSI status; the IOCStatus refines the outcome. */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_BUS_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5183
5184 static void
5185 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5186 mptsas_cmd_t *cmd)
5187 {
5188 uint8_t task_type;
5189 uint16_t ioc_status;
5190 uint32_t log_info;
5191 uint16_t dev_handle;
5192 struct scsi_pkt *pkt = CMD2PKT(cmd);
5193
5194 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5195 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5196 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5197 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5198
5199 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5200 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5201 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5202 task_type, ioc_status, log_info, dev_handle);
5203 pkt->pkt_reason = CMD_INCOMPLETE;
5204 return;
5205 }
5206
5207 switch (task_type) {
5208 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5209 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5210 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5211 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5212 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5213 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5214 break;
5215 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5216 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5217 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5218 /*
5219 * Check for invalid DevHandle of 0 in case application
5220 * sends bad command. DevHandle of 0 could cause problems.
5221 */
5222 if (dev_handle == 0) {
5223 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5224 " DevHandle of 0.");
5225 } else {
5226 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5227 task_type);
5228 }
5229 break;
5230 default:
5231 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5232 task_type);
5233 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5234 break;
5235 }
5236 }
5237
/*
 * Body of a done-queue helper thread.  Each helper owns one entry of
 * m_doneq_thread_id and drains that entry's private done queue, calling
 * the target completion routine for each command.  The thread runs until
 * MPTSAS_DONEQ_THREAD_ACTIVE is cleared by teardown code, then announces
 * its exit via m_doneq_thread_cv.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		if (!item->doneq) {
			/*
			 * Queue is empty: sleep until work arrives or the
			 * thread is asked to exit.  A spurious or shutdown
			 * wakeup is harmless; the rm below returns NULL and
			 * the while condition is re-evaluated.
			 */
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list lock across the completion callback so it
		 * can't deadlock against code queueing more completions.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Thread is exiting: update the count and wake any waiter. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5269
5270
5271 /*
5272 * mpt interrupt handler.
5273 */
5274 static uint_t
5275 mptsas_intr(caddr_t arg1, caddr_t arg2)
5276 {
5277 mptsas_t *mpt = (void *)arg1;
5278 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5279 uchar_t did_reply = FALSE;
5280
5281 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5282
5283 mutex_enter(&mpt->m_mutex);
5284
5285 /*
5286 * If interrupts are shared by two channels then check whether this
5287 * interrupt is genuinely for this channel by making sure first the
5288 * chip is in high power state.
5289 */
5290 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5291 (mpt->m_power_level != PM_LEVEL_D0)) {
5292 mutex_exit(&mpt->m_mutex);
5293 return (DDI_INTR_UNCLAIMED);
5294 }
5295
5296 /*
5297 * If polling, interrupt was triggered by some shared interrupt because
5298 * IOC interrupts are disabled during polling, so polling routine will
5299 * handle any replies. Considering this, if polling is happening,
5300 * return with interrupt unclaimed.
5301 */
5302 if (mpt->m_polled_intr) {
5303 mutex_exit(&mpt->m_mutex);
5304 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5305 return (DDI_INTR_UNCLAIMED);
5306 }
5307
5308 /*
5309 * Read the istat register.
5310 */
5311 if ((INTPENDING(mpt)) != 0) {
5312 /*
5313 * read fifo until empty.
5314 */
5315 #ifndef __lock_lint
5316 _NOTE(CONSTCOND)
5317 #endif
5318 while (TRUE) {
5319 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5320 DDI_DMA_SYNC_FORCPU);
5321 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5322 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5323
5324 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5325 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5326 ddi_get32(mpt->m_acc_post_queue_hdl,
5327 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5328 break;
5329 }
5330
5331 /*
5332 * The reply is valid, process it according to its
5333 * type. Also, set a flag for updating the reply index
5334 * after they've all been processed.
5335 */
5336 did_reply = TRUE;
5337
5338 mptsas_process_intr(mpt, reply_desc_union);
5339
5340 /*
5341 * Increment post index and roll over if needed.
5342 */
5343 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5344 mpt->m_post_index = 0;
5345 }
5346 }
5347
5348 /*
5349 * Update the global reply index if at least one reply was
5350 * processed.
5351 */
5352 if (did_reply) {
5353 ddi_put32(mpt->m_datap,
5354 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5355 }
5356 } else {
5357 mutex_exit(&mpt->m_mutex);
5358 return (DDI_INTR_UNCLAIMED);
5359 }
5360 NDBG1(("mptsas_intr complete"));
5361
5362 /*
5363 * If no helper threads are created, process the doneq in ISR. If
5364 * helpers are created, use the doneq length as a metric to measure the
5365 * load on the interrupt CPU. If it is long enough, which indicates the
5366 * load is heavy, then we deliver the IO completions to the helpers.
5367 * This measurement has some limitations, although it is simple and
5368 * straightforward and works well for most of the cases at present.
5369 */
5370 if (!mpt->m_doneq_thread_n ||
5371 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5372 mptsas_doneq_empty(mpt);
5373 } else {
5374 mptsas_deliver_doneq_thread(mpt);
5375 }
5376
5377 /*
5378 * If there are queued cmd, start them now.
5379 */
5380 if (mpt->m_waitq != NULL) {
5381 mptsas_restart_waitq(mpt);
5382 }
5383
5384 mutex_exit(&mpt->m_mutex);
5385 return (DDI_INTR_CLAIMED);
5386 }
5387
5388 static void
5389 mptsas_process_intr(mptsas_t *mpt,
5390 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5391 {
5392 uint8_t reply_type;
5393
5394 ASSERT(mutex_owned(&mpt->m_mutex));
5395
5396 /*
5397 * The reply is valid, process it according to its
5398 * type. Also, set a flag for updated the reply index
5399 * after they've all been processed.
5400 */
5401 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5402 &reply_desc_union->Default.ReplyFlags);
5403 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5404 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5405 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5406 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5407 mptsas_handle_address_reply(mpt, reply_desc_union);
5408 } else {
5409 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5410 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5411 }
5412
5413 /*
5414 * Clear the reply descriptor for re-use and increment
5415 * index.
5416 */
5417 ddi_put64(mpt->m_acc_post_queue_hdl,
5418 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5419 0xFFFFFFFFFFFFFFFF);
5420 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5421 DDI_DMA_SYNC_FORDEV);
5422 }
5423
5424 /*
5425 * handle qfull condition
5426 */
5427 static void
5428 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5429 {
5430 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5431
5432 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5433 (ptgt->m_qfull_retries == 0)) {
5434 /*
5435 * We have exhausted the retries on QFULL, or,
5436 * the target driver has indicated that it
5437 * wants to handle QFULL itself by setting
5438 * qfull-retries capability to 0. In either case
5439 * we want the target driver's QFULL handling
5440 * to kick in. We do this by having pkt_reason
5441 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5442 */
5443 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5444 } else {
5445 if (ptgt->m_reset_delay == 0) {
5446 ptgt->m_t_throttle =
5447 max((ptgt->m_t_ncmds - 2), 0);
5448 }
5449
5450 cmd->cmd_pkt_flags |= FLAG_HEAD;
5451 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5452 cmd->cmd_flags |= CFLAG_RETRY;
5453
5454 (void) mptsas_accept_pkt(mpt, cmd);
5455
5456 /*
5457 * when target gives queue full status with no commands
5458 * outstanding (m_t_ncmds == 0), throttle is set to 0
5459 * (HOLD_THROTTLE), and the queue full handling start
5460 * (see psarc/1994/313); if there are commands outstanding,
5461 * throttle is set to (m_t_ncmds - 2)
5462 */
5463 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5464 /*
5465 * By setting throttle to QFULL_THROTTLE, we
5466 * avoid submitting new commands and in
5467 * mptsas_restart_cmd find out slots which need
5468 * their throttles to be cleared.
5469 */
5470 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5471 if (mpt->m_restart_cmd_timeid == 0) {
5472 mpt->m_restart_cmd_timeid =
5473 timeout(mptsas_restart_cmd, mpt,
5474 ptgt->m_qfull_retry_interval);
5475 }
5476 }
5477 }
5478 }
5479
5480 mptsas_phymask_t
5481 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5482 {
5483 mptsas_phymask_t phy_mask = 0;
5484 uint8_t i = 0;
5485
5486 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5487
5488 ASSERT(mutex_owned(&mpt->m_mutex));
5489
5490 /*
5491 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5492 */
5493 if (physport == 0xFF) {
5494 return (0);
5495 }
5496
5497 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5498 if (mpt->m_phy_info[i].attached_devhdl &&
5499 (mpt->m_phy_info[i].phy_mask != 0) &&
5500 (mpt->m_phy_info[i].port_num == physport)) {
5501 phy_mask = mpt->m_phy_info[i].phy_mask;
5502 break;
5503 }
5504 }
5505 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5506 mpt->m_instance, physport, phy_mask));
5507 return (phy_mask);
5508 }
5509
5510 /*
5511 * mpt free device handle after device gone, by use of passthrough
5512 */
5513 static int
5514 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5515 {
5516 Mpi2SasIoUnitControlRequest_t req;
5517 Mpi2SasIoUnitControlReply_t rep;
5518 int ret;
5519
5520 ASSERT(mutex_owned(&mpt->m_mutex));
5521
5522 /*
5523 * Need to compose a SAS IO Unit Control request message
5524 * and call mptsas_do_passthru() function
5525 */
5526 bzero(&req, sizeof (req));
5527 bzero(&rep, sizeof (rep));
5528
5529 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5530 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5531 req.DevHandle = LE_16(devhdl);
5532
5533 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5534 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5535 if (ret != 0) {
5536 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5537 "Control error %d", ret);
5538 return (DDI_FAILURE);
5539 }
5540
5541 /* do passthrough success, check the ioc status */
5542 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5543 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5544 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5545 return (DDI_FAILURE);
5546 }
5547
5548 return (DDI_SUCCESS);
5549 }
5550
5551 static void
5552 mptsas_update_phymask(mptsas_t *mpt)
5553 {
5554 mptsas_phymask_t mask = 0, phy_mask;
5555 char *phy_mask_name;
5556 uint8_t current_port;
5557 int i, j;
5558
5559 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5560
5561 ASSERT(mutex_owned(&mpt->m_mutex));
5562
5563 (void) mptsas_get_sas_io_unit_page(mpt);
5564
5565 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5566
5567 for (i = 0; i < mpt->m_num_phys; i++) {
5568 phy_mask = 0x00;
5569
5570 if (mpt->m_phy_info[i].attached_devhdl == 0)
5571 continue;
5572
5573 bzero(phy_mask_name, sizeof (phy_mask_name));
5574
5575 current_port = mpt->m_phy_info[i].port_num;
5576
5577 if ((mask & (1 << i)) != 0)
5578 continue;
5579
5580 for (j = 0; j < mpt->m_num_phys; j++) {
5581 if (mpt->m_phy_info[j].attached_devhdl &&
5582 (mpt->m_phy_info[j].port_num == current_port)) {
5583 phy_mask |= (1 << j);
5584 }
5585 }
5586 mask = mask | phy_mask;
5587
5588 for (j = 0; j < mpt->m_num_phys; j++) {
5589 if ((phy_mask >> j) & 0x01) {
5590 mpt->m_phy_info[j].phy_mask = phy_mask;
5591 }
5592 }
5593
5594 (void) sprintf(phy_mask_name, "%x", phy_mask);
5595
5596 mutex_exit(&mpt->m_mutex);
5597 /*
5598 * register a iport, if the port has already been existed
5599 * SCSA will do nothing and just return.
5600 */
5601 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5602 mutex_enter(&mpt->m_mutex);
5603 }
5604 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5605 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5606 }
5607
5608 /*
5609 * mptsas_handle_dr is a task handler for DR, the DR action includes:
5610 * 1. Directly attched Device Added/Removed.
5611 * 2. Expander Device Added/Removed.
5612 * 3. Indirectly Attached Device Added/Expander.
5613 * 4. LUNs of a existing device status change.
5614 * 5. RAID volume created/deleted.
5615 * 6. Member of RAID volume is released because of RAID deletion.
5616 * 7. Physical disks are removed because of RAID creation.
5617 */
5618 static void
5619 mptsas_handle_dr(void *args) {
5620 mptsas_topo_change_list_t *topo_node = NULL;
5621 mptsas_topo_change_list_t *save_node = NULL;
5622 mptsas_t *mpt;
5623 dev_info_t *parent = NULL;
5624 mptsas_phymask_t phymask = 0;
5625 char *phy_mask_name;
5626 uint8_t flags = 0, physport = 0xff;
5627 uint8_t port_update = 0;
5628 uint_t event;
5629
5630 topo_node = (mptsas_topo_change_list_t *)args;
5631
5632 mpt = topo_node->mpt;
5633 event = topo_node->event;
5634 flags = topo_node->flags;
5635
5636 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5637
5638 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
5639
5640 switch (event) {
5641 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5642 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5643 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
5644 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
5645 /*
5646 * Direct attached or expander attached device added
5647 * into system or a Phys Disk that is being unhidden.
5648 */
5649 port_update = 1;
5650 }
5651 break;
5652 case MPTSAS_DR_EVENT_RECONFIG_SMP:
5653 /*
5654 * New expander added into system, it must be the head
5655 * of topo_change_list_t
5656 */
5657 port_update = 1;
5658 break;
5659 default:
5660 port_update = 0;
5661 break;
5662 }
5663 /*
5664 * All cases port_update == 1 may cause initiator port form change
5665 */
5666 mutex_enter(&mpt->m_mutex);
5667 if (mpt->m_port_chng && port_update) {
5668 /*
5669 * mpt->m_port_chng flag indicates some PHYs of initiator
5670 * port have changed to online. So when expander added or
5671 * directly attached device online event come, we force to
5672 * update port information by issueing SAS IO Unit Page and
5673 * update PHYMASKs.
5674 */
5675 (void) mptsas_update_phymask(mpt);
5676 mpt->m_port_chng = 0;
5677
5678 }
5679 mutex_exit(&mpt->m_mutex);
5680 while (topo_node) {
5681 phymask = 0;
5682 if (parent == NULL) {
5683 physport = topo_node->un.physport;
5684 event = topo_node->event;
5685 flags = topo_node->flags;
5686 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
5687 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
5688 /*
5689 * For all offline events, phymask is known
5690 */
5691 phymask = topo_node->un.phymask;
5692 goto find_parent;
5693 }
5694 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
5695 goto handle_topo_change;
5696 }
5697 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
5698 phymask = topo_node->un.phymask;
5699 goto find_parent;
5700 }
5701
5702 if ((flags ==
5703 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
5704 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
5705 /*
5706 * There is no any field in IR_CONFIG_CHANGE
5707 * event indicate physport/phynum, let's get
5708 * parent after SAS Device Page0 request.
5709 */
5710 goto handle_topo_change;
5711 }
5712
5713 mutex_enter(&mpt->m_mutex);
5714 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
5715 /*
5716 * If the direct attached device added or a
5717 * phys disk is being unhidden, argument
5718 * physport actually is PHY#, so we have to get
5719 * phymask according PHY#.
5720 */
5721 physport = mpt->m_phy_info[physport].port_num;
5722 }
5723
5724 /*
5725 * Translate physport to phymask so that we can search
5726 * parent dip.
5727 */
5728 phymask = mptsas_physport_to_phymask(mpt,
5729 physport);
5730 mutex_exit(&mpt->m_mutex);
5731
5732 find_parent:
5733 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
5734 /*
5735 * For RAID topology change node, write the iport name
5736 * as v0.
5737 */
5738 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5739 (void) sprintf(phy_mask_name, "v0");
5740 } else {
5741 /*
5742 * phymask can bo 0 if the drive has been
5743 * pulled by the time an add event is
5744 * processed. If phymask is 0, just skip this
5745 * event and continue.
5746 */
5747 if (phymask == 0) {
5748 mutex_enter(&mpt->m_mutex);
5749 save_node = topo_node;
5750 topo_node = topo_node->next;
5751 ASSERT(save_node);
5752 kmem_free(save_node,
5753 sizeof (mptsas_topo_change_list_t));
5754 mutex_exit(&mpt->m_mutex);
5755
5756 parent = NULL;
5757 continue;
5758 }
5759 (void) sprintf(phy_mask_name, "%x", phymask);
5760 }
5761 parent = scsi_hba_iport_find(mpt->m_dip,
5762 phy_mask_name);
5763 if (parent == NULL) {
5764 mptsas_log(mpt, CE_WARN, "Failed to find an "
5765 "iport, should not happen!");
5766 goto out;
5767 }
5768
5769 }
5770 ASSERT(parent);
5771 handle_topo_change:
5772
5773 mutex_enter(&mpt->m_mutex);
5774 /*
5775 * If HBA is being reset, don't perform operations depending
5776 * on the IOC. We must free the topo list, however.
5777 */
5778 if (!mpt->m_in_reset)
5779 mptsas_handle_topo_change(topo_node, parent);
5780 else
5781 NDBG20(("skipping topo change received during reset"));
5782 save_node = topo_node;
5783 topo_node = topo_node->next;
5784 ASSERT(save_node);
5785 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
5786 mutex_exit(&mpt->m_mutex);
5787
5788 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5789 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
5790 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
5791 /*
5792 * If direct attached device associated, make sure
5793 * reset the parent before start the next one. But
5794 * all devices associated with expander shares the
5795 * parent. Also, reset parent if this is for RAID.
5796 */
5797 parent = NULL;
5798 }
5799 }
5800 out:
5801 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5802 }
5803
/*
 * Apply a single topology-change node: online/offline a target or SMP
 * (expander) device, or just free a device handle.  Called from
 * mptsas_handle_dr with m_mutex held; the mutex is dropped and
 * re-acquired around NDI configure/unconfigure operations.  'parent' is
 * the iport devinfo node the change applies under (may be unused for
 * REMOVE_HANDLE, and is looked up locally for RAID phys-disk reconfig).
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = mptsas_search_by_devhdl(
			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK than there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * A phys disk being unhidden: look up the iport by
			 * the target's own phymask, ignoring the passed-in
			 * parent.
			 */
			phymask = ptgt->m_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_sas_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);
				mptsas_smhba_set_phy_props(mpt,
				    ddi_get_name_addr(parent), parent,
				    1, &attached_devhdl);
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
		devhdl = topo_node->devhdl;
		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_sas_wwn;
		phy = ptgt->m_phynum;

		/* Build the unit address: WWN if present, else PHY number. */
		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
			    rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
			    ptgt->m_sas_wwn, ptgt->m_phymask);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 to build the new SMP node's data. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(smptbl, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			mptsas_smp_free(smptbl, psmp->m_sasaddr,
			    psmp->m_phymask);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6232
6233 /*
6234 * Record the event if its type is enabled in mpt instance by ioctl.
6235 */
6236 static void
6237 mptsas_record_event(void *args)
6238 {
6239 m_replyh_arg_t *replyh_arg;
6240 pMpi2EventNotificationReply_t eventreply;
6241 uint32_t event, rfm;
6242 mptsas_t *mpt;
6243 int i, j;
6244 uint16_t event_data_len;
6245 boolean_t sendAEN = FALSE;
6246
6247 replyh_arg = (m_replyh_arg_t *)args;
6248 rfm = replyh_arg->rfm;
6249 mpt = replyh_arg->mpt;
6250
6251 eventreply = (pMpi2EventNotificationReply_t)
6252 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6253 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6254
6255
6256 /*
6257 * Generate a system event to let anyone who cares know that a
6258 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6259 * event mask is set to.
6260 */
6261 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6262 sendAEN = TRUE;
6263 }
6264
6265 /*
6266 * Record the event only if it is not masked. Determine which dword
6267 * and bit of event mask to test.
6268 */
6269 i = (uint8_t)(event / 32);
6270 j = (uint8_t)(event % 32);
6271 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6272 i = mpt->m_event_index;
6273 mpt->m_events[i].Type = event;
6274 mpt->m_events[i].Number = ++mpt->m_event_number;
6275 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6276 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6277 &eventreply->EventDataLength);
6278
6279 if (event_data_len > 0) {
6280 /*
6281 * Limit data to size in m_event entry
6282 */
6283 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6284 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6285 }
6286 for (j = 0; j < event_data_len; j++) {
6287 mpt->m_events[i].Data[j] =
6288 ddi_get32(mpt->m_acc_reply_frame_hdl,
6289 &(eventreply->EventData[j]));
6290 }
6291
6292 /*
6293 * check for index wrap-around
6294 */
6295 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6296 i = 0;
6297 }
6298 mpt->m_event_index = (uint8_t)i;
6299
6300 /*
6301 * Set flag to send the event.
6302 */
6303 sendAEN = TRUE;
6304 }
6305 }
6306
6307 /*
6308 * Generate a system event if flag is set to let anyone who cares know
6309 * that an event has occurred.
6310 */
6311 if (sendAEN) {
6312 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6313 "SAS", NULL, NULL, DDI_NOSLEEP);
6314 }
6315 }
6316
6317 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6318 /*
6319 * handle sync events from ioc in interrupt
6320 * return value:
6321 * DDI_SUCCESS: The event is handled by this func
6322 * DDI_FAILURE: Event is not handled
6323 */
static int
mptsas_handle_event_sync(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	uint_t				iocstatus;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	/* Called from interrupt context with the instance mutex held. */
	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Locate the event notification reply frame inside the reply frame
	 * pool using the frame's DMA address offset.
	 */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);

	/*
	 * Log any non-zero IOCStatus.  The leading '!' on the first message
	 * routes it to the system log only (cmn_err(9F) convention), since
	 * "log info available" is informational rather than a failure.
	 */
	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
	    &eventreply->IOCStatus)) {
		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			mptsas_log(mpt, CE_WARN,
			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		} else {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		}
	}

	/*
	 * figure out what kind of event we got and handle accordingly
	 */
	switch (event) {
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
		uint8_t				num_entries, expstatus, phy;
		uint8_t				phystatus, physport, state, i;
		uint8_t				start_phy_num, link_rate;
		uint16_t			dev_handle, reason_code;
		uint16_t			enc_handle, expd_handle;
		char				string[80], curr[80], prev[80];
		mptsas_topo_change_list_t	*topo_head = NULL;
		mptsas_topo_change_list_t	*topo_tail = NULL;
		mptsas_topo_change_list_t	*topo_node = NULL;
		mptsas_target_t			*ptgt;
		mptsas_smp_t			*psmp;
		mptsas_hash_table_t		*tgttbl, *smptbl;
		uint8_t				flags = 0, exp_flag;
		smhba_info_t			*pSmhba = NULL;

		NDBG20(("mptsas_handle_event_sync: SAS topology change"));

		tgttbl = &mpt->m_active->m_tgttbl;
		smptbl = &mpt->m_active->m_smptbl;

		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
		    eventreply->EventData;

		/*
		 * Pull the fixed portion of the event data out of the DMA
		 * reply frame before walking the per-PHY entries.
		 */
		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->EnclosureHandle);
		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpanderDevHandle);
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->NumEntries);
		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->StartPhyNum);
		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpStatus);
		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->PhysicalPort);

		string[0] = 0;
		/*
		 * A non-zero expander handle means the listed PHYs sit
		 * behind an expander; handle the expander's own status
		 * change first, queuing reconfig/offline work as needed.
		 */
		if (expd_handle) {
			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
			switch (expstatus) {
			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
				(void) sprintf(string, " added");
				/*
				 * New expander device added
				 */
				mpt->m_port_chng = 1;
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
				topo_node->un.physport = physport;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				/* Append to the tail of the topo list. */
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "removed");
				/*
				 * Only queue an offline if the expander is
				 * one we previously discovered.
				 */
				psmp = mptsas_search_by_devhdl(smptbl,
				    expd_handle);
				if (psmp == NULL)
					break;

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = psmp->m_phymask;
				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
				break;
			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "delaying removal");
				break;
			default:
				break;
			}
		} else {
			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
		}

		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
		    enc_handle, expd_handle, string));
		/*
		 * Walk each per-PHY entry in the change list and queue the
		 * corresponding DR work item.
		 */
		for (i = 0; i < num_entries; i++) {
			phy = i + start_phy_num;
			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].PhyStatus);
			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
			/*
			 * Filter out processing of Phy Vacant Status unless
			 * the reason code is "Not Responding". Process all
			 * other combinations of Phy Status and Reason Codes.
			 */
			if ((phystatus &
			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
			    (reason_code !=
			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
				continue;
			}
			curr[0] = 0;
			prev[0] = 0;
			string[0] = 0;
			switch (reason_code) {
			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d added", mpt->m_instance, phy,
				    physport, dev_handle));
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				/* Describe the current link state for NDBG. */
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at 6.0 "
					    "Gbps");
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}
				/*
				 * New target device added into the system.
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				if (expd_handle == 0) {
					/*
					 * Per MPI 2, if the expander dev
					 * handle is 0, it's a directly
					 * attached device, so the driver
					 * uses the PHY number to decide
					 * which iport is associated.
					 */
					physport = phy;
					mpt->m_port_chng = 1;
				}
				topo_node->un.physport = physport;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d removed", mpt->m_instance,
				    phy, physport, dev_handle));
				/*
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				/*
				 * Target device is removed from the system
				 * before the device is really offline from
				 * the system.
				 */
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    dev_handle);
				/*
				 * If ptgt is NULL here, it means that the
				 * DevHandle is not in the hash table. This is
				 * reasonable sometimes. For example, if a
				 * disk was pulled, then added, then pulled
				 * again, the disk will not have been put into
				 * the hash table because the add event will
				 * have an invalid phymask. BUT, this does not
				 * mean that the DevHandle is invalid. The
				 * controller will still have a valid DevHandle
				 * that must be removed. To do this, use the
				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
				 */
				if (ptgt == NULL) {
					topo_node = kmem_zalloc(
					    sizeof (mptsas_topo_change_list_t),
					    KM_SLEEP);
					topo_node->mpt = mpt;
					topo_node->un.phymask = 0;
					topo_node->event =
					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
					topo_node->devhdl = dev_handle;
					topo_node->flags = flags;
					topo_node->object = NULL;
					if (topo_head == NULL) {
						topo_head = topo_tail =
						    topo_node;
					} else {
						topo_tail->next = topo_node;
						topo_tail = topo_node;
					}
					break;
				}

				/*
				 * Update the DR flag immediately to avoid
				 * I/O failure before failover finishes.
				 * Note the mutex protection: we must grab
				 * m_tx_waitq_mutex while setting m_dr_flag
				 * because we won't add the following command
				 * into the waitq; instead, we must return
				 * TRAN_BUSY in the tran_start context.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				/*
				 * NOTE(review): m_phy_info is indexed by the
				 * list entry index 'i' here rather than the
				 * phy number (i + start_phy_num); verify this
				 * is intended when StartPhyNum != 0.
				 */
				pSmhba = &mpt->m_phy_info[i].smhba_info;
				pSmhba->negotiated_link_rate = state;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_REMOVE,
					    &mpt->m_phy_info[i].smhba_info);
					mpt->m_phy_info[i].smhba_info.
					    negotiated_link_rate
					    = 0x1;
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_OFFLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					/*
					 * A direct-attached PHY coming online
					 * is treated as a port change.
					 */
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at "
					    "6.0 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}

				/* Describe the previous link state, too. */
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(prev, ", was disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(prev, ", was offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(prev, ", was SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(prev, ", was SMP reset "
					    "in progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(prev, ", was online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(prev, ", was online at "
					    "3.0 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(prev, ", was online at "
					    "6.0 Gbps");
					break;
				default:
					break;
				}
				(void) sprintf(&string[strlen(string)], "link "
				    "changed, ");
				break;
			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
				continue;
			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
				(void) sprintf(&string[strlen(string)],
				    "target not responding, delaying "
				    "removal");
				break;
			}
			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
			    mpt->m_instance, phy, dev_handle, string, curr,
			    prev));
		}
		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
	{
		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
		mptsas_topo_change_list_t		*topo_head = NULL;
		mptsas_topo_change_list_t		*topo_tail = NULL;
		mptsas_topo_change_list_t		*topo_node = NULL;
		mptsas_target_t				*ptgt;
		mptsas_hash_table_t			*tgttbl;
		uint8_t					num_entries, i, reason;
		uint16_t				volhandle, diskhandle;

		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
		    eventreply->EventData;
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irChangeList->NumElements);

		tgttbl = &mpt->m_active->m_tgttbl;

		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
		    mpt->m_instance));

		/*
		 * Walk each config element and queue DR work for volume
		 * and physical-disk create/delete/hide/unhide changes.
		 */
		for (i = 0; i < num_entries; i++) {
			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].ReasonCode);
			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].VolDevHandle);
			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);

			switch (reason) {
			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			{
				NDBG20(("mptsas %d volume added\n",
				    mpt->m_instance));

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);

				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				/* 0xff physport marks a RAID volume. */
				topo_node->un.physport = 0xff;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			{
				NDBG20(("mptsas %d volume deleted\n",
				    mpt->m_instance));
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    volhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Clear any flags related to volume
				 */
				(void) mptsas_delete_volume(mpt, volhandle);

				/*
				 * Update the DR flag immediately to avoid
				 * I/O failure.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			{
				/*
				 * A physical disk was absorbed into an IR
				 * volume; offline the bare-disk target if
				 * we had one.
				 */
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    diskhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Update the DR flag immediately to avoid
				 * I/O failure.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			{
				/*
				 * The physical drive is released by an IR
				 * volume. But we cannot get the physport
				 * or phynum from the event data, so we only
				 * can get the physport/phynum after SAS
				 * Device Page0 request for the devhdl.
				 */
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = 0;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = NULL;
				mpt->m_port_chng = 1;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			default:
				break;
			}
		}

		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	default:
		/* Event not handled synchronously; caller falls back. */
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
6983
6984 /*
6985 * handle events from ioc
6986 */
6987 static void
6988 mptsas_handle_event(void *args)
6989 {
6990 m_replyh_arg_t *replyh_arg;
6991 pMpi2EventNotificationReply_t eventreply;
6992 uint32_t event, iocloginfo, rfm;
6993 uint32_t status;
6994 uint8_t port;
6995 mptsas_t *mpt;
6996 uint_t iocstatus;
6997
6998 replyh_arg = (m_replyh_arg_t *)args;
6999 rfm = replyh_arg->rfm;
7000 mpt = replyh_arg->mpt;
7001
7002 mutex_enter(&mpt->m_mutex);
7003 /*
7004 * If HBA is being reset, drop incoming event.
7005 */
7006 if (mpt->m_in_reset) {
7007 NDBG20(("dropping event received prior to reset"));
7008 mutex_exit(&mpt->m_mutex);
7009 return;
7010 }
7011
7012 eventreply = (pMpi2EventNotificationReply_t)
7013 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7014 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7015
7016 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7017 &eventreply->IOCStatus)) {
7018 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7019 mptsas_log(mpt, CE_WARN,
7020 "!mptsas_handle_event: IOCStatus=0x%x, "
7021 "IOCLogInfo=0x%x", iocstatus,
7022 ddi_get32(mpt->m_acc_reply_frame_hdl,
7023 &eventreply->IOCLogInfo));
7024 } else {
7025 mptsas_log(mpt, CE_WARN,
7026 "mptsas_handle_event: IOCStatus=0x%x, "
7027 "IOCLogInfo=0x%x", iocstatus,
7028 ddi_get32(mpt->m_acc_reply_frame_hdl,
7029 &eventreply->IOCLogInfo));
7030 }
7031 }
7032
7033 /*
7034 * figure out what kind of event we got and handle accordingly
7035 */
7036 switch (event) {
7037 case MPI2_EVENT_LOG_ENTRY_ADDED:
7038 break;
7039 case MPI2_EVENT_LOG_DATA:
7040 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7041 &eventreply->IOCLogInfo);
7042 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7043 iocloginfo));
7044 break;
7045 case MPI2_EVENT_STATE_CHANGE:
7046 NDBG20(("mptsas%d state change.", mpt->m_instance));
7047 break;
7048 case MPI2_EVENT_HARD_RESET_RECEIVED:
7049 NDBG20(("mptsas%d event change.", mpt->m_instance));
7050 break;
7051 case MPI2_EVENT_SAS_DISCOVERY:
7052 {
7053 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7054 char string[80];
7055 uint8_t rc;
7056
7057 sasdiscovery =
7058 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7059
7060 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7061 &sasdiscovery->ReasonCode);
7062 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7063 &sasdiscovery->PhysicalPort);
7064 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7065 &sasdiscovery->DiscoveryStatus);
7066
7067 string[0] = 0;
7068 switch (rc) {
7069 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7070 (void) sprintf(string, "STARTING");
7071 break;
7072 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7073 (void) sprintf(string, "COMPLETED");
7074 break;
7075 default:
7076 (void) sprintf(string, "UNKNOWN");
7077 break;
7078 }
7079
7080 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7081 port, status));
7082
7083 break;
7084 }
7085 case MPI2_EVENT_EVENT_CHANGE:
7086 NDBG20(("mptsas%d event change.", mpt->m_instance));
7087 break;
7088 case MPI2_EVENT_TASK_SET_FULL:
7089 {
7090 pMpi2EventDataTaskSetFull_t taskfull;
7091
7092 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7093
7094 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7095 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7096 &taskfull->CurrentDepth)));
7097 break;
7098 }
7099 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7100 {
7101 /*
7102 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7103 * in mptsas_handle_event_sync() of interrupt context
7104 */
7105 break;
7106 }
7107 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7108 {
7109 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7110 uint8_t rc;
7111 char string[80];
7112
7113 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7114 eventreply->EventData;
7115
7116 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7117 &encstatus->ReasonCode);
7118 switch (rc) {
7119 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7120 (void) sprintf(string, "added");
7121 break;
7122 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7123 (void) sprintf(string, ", not responding");
7124 break;
7125 default:
7126 break;
7127 }
7128 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7129 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7130 &encstatus->EnclosureHandle), string));
7131 break;
7132 }
7133
7134 /*
7135 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7136 * mptsas_handle_event_sync,in here just send ack message.
7137 */
7138 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7139 {
7140 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7141 uint8_t rc;
7142 uint16_t devhdl;
7143 uint64_t wwn = 0;
7144 uint32_t wwn_lo, wwn_hi;
7145
7146 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7147 eventreply->EventData;
7148 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7149 &statuschange->ReasonCode);
7150 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7151 (uint32_t *)(void *)&statuschange->SASAddress);
7152 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7153 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7154 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7155 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7156 &statuschange->DevHandle);
7157
7158 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7159 wwn));
7160
7161 switch (rc) {
7162 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7163 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7164 ddi_get8(mpt->m_acc_reply_frame_hdl,
7165 &statuschange->ASC),
7166 ddi_get8(mpt->m_acc_reply_frame_hdl,
7167 &statuschange->ASCQ)));
7168 break;
7169
7170 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7171 NDBG20(("Device not supported"));
7172 break;
7173
7174 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7175 NDBG20(("IOC internally generated the Target Reset "
7176 "for devhdl:%x", devhdl));
7177 break;
7178
7179 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7180 NDBG20(("IOC's internally generated Target Reset "
7181 "completed for devhdl:%x", devhdl));
7182 break;
7183
7184 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7185 NDBG20(("IOC internally generated Abort Task"));
7186 break;
7187
7188 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7189 NDBG20(("IOC's internally generated Abort Task "
7190 "completed"));
7191 break;
7192
7193 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7194 NDBG20(("IOC internally generated Abort Task Set"));
7195 break;
7196
7197 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7198 NDBG20(("IOC internally generated Clear Task Set"));
7199 break;
7200
7201 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7202 NDBG20(("IOC internally generated Query Task"));
7203 break;
7204
7205 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7206 NDBG20(("Device sent an Asynchronous Notification"));
7207 break;
7208
7209 default:
7210 break;
7211 }
7212 break;
7213 }
7214 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7215 {
7216 /*
7217 * IR TOPOLOGY CHANGE LIST Event has already been handled
7218 * in mpt_handle_event_sync() of interrupt context
7219 */
7220 break;
7221 }
7222 case MPI2_EVENT_IR_OPERATION_STATUS:
7223 {
7224 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7225 char reason_str[80];
7226 uint8_t rc, percent;
7227 uint16_t handle;
7228
7229 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7230 eventreply->EventData;
7231 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7232 &irOpStatus->RAIDOperation);
7233 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7234 &irOpStatus->PercentComplete);
7235 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7236 &irOpStatus->VolDevHandle);
7237
7238 switch (rc) {
7239 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7240 (void) sprintf(reason_str, "resync");
7241 break;
7242 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7243 (void) sprintf(reason_str, "online capacity "
7244 "expansion");
7245 break;
7246 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7247 (void) sprintf(reason_str, "consistency check");
7248 break;
7249 default:
7250 (void) sprintf(reason_str, "unknown reason %x",
7251 rc);
7252 }
7253
7254 NDBG20(("mptsas%d raid operational status: (%s)"
7255 "\thandle(0x%04x), percent complete(%d)\n",
7256 mpt->m_instance, reason_str, handle, percent));
7257 break;
7258 }
7259 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7260 {
7261 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7262 uint8_t phy_num;
7263 uint8_t primitive;
7264
7265 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7266 eventreply->EventData;
7267
7268 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7269 &sas_broadcast->PhyNum);
7270 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7271 &sas_broadcast->Primitive);
7272
7273 switch (primitive) {
7274 case MPI2_EVENT_PRIMITIVE_CHANGE:
7275 mptsas_smhba_log_sysevent(mpt,
7276 ESC_SAS_HBA_PORT_BROADCAST,
7277 SAS_PORT_BROADCAST_CHANGE,
7278 &mpt->m_phy_info[phy_num].smhba_info);
7279 break;
7280 case MPI2_EVENT_PRIMITIVE_SES:
7281 mptsas_smhba_log_sysevent(mpt,
7282 ESC_SAS_HBA_PORT_BROADCAST,
7283 SAS_PORT_BROADCAST_SES,
7284 &mpt->m_phy_info[phy_num].smhba_info);
7285 break;
7286 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7287 mptsas_smhba_log_sysevent(mpt,
7288 ESC_SAS_HBA_PORT_BROADCAST,
7289 SAS_PORT_BROADCAST_D01_4,
7290 &mpt->m_phy_info[phy_num].smhba_info);
7291 break;
7292 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7293 mptsas_smhba_log_sysevent(mpt,
7294 ESC_SAS_HBA_PORT_BROADCAST,
7295 SAS_PORT_BROADCAST_D04_7,
7296 &mpt->m_phy_info[phy_num].smhba_info);
7297 break;
7298 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7299 mptsas_smhba_log_sysevent(mpt,
7300 ESC_SAS_HBA_PORT_BROADCAST,
7301 SAS_PORT_BROADCAST_D16_7,
7302 &mpt->m_phy_info[phy_num].smhba_info);
7303 break;
7304 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7305 mptsas_smhba_log_sysevent(mpt,
7306 ESC_SAS_HBA_PORT_BROADCAST,
7307 SAS_PORT_BROADCAST_D29_7,
7308 &mpt->m_phy_info[phy_num].smhba_info);
7309 break;
7310 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7311 mptsas_smhba_log_sysevent(mpt,
7312 ESC_SAS_HBA_PORT_BROADCAST,
7313 SAS_PORT_BROADCAST_D24_0,
7314 &mpt->m_phy_info[phy_num].smhba_info);
7315 break;
7316 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7317 mptsas_smhba_log_sysevent(mpt,
7318 ESC_SAS_HBA_PORT_BROADCAST,
7319 SAS_PORT_BROADCAST_D27_4,
7320 &mpt->m_phy_info[phy_num].smhba_info);
7321 break;
7322 default:
7323 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7324 " %x received",
7325 mpt->m_instance, primitive));
7326 break;
7327 }
7328 NDBG20(("mptsas%d sas broadcast primitive: "
7329 "\tprimitive(0x%04x), phy(%d) complete\n",
7330 mpt->m_instance, primitive, phy_num));
7331 break;
7332 }
7333 case MPI2_EVENT_IR_VOLUME:
7334 {
7335 Mpi2EventDataIrVolume_t *irVolume;
7336 uint16_t devhandle;
7337 uint32_t state;
7338 int config, vol;
7339 mptsas_slots_t *slots = mpt->m_active;
7340 uint8_t found = FALSE;
7341
7342 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7343 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7344 &irVolume->NewValue);
7345 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7346 &irVolume->VolDevHandle);
7347
7348 NDBG20(("EVENT_IR_VOLUME event is received"));
7349
7350 /*
7351 * Get latest RAID info and then find the DevHandle for this
7352 * event in the configuration. If the DevHandle is not found
7353 * just exit the event.
7354 */
7355 (void) mptsas_get_raid_info(mpt);
7356 for (config = 0; (config < slots->m_num_raid_configs) &&
7357 (!found); config++) {
7358 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7359 if (slots->m_raidconfig[config].m_raidvol[vol].
7360 m_raidhandle == devhandle) {
7361 found = TRUE;
7362 break;
7363 }
7364 }
7365 }
7366 if (!found) {
7367 break;
7368 }
7369
7370 switch (irVolume->ReasonCode) {
7371 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7372 {
7373 uint32_t i;
7374 slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7375 state;
7376
7377 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7378 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7379 ", auto-config of hot-swap drives is %s"
7380 ", write caching is %s"
7381 ", hot-spare pool mask is %02x\n",
7382 vol, state &
7383 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7384 ? "disabled" : "enabled",
7385 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7386 ? "controlled by member disks" :
7387 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7388 ? "disabled" :
7389 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7390 ? "enabled" :
7391 "incorrectly set",
7392 (state >> 16) & 0xff);
7393 break;
7394 }
7395 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7396 {
7397 slots->m_raidconfig[config].m_raidvol[vol].m_state =
7398 (uint8_t)state;
7399
7400 mptsas_log(mpt, CE_NOTE,
7401 "Volume %d is now %s\n", vol,
7402 state == MPI2_RAID_VOL_STATE_OPTIMAL
7403 ? "optimal" :
7404 state == MPI2_RAID_VOL_STATE_DEGRADED
7405 ? "degraded" :
7406 state == MPI2_RAID_VOL_STATE_ONLINE
7407 ? "online" :
7408 state == MPI2_RAID_VOL_STATE_INITIALIZING
7409 ? "initializing" :
7410 state == MPI2_RAID_VOL_STATE_FAILED
7411 ? "failed" :
7412 state == MPI2_RAID_VOL_STATE_MISSING
7413 ? "missing" :
7414 "state unknown");
7415 break;
7416 }
7417 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7418 {
7419 slots->m_raidconfig[config].m_raidvol[vol].
7420 m_statusflags = state;
7421
7422 mptsas_log(mpt, CE_NOTE,
7423 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7424 vol,
7425 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7426 ? ", enabled" : ", disabled",
7427 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7428 ? ", quiesced" : "",
7429 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7430 ? ", inactive" : ", active",
7431 state &
7432 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7433 ? ", bad block table is full" : "",
7434 state &
7435 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7436 ? ", resync in progress" : "",
7437 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7438 ? ", background initialization in progress" : "",
7439 state &
7440 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7441 ? ", capacity expansion in progress" : "",
7442 state &
7443 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7444 ? ", consistency check in progress" : "",
7445 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7446 ? ", data scrub in progress" : "");
7447 break;
7448 }
7449 default:
7450 break;
7451 }
7452 break;
7453 }
7454 case MPI2_EVENT_IR_PHYSICAL_DISK:
7455 {
7456 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7457 uint16_t devhandle, enchandle, slot;
7458 uint32_t status, state;
7459 uint8_t physdisknum, reason;
7460
7461 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7462 eventreply->EventData;
7463 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7464 &irPhysDisk->PhysDiskNum);
7465 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7466 &irPhysDisk->PhysDiskDevHandle);
7467 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7468 &irPhysDisk->EnclosureHandle);
7469 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7470 &irPhysDisk->Slot);
7471 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7472 &irPhysDisk->NewValue);
7473 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7474 &irPhysDisk->ReasonCode);
7475
7476 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7477
7478 switch (reason) {
7479 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7480 mptsas_log(mpt, CE_NOTE,
7481 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7482 "for enclosure with handle 0x%x is now in hot "
7483 "spare pool %d",
7484 physdisknum, devhandle, slot, enchandle,
7485 (state >> 16) & 0xff);
7486 break;
7487
7488 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7489 status = state;
7490 mptsas_log(mpt, CE_NOTE,
7491 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7492 "for enclosure with handle 0x%x is now "
7493 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7494 enchandle,
7495 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7496 ? ", inactive" : ", active",
7497 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7498 ? ", out of sync" : "",
7499 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7500 ? ", quiesced" : "",
7501 status &
7502 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7503 ? ", write cache enabled" : "",
7504 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7505 ? ", capacity expansion target" : "");
7506 break;
7507
7508 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7509 mptsas_log(mpt, CE_NOTE,
7510 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7511 "for enclosure with handle 0x%x is now %s\n",
7512 physdisknum, devhandle, slot, enchandle,
7513 state == MPI2_RAID_PD_STATE_OPTIMAL
7514 ? "optimal" :
7515 state == MPI2_RAID_PD_STATE_REBUILDING
7516 ? "rebuilding" :
7517 state == MPI2_RAID_PD_STATE_DEGRADED
7518 ? "degraded" :
7519 state == MPI2_RAID_PD_STATE_HOT_SPARE
7520 ? "a hot spare" :
7521 state == MPI2_RAID_PD_STATE_ONLINE
7522 ? "online" :
7523 state == MPI2_RAID_PD_STATE_OFFLINE
7524 ? "offline" :
7525 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7526 ? "not compatible" :
7527 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7528 ? "not configured" :
7529 "state unknown");
7530 break;
7531 }
7532 break;
7533 }
7534 default:
7535 NDBG20(("mptsas%d: unknown event %x received",
7536 mpt->m_instance, event));
7537 break;
7538 }
7539
7540 /*
7541 * Return the reply frame to the free queue.
7542 */
7543 ddi_put32(mpt->m_acc_free_queue_hdl,
7544 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7545 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7546 DDI_DMA_SYNC_FORDEV);
7547 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7548 mpt->m_free_index = 0;
7549 }
7550 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7551 mpt->m_free_index);
7552 mutex_exit(&mpt->m_mutex);
7553 }
7554
7555 /*
7556 * invoked from timeout() to restart qfull cmds with throttle == 0
7557 */
7558 static void
7559 mptsas_restart_cmd(void *arg)
7560 {
7561 mptsas_t *mpt = arg;
7562 mptsas_target_t *ptgt = NULL;
7563
7564 mutex_enter(&mpt->m_mutex);
7565
7566 mpt->m_restart_cmd_timeid = 0;
7567
7568 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7569 MPTSAS_HASH_FIRST);
7570 while (ptgt != NULL) {
7571 if (ptgt->m_reset_delay == 0) {
7572 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7573 mptsas_set_throttle(mpt, ptgt,
7574 MAX_THROTTLE);
7575 }
7576 }
7577
7578 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7579 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7580 }
7581 mptsas_restart_hba(mpt);
7582 mutex_exit(&mpt->m_mutex);
7583 }
7584
7585 void
7586 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7587 {
7588 int slot;
7589 mptsas_slots_t *slots = mpt->m_active;
7590 int t;
7591 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7592
7593 ASSERT(cmd != NULL);
7594 ASSERT(cmd->cmd_queued == FALSE);
7595
7596 /*
7597 * Task Management cmds are removed in their own routines. Also,
7598 * we don't want to modify timeout based on TM cmds.
7599 */
7600 if (cmd->cmd_flags & CFLAG_TM_CMD) {
7601 return;
7602 }
7603
7604 t = Tgt(cmd);
7605 slot = cmd->cmd_slot;
7606
7607 /*
7608 * remove the cmd.
7609 */
7610 if (cmd == slots->m_slot[slot]) {
7611 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7612 slots->m_slot[slot] = NULL;
7613 mpt->m_ncmds--;
7614
7615 /*
7616 * only decrement per target ncmds if command
7617 * has a target associated with it.
7618 */
7619 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7620 ptgt->m_t_ncmds--;
7621 /*
7622 * reset throttle if we just ran an untagged command
7623 * to a tagged target
7624 */
7625 if ((ptgt->m_t_ncmds == 0) &&
7626 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7627 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7628 }
7629 }
7630
7631 }
7632
7633 /*
7634 * This is all we need to do for ioc commands.
7635 */
7636 if (cmd->cmd_flags & CFLAG_CMDIOC) {
7637 mptsas_return_to_pool(mpt, cmd);
7638 return;
7639 }
7640
7641 /*
7642 * Figure out what to set tag Q timeout for...
7643 *
7644 * Optimize: If we have duplicate's of same timeout
7645 * we're using, then we'll use it again until we run
7646 * out of duplicates. This should be the normal case
7647 * for block and raw I/O.
7648 * If no duplicates, we have to scan through tag que and
7649 * find the longest timeout value and use it. This is
7650 * going to take a while...
7651 * Add 1 to m_n_slots to account for TM request.
7652 */
7653 if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7654 if (--(ptgt->m_dups) == 0) {
7655 if (ptgt->m_t_ncmds) {
7656 mptsas_cmd_t *ssp;
7657 uint_t n = 0;
7658 ushort_t nslots = (slots->m_n_slots + 1);
7659 ushort_t i;
7660 /*
7661 * This crude check assumes we don't do
7662 * this too often which seems reasonable
7663 * for block and raw I/O.
7664 */
7665 for (i = 0; i < nslots; i++) {
7666 ssp = slots->m_slot[i];
7667 if (ssp && (Tgt(ssp) == t) &&
7668 (ssp->cmd_pkt->pkt_time > n)) {
7669 n = ssp->cmd_pkt->pkt_time;
7670 ptgt->m_dups = 1;
7671 } else if (ssp && (Tgt(ssp) == t) &&
7672 (ssp->cmd_pkt->pkt_time == n)) {
7673 ptgt->m_dups++;
7674 }
7675 }
7676 ptgt->m_timebase = n;
7677 } else {
7678 ptgt->m_dups = 0;
7679 ptgt->m_timebase = 0;
7680 }
7681 }
7682 }
7683 ptgt->m_timeout = ptgt->m_timebase;
7684
7685 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7686 }
7687
7688 /*
7689 * accept all cmds on the tx_waitq if any and then
7690 * start a fresh request from the top of the device queue.
7691 *
7692 * since there are always cmds queued on the tx_waitq, and rare cmds on
7693 * the instance waitq, so this function should not be invoked in the ISR,
7694 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7695 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7696 */
7697 static void
7698 mptsas_restart_hba(mptsas_t *mpt)
7699 {
7700 ASSERT(mutex_owned(&mpt->m_mutex));
7701
7702 mutex_enter(&mpt->m_tx_waitq_mutex);
7703 if (mpt->m_tx_waitq) {
7704 mptsas_accept_tx_waitq(mpt);
7705 }
7706 mutex_exit(&mpt->m_tx_waitq_mutex);
7707 mptsas_restart_waitq(mpt);
7708 }
7709
7710 /*
7711 * start a fresh request from the top of the device queue
7712 */
7713 static void
7714 mptsas_restart_waitq(mptsas_t *mpt)
7715 {
7716 mptsas_cmd_t *cmd, *next_cmd;
7717 mptsas_target_t *ptgt = NULL;
7718
7719 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7720
7721 ASSERT(mutex_owned(&mpt->m_mutex));
7722
7723 /*
7724 * If there is a reset delay, don't start any cmds. Otherwise, start
7725 * as many cmds as possible.
7726 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7727 * commands is m_max_requests - 2.
7728 */
7729 cmd = mpt->m_waitq;
7730
7731 while (cmd != NULL) {
7732 next_cmd = cmd->cmd_linkp;
7733 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7734 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7735 /*
7736 * passthru command get slot need
7737 * set CFLAG_PREPARED.
7738 */
7739 cmd->cmd_flags |= CFLAG_PREPARED;
7740 mptsas_waitq_delete(mpt, cmd);
7741 mptsas_start_passthru(mpt, cmd);
7742 }
7743 cmd = next_cmd;
7744 continue;
7745 }
7746 if (cmd->cmd_flags & CFLAG_CONFIG) {
7747 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7748 /*
7749 * Send the config page request and delete it
7750 * from the waitq.
7751 */
7752 cmd->cmd_flags |= CFLAG_PREPARED;
7753 mptsas_waitq_delete(mpt, cmd);
7754 mptsas_start_config_page_access(mpt, cmd);
7755 }
7756 cmd = next_cmd;
7757 continue;
7758 }
7759 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7760 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7761 /*
7762 * Send the FW Diag request and delete if from
7763 * the waitq.
7764 */
7765 cmd->cmd_flags |= CFLAG_PREPARED;
7766 mptsas_waitq_delete(mpt, cmd);
7767 mptsas_start_diag(mpt, cmd);
7768 }
7769 cmd = next_cmd;
7770 continue;
7771 }
7772
7773 ptgt = cmd->cmd_tgt_addr;
7774 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7775 (ptgt->m_t_ncmds == 0)) {
7776 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7777 }
7778 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7779 (ptgt && (ptgt->m_reset_delay == 0)) &&
7780 (ptgt && (ptgt->m_t_ncmds <
7781 ptgt->m_t_throttle))) {
7782 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7783 mptsas_waitq_delete(mpt, cmd);
7784 (void) mptsas_start_cmd(mpt, cmd);
7785 }
7786 }
7787 cmd = next_cmd;
7788 }
7789 }
7790 /*
7791 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7792 * Accept all those queued cmds before new cmd is accept so that the
7793 * cmds are sent in order.
7794 */
7795 static void
7796 mptsas_accept_tx_waitq(mptsas_t *mpt)
7797 {
7798 mptsas_cmd_t *cmd;
7799
7800 ASSERT(mutex_owned(&mpt->m_mutex));
7801 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7802
7803 /*
7804 * A Bus Reset could occur at any time and flush the tx_waitq,
7805 * so we cannot count on the tx_waitq to contain even one cmd.
7806 * And when the m_tx_waitq_mutex is released and run
7807 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7808 */
7809 cmd = mpt->m_tx_waitq;
7810 for (;;) {
7811 if ((cmd = mpt->m_tx_waitq) == NULL) {
7812 mpt->m_tx_draining = 0;
7813 break;
7814 }
7815 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7816 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7817 }
7818 cmd->cmd_linkp = NULL;
7819 mutex_exit(&mpt->m_tx_waitq_mutex);
7820 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7821 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7822 "to accept cmd on queue\n");
7823 mutex_enter(&mpt->m_tx_waitq_mutex);
7824 }
7825 }
7826
7827
7828 /*
7829 * mpt tag type lookup
7830 */
7831 static char mptsas_tag_lookup[] =
7832 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7833
7834 static int
7835 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7836 {
7837 struct scsi_pkt *pkt = CMD2PKT(cmd);
7838 uint32_t control = 0;
7839 int n;
7840 caddr_t mem;
7841 pMpi2SCSIIORequest_t io_request;
7842 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
7843 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
7844 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7845 uint16_t SMID, io_flags = 0;
7846 uint32_t request_desc_low, request_desc_high;
7847
7848 NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));
7849
7850 /*
7851 * Set SMID and increment index. Rollover to 1 instead of 0 if index
7852 * is at the max. 0 is an invalid SMID, so we call the first index 1.
7853 */
7854 SMID = cmd->cmd_slot;
7855
7856 /*
7857 * It is possible for back to back device reset to
7858 * happen before the reset delay has expired. That's
7859 * ok, just let the device reset go out on the bus.
7860 */
7861 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7862 ASSERT(ptgt->m_reset_delay == 0);
7863 }
7864
7865 /*
7866 * if a non-tagged cmd is submitted to an active tagged target
7867 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
7868 * to be untagged
7869 */
7870 if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
7871 (ptgt->m_t_ncmds > 1) &&
7872 ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
7873 (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
7874 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7875 NDBG23(("target=%d, untagged cmd, start draining\n",
7876 ptgt->m_devhdl));
7877
7878 if (ptgt->m_reset_delay == 0) {
7879 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
7880 }
7881
7882 mptsas_remove_cmd(mpt, cmd);
7883 cmd->cmd_pkt_flags |= FLAG_HEAD;
7884 mptsas_waitq_add(mpt, cmd);
7885 }
7886 return (DDI_FAILURE);
7887 }
7888
7889 /*
7890 * Set correct tag bits.
7891 */
7892 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
7893 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
7894 FLAG_TAGMASK) >> 12)]) {
7895 case MSG_SIMPLE_QTAG:
7896 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7897 break;
7898 case MSG_HEAD_QTAG:
7899 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
7900 break;
7901 case MSG_ORDERED_QTAG:
7902 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
7903 break;
7904 default:
7905 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
7906 break;
7907 }
7908 } else {
7909 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
7910 ptgt->m_t_throttle = 1;
7911 }
7912 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7913 }
7914
7915 if (cmd->cmd_pkt_flags & FLAG_TLR) {
7916 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
7917 }
7918
7919 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
7920 io_request = (pMpi2SCSIIORequest_t)mem;
7921
7922 bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
7923 ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
7924 (MPI2_SCSI_IO_REQUEST, SGL) / 4);
7925 mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
7926 MPI2_FUNCTION_SCSI_IO_REQUEST);
7927
7928 (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
7929 io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
7930
7931 io_flags = cmd->cmd_cdblen;
7932 ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
7933 /*
7934 * setup the Scatter/Gather DMA list for this request
7935 */
7936 if (cmd->cmd_cookiec > 0) {
7937 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
7938 } else {
7939 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
7940 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
7941 MPI2_SGE_FLAGS_END_OF_BUFFER |
7942 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
7943 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
7944 }
7945
7946 /*
7947 * save ARQ information
7948 */
7949 ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
7950 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
7951 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
7952 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
7953 cmd->cmd_ext_arqcookie.dmac_address);
7954 } else {
7955 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
7956 cmd->cmd_arqcookie.dmac_address);
7957 }
7958
7959 ddi_put32(acc_hdl, &io_request->Control, control);
7960
7961 NDBG31(("starting message=0x%p, with cmd=0x%p",
7962 (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
7963
7964 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
7965
7966 /*
7967 * Build request descriptor and write it to the request desc post reg.
7968 */
7969 request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
7970 request_desc_high = ptgt->m_devhdl << 16;
7971 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
7972
7973 /*
7974 * Start timeout.
7975 */
7976 #ifdef MPTSAS_TEST
7977 /*
7978 * Temporarily set timebase = 0; needed for
7979 * timeout torture test.
7980 */
7981 if (mptsas_test_timeouts) {
7982 ptgt->m_timebase = 0;
7983 }
7984 #endif
7985 n = pkt->pkt_time - ptgt->m_timebase;
7986
7987 if (n == 0) {
7988 (ptgt->m_dups)++;
7989 ptgt->m_timeout = ptgt->m_timebase;
7990 } else if (n > 0) {
7991 ptgt->m_timeout =
7992 ptgt->m_timebase = pkt->pkt_time;
7993 ptgt->m_dups = 1;
7994 } else if (n < 0) {
7995 ptgt->m_timeout = ptgt->m_timebase;
7996 }
7997 #ifdef MPTSAS_TEST
7998 /*
7999 * Set back to a number higher than
8000 * mptsas_scsi_watchdog_tick
8001 * so timeouts will happen in mptsas_watchsubr
8002 */
8003 if (mptsas_test_timeouts) {
8004 ptgt->m_timebase = 60;
8005 }
8006 #endif
8007
8008 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8009 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8010 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8011 return (DDI_FAILURE);
8012 }
8013 return (DDI_SUCCESS);
8014 }
8015
8016 /*
8017 * Select a helper thread to handle current doneq
8018 */
8019 static void
8020 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8021 {
8022 uint64_t t, i;
8023 uint32_t min = 0xffffffff;
8024 mptsas_doneq_thread_list_t *item;
8025
8026 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8027 item = &mpt->m_doneq_thread_id[i];
8028 /*
8029 * If the completed command on help thread[i] less than
8030 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8031 * pick a thread which has least completed command.
8032 */
8033
8034 mutex_enter(&item->mutex);
8035 if (item->len < mpt->m_doneq_thread_threshold) {
8036 t = i;
8037 mutex_exit(&item->mutex);
8038 break;
8039 }
8040 if (item->len < min) {
8041 min = item->len;
8042 t = i;
8043 }
8044 mutex_exit(&item->mutex);
8045 }
8046 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8047 mptsas_doneq_mv(mpt, t);
8048 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8049 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8050 }
8051
8052 /*
8053 * move the current global doneq to the doneq of thead[t]
8054 */
8055 static void
8056 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8057 {
8058 mptsas_cmd_t *cmd;
8059 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8060
8061 ASSERT(mutex_owned(&item->mutex));
8062 while ((cmd = mpt->m_doneq) != NULL) {
8063 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8064 mpt->m_donetail = &mpt->m_doneq;
8065 }
8066 cmd->cmd_linkp = NULL;
8067 *item->donetail = cmd;
8068 item->donetail = &cmd->cmd_linkp;
8069 mpt->m_doneq_len--;
8070 item->len++;
8071 }
8072 }
8073
8074 void
8075 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8076 {
8077 struct scsi_pkt *pkt = CMD2PKT(cmd);
8078
8079 /* Check all acc and dma handles */
8080 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8081 DDI_SUCCESS) ||
8082 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8083 DDI_SUCCESS) ||
8084 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8085 DDI_SUCCESS) ||
8086 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8087 DDI_SUCCESS) ||
8088 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8089 DDI_SUCCESS) ||
8090 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8091 DDI_SUCCESS) ||
8092 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8093 DDI_SUCCESS)) {
8094 ddi_fm_service_impact(mpt->m_dip,
8095 DDI_SERVICE_UNAFFECTED);
8096 ddi_fm_acc_err_clear(mpt->m_config_handle,
8097 DDI_FME_VER0);
8098 pkt->pkt_reason = CMD_TRAN_ERR;
8099 pkt->pkt_statistics = 0;
8100 }
8101 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8102 DDI_SUCCESS) ||
8103 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8104 DDI_SUCCESS) ||
8105 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8106 DDI_SUCCESS) ||
8107 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8108 DDI_SUCCESS) ||
8109 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8110 DDI_SUCCESS)) {
8111 ddi_fm_service_impact(mpt->m_dip,
8112 DDI_SERVICE_UNAFFECTED);
8113 pkt->pkt_reason = CMD_TRAN_ERR;
8114 pkt->pkt_statistics = 0;
8115 }
8116 if (cmd->cmd_dmahandle &&
8117 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8118 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8119 pkt->pkt_reason = CMD_TRAN_ERR;
8120 pkt->pkt_statistics = 0;
8121 }
8122 if ((cmd->cmd_extra_frames &&
8123 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8124 DDI_SUCCESS) ||
8125 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8126 DDI_SUCCESS)))) {
8127 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8128 pkt->pkt_reason = CMD_TRAN_ERR;
8129 pkt->pkt_statistics = 0;
8130 }
8131 if (cmd->cmd_arqhandle &&
8132 (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8133 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8134 pkt->pkt_reason = CMD_TRAN_ERR;
8135 pkt->pkt_statistics = 0;
8136 }
8137 if (cmd->cmd_ext_arqhandle &&
8138 (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8139 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8140 pkt->pkt_reason = CMD_TRAN_ERR;
8141 pkt->pkt_statistics = 0;
8142 }
8143 }
8144
8145 /*
8146 * These routines manipulate the queue of commands that
8147 * are waiting for their completion routines to be called.
8148 * The queue is usually in FIFO order but on an MP system
8149 * it's possible for the completion routines to get out
8150 * of order. If that's a problem you need to add a global
8151 * mutex around the code that calls the completion routine
8152 * in the interrupt handler.
8153 */
8154 static void
8155 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8156 {
8157 struct scsi_pkt *pkt = CMD2PKT(cmd);
8158
8159 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8160
8161 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8162 cmd->cmd_linkp = NULL;
8163 cmd->cmd_flags |= CFLAG_FINISHED;
8164 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8165
8166 mptsas_fma_check(mpt, cmd);
8167
8168 /*
8169 * only add scsi pkts that have completion routines to
8170 * the doneq. no intr cmds do not have callbacks.
8171 */
8172 if (pkt && (pkt->pkt_comp)) {
8173 *mpt->m_donetail = cmd;
8174 mpt->m_donetail = &cmd->cmd_linkp;
8175 mpt->m_doneq_len++;
8176 }
8177 }
8178
8179 static mptsas_cmd_t *
8180 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8181 {
8182 mptsas_cmd_t *cmd;
8183 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8184
8185 /* pop one off the done queue */
8186 if ((cmd = item->doneq) != NULL) {
8187 /* if the queue is now empty fix the tail pointer */
8188 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8189 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8190 item->donetail = &item->doneq;
8191 }
8192 cmd->cmd_linkp = NULL;
8193 item->len--;
8194 }
8195 return (cmd);
8196 }
8197
8198 static void
8199 mptsas_doneq_empty(mptsas_t *mpt)
8200 {
8201 if (mpt->m_doneq && !mpt->m_in_callback) {
8202 mptsas_cmd_t *cmd, *next;
8203 struct scsi_pkt *pkt;
8204
8205 mpt->m_in_callback = 1;
8206 cmd = mpt->m_doneq;
8207 mpt->m_doneq = NULL;
8208 mpt->m_donetail = &mpt->m_doneq;
8209 mpt->m_doneq_len = 0;
8210
8211 mutex_exit(&mpt->m_mutex);
8212 /*
8213 * run the completion routines of all the
8214 * completed commands
8215 */
8216 while (cmd != NULL) {
8217 next = cmd->cmd_linkp;
8218 cmd->cmd_linkp = NULL;
8219 /* run this command's completion routine */
8220 cmd->cmd_flags |= CFLAG_COMPLETED;
8221 pkt = CMD2PKT(cmd);
8222 mptsas_pkt_comp(pkt, cmd);
8223 cmd = next;
8224 }
8225 mutex_enter(&mpt->m_mutex);
8226 mpt->m_in_callback = 0;
8227 }
8228 }
8229
8230 /*
8231 * These routines manipulate the target's queue of pending requests
8232 */
8233 void
8234 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8235 {
8236 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8237 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8238 cmd->cmd_queued = TRUE;
8239 if (ptgt)
8240 ptgt->m_t_nwait++;
8241 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8242 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8243 mpt->m_waitqtail = &cmd->cmd_linkp;
8244 }
8245 mpt->m_waitq = cmd;
8246 } else {
8247 cmd->cmd_linkp = NULL;
8248 *(mpt->m_waitqtail) = cmd;
8249 mpt->m_waitqtail = &cmd->cmd_linkp;
8250 }
8251 }
8252
8253 static mptsas_cmd_t *
8254 mptsas_waitq_rm(mptsas_t *mpt)
8255 {
8256 mptsas_cmd_t *cmd;
8257 mptsas_target_t *ptgt;
8258 NDBG7(("mptsas_waitq_rm"));
8259
8260 MPTSAS_WAITQ_RM(mpt, cmd);
8261
8262 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8263 if (cmd) {
8264 ptgt = cmd->cmd_tgt_addr;
8265 if (ptgt) {
8266 ptgt->m_t_nwait--;
8267 ASSERT(ptgt->m_t_nwait >= 0);
8268 }
8269 }
8270 return (cmd);
8271 }
8272
8273 /*
8274 * remove specified cmd from the middle of the wait queue.
8275 */
8276 static void
8277 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8278 {
8279 mptsas_cmd_t *prevp = mpt->m_waitq;
8280 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8281
8282 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8283 (void *)mpt, (void *)cmd));
8284 if (ptgt) {
8285 ptgt->m_t_nwait--;
8286 ASSERT(ptgt->m_t_nwait >= 0);
8287 }
8288
8289 if (prevp == cmd) {
8290 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8291 mpt->m_waitqtail = &mpt->m_waitq;
8292
8293 cmd->cmd_linkp = NULL;
8294 cmd->cmd_queued = FALSE;
8295 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8296 (void *)mpt, (void *)cmd));
8297 return;
8298 }
8299
8300 while (prevp != NULL) {
8301 if (prevp->cmd_linkp == cmd) {
8302 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8303 mpt->m_waitqtail = &prevp->cmd_linkp;
8304
8305 cmd->cmd_linkp = NULL;
8306 cmd->cmd_queued = FALSE;
8307 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8308 (void *)mpt, (void *)cmd));
8309 return;
8310 }
8311 prevp = prevp->cmd_linkp;
8312 }
8313 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8314 }
8315
8316 static mptsas_cmd_t *
8317 mptsas_tx_waitq_rm(mptsas_t *mpt)
8318 {
8319 mptsas_cmd_t *cmd;
8320 NDBG7(("mptsas_tx_waitq_rm"));
8321
8322 MPTSAS_TX_WAITQ_RM(mpt, cmd);
8323
8324 NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8325
8326 return (cmd);
8327 }
8328
8329 /*
8330 * remove specified cmd from the middle of the tx_waitq.
8331 */
8332 static void
8333 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8334 {
8335 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8336
8337 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8338 (void *)mpt, (void *)cmd));
8339
8340 if (prevp == cmd) {
8341 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8342 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8343
8344 cmd->cmd_linkp = NULL;
8345 cmd->cmd_queued = FALSE;
8346 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8347 (void *)mpt, (void *)cmd));
8348 return;
8349 }
8350
8351 while (prevp != NULL) {
8352 if (prevp->cmd_linkp == cmd) {
8353 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8354 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8355
8356 cmd->cmd_linkp = NULL;
8357 cmd->cmd_queued = FALSE;
8358 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8359 (void *)mpt, (void *)cmd));
8360 return;
8361 }
8362 prevp = prevp->cmd_linkp;
8363 }
8364 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8365 }
8366
8367 /*
8368 * device and bus reset handling
8369 *
8370 * Notes:
8371 * - RESET_ALL: reset the controller
8372 * - RESET_TARGET: reset the target specified in scsi_address
8373 */
8374 static int
8375 mptsas_scsi_reset(struct scsi_address *ap, int level)
8376 {
8377 mptsas_t *mpt = ADDR2MPT(ap);
8378 int rval;
8379 mptsas_tgt_private_t *tgt_private;
8380 mptsas_target_t *ptgt = NULL;
8381
8382 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8383 ptgt = tgt_private->t_private;
8384 if (ptgt == NULL) {
8385 return (FALSE);
8386 }
8387 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8388 level));
8389
8390 mutex_enter(&mpt->m_mutex);
8391 /*
8392 * if we are not in panic set up a reset delay for this target
8393 */
8394 if (!ddi_in_panic()) {
8395 mptsas_setup_bus_reset_delay(mpt);
8396 } else {
8397 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8398 }
8399 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8400 mutex_exit(&mpt->m_mutex);
8401
8402 /*
8403 * The transport layer expect to only see TRUE and
8404 * FALSE. Therefore, we will adjust the return value
8405 * if mptsas_do_scsi_reset returns FAILED.
8406 */
8407 if (rval == FAILED)
8408 rval = FALSE;
8409 return (rval);
8410 }
8411
8412 static int
8413 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8414 {
8415 int rval = FALSE;
8416 uint8_t config, disk;
8417 mptsas_slots_t *slots = mpt->m_active;
8418
8419 ASSERT(mutex_owned(&mpt->m_mutex));
8420
8421 if (mptsas_debug_resets) {
8422 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8423 devhdl);
8424 }
8425
8426 /*
8427 * Issue a Target Reset message to the target specified but not to a
8428 * disk making up a raid volume. Just look through the RAID config
8429 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8430 * list, then don't reset this target.
8431 */
8432 for (config = 0; config < slots->m_num_raid_configs; config++) {
8433 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8434 if (devhdl == slots->m_raidconfig[config].
8435 m_physdisk_devhdl[disk]) {
8436 return (TRUE);
8437 }
8438 }
8439 }
8440
8441 rval = mptsas_ioc_task_management(mpt,
8442 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8443
8444 mptsas_doneq_empty(mpt);
8445 return (rval);
8446 }
8447
8448 static int
8449 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8450 void (*callback)(caddr_t), caddr_t arg)
8451 {
8452 mptsas_t *mpt = ADDR2MPT(ap);
8453
8454 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8455
8456 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8457 &mpt->m_mutex, &mpt->m_reset_notify_listf));
8458 }
8459
8460 static int
8461 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8462 {
8463 dev_info_t *lun_dip = NULL;
8464
8465 ASSERT(sd != NULL);
8466 ASSERT(name != NULL);
8467 lun_dip = sd->sd_dev;
8468 ASSERT(lun_dip != NULL);
8469
8470 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8471 return (1);
8472 } else {
8473 return (0);
8474 }
8475 }
8476
8477 static int
8478 mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
8479 {
8480 return (mptsas_get_name(sd, name, len));
8481 }
8482
8483 void
8484 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8485 {
8486
8487 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8488
8489 /*
8490 * if the bus is draining/quiesced, no changes to the throttles
8491 * are allowed. Not allowing change of throttles during draining
8492 * limits error recovery but will reduce draining time
8493 *
8494 * all throttles should have been set to HOLD_THROTTLE
8495 */
8496 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8497 return;
8498 }
8499
8500 if (what == HOLD_THROTTLE) {
8501 ptgt->m_t_throttle = HOLD_THROTTLE;
8502 } else if (ptgt->m_reset_delay == 0) {
8503 ptgt->m_t_throttle = what;
8504 }
8505 }
8506
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		/* Default disposition unless overridden per task type. */
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				if (cmd->cmd_tgt_addr->m_timeout < 0) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			/*
			 * Abort task set reports CMD_ABORTED rather than
			 * CMD_RESET, then shares the target/lun matching
			 * below with logical unit reset.
			 */
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		/* Walk the waitq, matching on target only. */
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		/*
		 * The tx_waitq has its own lock; it is dropped around the
		 * completion calls since doneq processing must not be done
		 * while holding m_tx_waitq_mutex.
		 */
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		/* Same as above, but match on both target and lun. */
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
8652
/*
 * Clean up hba state, abort all outstanding command and commands in waitq
 * reset timeout of all targets.
 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame. Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				/*
				 * Wake every waiter class; each checks its
				 * own command's CFLAG_FINISHED bit.
				 */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/* IOC-internal commands have waiters, not a doneq. */
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/* Drop the tx_waitq lock around doneq processing. */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources.
	 */
	mutex_exit(&mpt->m_mutex);
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);
	mutex_enter(&mpt->m_mutex);
}
8744
8745 /*
8746 * set pkt_reason and OR in pkt_statistics flag
8747 */
8748 static void
8749 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8750 uint_t stat)
8751 {
8752 #ifndef __lock_lint
8753 _NOTE(ARGUNUSED(mpt))
8754 #endif
8755
8756 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8757 (void *)cmd, reason, stat));
8758
8759 if (cmd) {
8760 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8761 cmd->cmd_pkt->pkt_reason = reason;
8762 }
8763 cmd->cmd_pkt->pkt_statistics |= stat;
8764 }
8765 }
8766
/*
 * Arm the global reset-delay watch timeout if it is not already pending
 * and timeouts are enabled.  mptsas_reset_watch is shared by all mpt
 * instances and is protected by mptsas_global_mutex.
 */
static void
mptsas_start_watch_reset_delay()
{
	NDBG22(("mptsas_start_watch_reset_delay"));

	mutex_enter(&mptsas_global_mutex);
	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
		    drv_usectohz((clock_t)
		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
		ASSERT(mptsas_reset_watch != NULL);
	}
	mutex_exit(&mptsas_global_mutex);
}
8781
/*
 * After a bus reset, hold the throttle of every known target and start
 * each target's reset-delay countdown, then make sure the reset-delay
 * watch timeout is running to eventually restore the throttles.
 * Caller is expected to hold m_mutex (targets are walked via m_tgttbl).
 */
static void
mptsas_setup_bus_reset_delay(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_setup_bus_reset_delay"));
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	mptsas_start_watch_reset_delay();
}
8800
/*
 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
 * mpt instance for active reset delays
 */
static void
mptsas_watch_reset_delay(void *arg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(arg))
#endif

	mptsas_t	*mpt;
	int		not_done = 0;

	NDBG22(("mptsas_watch_reset_delay"));

	/*
	 * Clear the pending-timeout id first so that a concurrent
	 * mptsas_start_watch_reset_delay() may re-arm the watch.
	 */
	mutex_enter(&mptsas_global_mutex);
	mptsas_reset_watch = 0;
	mutex_exit(&mptsas_global_mutex);
	rw_enter(&mptsas_global_rwlock, RW_READER);
	for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
		/* Skip instances whose transport is not attached. */
		if (mpt->m_tran == 0) {
			continue;
		}
		mutex_enter(&mpt->m_mutex);
		/* Subr returns nonzero (-1) while delays are still active. */
		not_done += mptsas_watch_reset_delay_subr(mpt);
		mutex_exit(&mpt->m_mutex);
	}
	rw_exit(&mptsas_global_rwlock);

	/* Re-arm the watch while any instance still has a pending delay. */
	if (not_done) {
		mptsas_start_watch_reset_delay();
	}
}
8835
/*
 * Age the reset delay of every target on one instance by one watch tick.
 * Targets whose delay expires get their throttle restored to MAX_THROTTLE
 * and the HBA restarted.  Returns 0 when no delays remain, nonzero (-1)
 * when at least one target still has a pending delay (the caller sums
 * return values and only tests for nonzero).
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		if (ptgt->m_reset_delay != 0) {
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			if (ptgt->m_reset_delay <= 0) {
				/* Delay expired: resume full throttle. */
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Kick the HBA once if any target was released above. */
	if (restart > 0) {
		mptsas_restart_hba(mpt);
	}
	return (done);
}
8872
8873 #ifdef MPTSAS_TEST
8874 static void
8875 mptsas_test_reset(mptsas_t *mpt, int target)
8876 {
8877 mptsas_target_t *ptgt = NULL;
8878
8879 if (mptsas_rtest == target) {
8880 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8881 mptsas_rtest = -1;
8882 }
8883 if (mptsas_rtest == -1) {
8884 NDBG22(("mptsas_test_reset success"));
8885 }
8886 }
8887 }
8888 #endif
8889
/*
 * abort handling:
 *
 * Notes:
 *	- if pkt is not NULL, abort just that command
 *	- if pkt is NULL, abort all outstanding commands for target
 */
static int
mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			target, lun;

	/*
	 * The devhdl/lun are taken from the per-target tran private data
	 * rather than ap->a_target, since SAS targets are addressed by
	 * device handle.
	 */
	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	target = tgt_private->t_private->m_devhdl;
	lun = tgt_private->t_lun;

	NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));

	/* All abort work is done under the instance mutex. */
	mutex_enter(&mpt->m_mutex);
	rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
8918
/*
 * Worker for mptsas_scsi_abort(); runs with m_mutex held.
 * Returns TRUE on successful abort, FALSE otherwise.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/*
			 * Still on the waitq; never reached the hardware,
			 * so it can be completed locally as aborted.
			 */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Deliver any commands completed above to their callbacks. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8998
8999 /*
9000 * capability handling:
9001 * (*tran_getcap). Get the capability named, and return its value.
9002 */
9003 static int
9004 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9005 {
9006 mptsas_t *mpt = ADDR2MPT(ap);
9007 int ckey;
9008 int rval = FALSE;
9009
9010 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9011 ap->a_target, cap, tgtonly));
9012
9013 mutex_enter(&mpt->m_mutex);
9014
9015 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9016 mutex_exit(&mpt->m_mutex);
9017 return (UNDEFINED);
9018 }
9019
9020 switch (ckey) {
9021 case SCSI_CAP_DMA_MAX:
9022 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9023 break;
9024 case SCSI_CAP_ARQ:
9025 rval = TRUE;
9026 break;
9027 case SCSI_CAP_MSG_OUT:
9028 case SCSI_CAP_PARITY:
9029 case SCSI_CAP_UNTAGGED_QING:
9030 rval = TRUE;
9031 break;
9032 case SCSI_CAP_TAGGED_QING:
9033 rval = TRUE;
9034 break;
9035 case SCSI_CAP_RESET_NOTIFICATION:
9036 rval = TRUE;
9037 break;
9038 case SCSI_CAP_LINKED_CMDS:
9039 rval = FALSE;
9040 break;
9041 case SCSI_CAP_QFULL_RETRIES:
9042 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9043 tran_tgt_private))->t_private->m_qfull_retries;
9044 break;
9045 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9046 rval = drv_hztousec(((mptsas_tgt_private_t *)
9047 (ap->a_hba_tran->tran_tgt_private))->
9048 t_private->m_qfull_retry_interval) / 1000;
9049 break;
9050 case SCSI_CAP_CDB_LEN:
9051 rval = CDB_GROUP4;
9052 break;
9053 case SCSI_CAP_INTERCONNECT_TYPE:
9054 rval = INTERCONNECT_SAS;
9055 break;
9056 case SCSI_CAP_TRAN_LAYER_RETRIES:
9057 if (mpt->m_ioc_capabilities &
9058 MPI2_IOCFACTS_CAPABILITY_TLR)
9059 rval = TRUE;
9060 else
9061 rval = FALSE;
9062 break;
9063 default:
9064 rval = UNDEFINED;
9065 break;
9066 }
9067
9068 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9069
9070 mutex_exit(&mpt->m_mutex);
9071 return (rval);
9072 }
9073
/*
 * (*tran_setcap).  Set the capability named to the value given.
 */
static int
mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
{
	mptsas_t	*mpt = ADDR2MPT(ap);
	int		ckey;
	int		rval = FALSE;

	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
	    ap->a_target, cap, value, tgtonly));

	/* Capabilities may only be set per-target, never whole-bus. */
	if (!tgtonly) {
		return (rval);
	}

	mutex_enter(&mpt->m_mutex);

	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
		mutex_exit(&mpt->m_mutex);
		return (UNDEFINED);
	}

	switch (ckey) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_ARQ:
		/*
		 * We cannot turn off arq so return false if asked to
		 */
		if (value) {
			rval = TRUE;
		} else {
			rval = FALSE;
		}
		break;
	case SCSI_CAP_TAGGED_QING:
		/* Enabling tagged queuing re-opens the target's throttle. */
		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
		    (ap->a_hba_tran->tran_tgt_private))->t_private,
		    MAX_THROTTLE);
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRIES:
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retries = (uchar_t)value;
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		/* Caller supplies milliseconds; stored internally in ticks. */
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retry_interval =
		    drv_usectohz(value * 1000);
		rval = TRUE;
		break;
	default:
		rval = UNDEFINED;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
9145
9146 /*
9147 * Utility routine for mptsas_ifsetcap/ifgetcap
9148 */
9149 /*ARGSUSED*/
9150 static int
9151 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9152 {
9153 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9154
9155 if (!cap)
9156 return (FALSE);
9157
9158 *cidxp = scsi_hba_lookup_capstr(cap);
9159 return (TRUE);
9160 }
9161
/*
 * (Re)allocate the active-slot table.  Returns 0 on success, -1 on
 * allocation failure.  Any existing table's target/SMP hash tables and
 * RAID configuration data are carried over before the old table is freed.
 * Must only be called when no commands are outstanding.
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;
	int		rval = -1, i;

	/*
	 * if there are active commands, then we cannot
	 * change size of active slots array.
	 */
	ASSERT(mpt->m_ncmds == 0);

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		NDBG1(("new active alloc failed"));
		return (rval);
	}
	/*
	 * Since SMID 0 is reserved and the TM slot is reserved, the
	 * number of slots that can be used at any one time is
	 * m_max_requests - 2.
	 */
	new_active->m_n_slots = (mpt->m_max_requests - 2);
	new_active->m_size = size;
	new_active->m_tags = 1;
	if (old_active) {
		/* Preserve state that outlives a slot-table reallocation. */
		new_active->m_tgttbl = old_active->m_tgttbl;
		new_active->m_smptbl = old_active->m_smptbl;
		new_active->m_num_raid_configs =
		    old_active->m_num_raid_configs;
		for (i = 0; i < new_active->m_num_raid_configs; i++) {
			new_active->m_raidconfig[i] =
			    old_active->m_raidconfig[i];
		}
		mptsas_free_active_slots(mpt);
	}
	mpt->m_active = new_active;
	rval = 0;

	return (rval);
}
9206
9207 static void
9208 mptsas_free_active_slots(mptsas_t *mpt)
9209 {
9210 mptsas_slots_t *active = mpt->m_active;
9211 size_t size;
9212
9213 if (active == NULL)
9214 return;
9215 size = active->m_size;
9216 kmem_free(active, size);
9217 mpt->m_active = NULL;
9218 }
9219
9220 /*
9221 * Error logging, printing, and debug print routines.
9222 */
9223 static char *mptsas_label = "mpt_sas";
9224
/*PRINTFLIKE3*/
/*
 * Driver-wide logging helper: format the message into the shared
 * mptsas_log_buf (serialized by mptsas_log_mutex) and hand it to
 * scsi_log() against the instance's dip when one is available.
 * CE_CONT messages get a trailing newline; other levels do not.
 */
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list		ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	/*
	 * NOTE(review): vsprintf() performs an unbounded write into
	 * mptsas_log_buf, which is declared elsewhere — confirm the
	 * buffer is sized for the longest possible message, or switch
	 * to vsnprintf().
	 */
	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	if (level == CE_CONT) {
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9252
9253 #ifdef MPTSAS_DEBUG
/*PRINTFLIKE1*/
/*
 * Debug-build print helper (MPTSAS_DEBUG only).  Formats into the shared
 * mptsas_log_buf under mptsas_log_mutex and emits either via prom_printf()
 * (when PROM_PRINTF is defined) or scsi_log() at SCSI_DEBUG level.
 */
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list		ap;

	mutex_enter(&mptsas_log_mutex);

	/*
	 * NOTE(review): same unbounded vsprintf() as mptsas_log() —
	 * confirm mptsas_log_buf sizing or use vsnprintf().
	 */
	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
9274 #endif
9275
9276 /*
9277 * timeout handling
9278 */
9279 static void
9280 mptsas_watch(void *arg)
9281 {
9282 #ifndef __lock_lint
9283 _NOTE(ARGUNUSED(arg))
9284 #endif
9285
9286 mptsas_t *mpt;
9287 uint32_t doorbell;
9288
9289 NDBG30(("mptsas_watch"));
9290
9291 rw_enter(&mptsas_global_rwlock, RW_READER);
9292 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9293
9294 mutex_enter(&mpt->m_mutex);
9295
9296 /* Skip device if not powered on */
9297 if (mpt->m_options & MPTSAS_OPT_PM) {
9298 if (mpt->m_power_level == PM_LEVEL_D0) {
9299 (void) pm_busy_component(mpt->m_dip, 0);
9300 mpt->m_busy = 1;
9301 } else {
9302 mutex_exit(&mpt->m_mutex);
9303 continue;
9304 }
9305 }
9306
9307 /*
9308 * Check if controller is in a FAULT state. If so, reset it.
9309 */
9310 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9311 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9312 doorbell &= MPI2_DOORBELL_DATA_MASK;
9313 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9314 "code: %04x", doorbell);
9315 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9316 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9317 mptsas_log(mpt, CE_WARN, "Reset failed"
9318 "after fault was detected");
9319 }
9320 }
9321
9322 /*
9323 * For now, always call mptsas_watchsubr.
9324 */
9325 mptsas_watchsubr(mpt);
9326
9327 if (mpt->m_options & MPTSAS_OPT_PM) {
9328 mpt->m_busy = 0;
9329 (void) pm_idle_component(mpt->m_dip, 0);
9330 }
9331
9332 mutex_exit(&mpt->m_mutex);
9333 }
9334 rw_exit(&mptsas_global_rwlock);
9335
9336 mutex_enter(&mptsas_global_mutex);
9337 if (mptsas_timeouts_enabled)
9338 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9339 mutex_exit(&mptsas_global_mutex);
9340 }
9341
/*
 * Per-instance watchdog work, run from mptsas_watch() with m_mutex held:
 * ages the timeout of every occupied slot and every target, drains or
 * restores throttles as needed, and escalates repeated timeouts into a
 * target reset or (past a threshold) a faked offline of the target.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				/* Regular SCSI command: age its timeout. */
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot.  Drain
					 * throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			/*
			 * While m_timebase has not yet exceeded one watch
			 * tick, ramp it up and defer timeout processing for
			 * this target — presumably a warm-up period after
			 * the first command; confirm against where
			 * m_timebase is initialized.
			 */
			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl,
				    MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			/*
			 * Track how long since the last timeout; once the
			 * quiet interval exceeds mptsas_timeout_interval
			 * the timeout count is forgiven.
			 */
			if (ptgt->m_timeout_count > 0) {
				ptgt->m_timeout_interval +=
				    mptsas_scsi_watchdog_tick;
			}
			if (ptgt->m_timeout_interval >
			    mptsas_timeout_interval) {
				ptgt->m_timeout_interval = 0;
				ptgt->m_timeout_count = 0;
			}

			if (ptgt->m_timeout < 0) {
				/*
				 * Timeout fired: either reset the target,
				 * or kill it after too many timeouts.
				 */
				ptgt->m_timeout_count++;
				if (ptgt->m_timeout_count >
				    mptsas_timeout_threshold) {
					ptgt->m_timeout_count = 0;
					mptsas_kill_target(mpt, ptgt);
				} else {
					mptsas_cmd_timeout(mpt,
					    ptgt->m_devhdl);
				}
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl,
				    MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				/* Timeout imminent: start draining now. */
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
9458
9459 /*
9460 * timeout recovery
9461 */
9462 static void
9463 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9464 {
9465
9466 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9467 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9468 "Target %d", devhdl);
9469
9470 /*
9471 * If the current target is not the target passed in,
9472 * try to reset that target.
9473 */
9474 NDBG29(("mptsas_cmd_timeout: device reset"));
9475 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9476 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9477 "recovery failed!", devhdl);
9478 }
9479 }
9480
/*
 * target causing too many timeouts
 *
 * Take a chronically timing-out target offline by fabricating a topology
 * change event (MPTSAS_DR_EVENT_OFFLINE_TARGET) and dispatching it to the
 * DR taskq, as if the device had been hot-removed.
 */
static void
mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt)
{
	mptsas_topo_change_list_t	*topo_node = NULL;

	NDBG29(("mptsas_tgt_kill: target=%d", ptgt->m_devhdl));
	mptsas_log(mpt, CE_WARN, "timeout threshold exceeded for "
	    "Target %d", ptgt->m_devhdl);

	/* Build the fake offline event; the taskq handler frees it. */
	topo_node = kmem_zalloc(sizeof (mptsas_topo_change_list_t), KM_SLEEP);
	topo_node->mpt = mpt;
	topo_node->un.phymask = ptgt->m_phymask;
	topo_node->event = MPTSAS_DR_EVENT_OFFLINE_TARGET;
	topo_node->devhdl = ptgt->m_devhdl;
	if (ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		topo_node->flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
	else
		topo_node->flags = MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
	topo_node->object = NULL;

	/*
	 * Launch DR taskq to fake topology change
	 */
	if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
	    mptsas_handle_dr, (void *)topo_node,
	    DDI_NOSLEEP)) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
		    "for fake offline event failed. \n");
	}
}
9514
9515 /*
9516 * Device / Hotplug control
9517 */
9518 static int
9519 mptsas_scsi_quiesce(dev_info_t *dip)
9520 {
9521 mptsas_t *mpt;
9522 scsi_hba_tran_t *tran;
9523
9524 tran = ddi_get_driver_private(dip);
9525 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9526 return (-1);
9527
9528 return (mptsas_quiesce_bus(mpt));
9529 }
9530
9531 static int
9532 mptsas_scsi_unquiesce(dev_info_t *dip)
9533 {
9534 mptsas_t *mpt;
9535 scsi_hba_tran_t *tran;
9536
9537 tran = ddi_get_driver_private(dip);
9538 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9539 return (-1);
9540
9541 return (mptsas_unquiesce_bus(mpt));
9542 }
9543
/*
 * Quiesce the bus: hold every target's throttle and wait (interruptibly)
 * for outstanding commands to drain.  Returns 0 once quiesced, -1 if the
 * wait was interrupted by a signal (in which case throttles are restored
 * and the HBA restarted).
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/*
		 * Arm the drain-check timeout, then sleep until either the
		 * drain completes (cv_signal from mptsas_ncmds_checkdrain)
		 * or a signal interrupts the wait.
		 */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Re-open every throttle and restart the HBA. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl,
				    MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel a still-pending drain-check timeout; the
			 * mutex must be dropped before untimeout() since
			 * the callback takes m_mutex itself.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9604
/*
 * Undo a quiesce: clear the quiesced state, restore every target's
 * throttle to MAX_THROTTLE, and restart command processing.
 * Always returns 0.
 */
static int
mptsas_unquiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_unquiesce_bus"));
	mutex_enter(&mpt->m_mutex);
	mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mptsas_restart_hba(mpt);
	mutex_exit(&mpt->m_mutex);
	return (0);
}
9625
/*
 * timeout() callback armed by mptsas_quiesce_bus(): if the command queue
 * has drained, wake the quiescing thread; otherwise re-hold all throttles
 * (a bus reset may have reopened them) and re-arm itself.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl,
				    MPTSAS_HASH_NEXT);
			}

			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
9659
9660 /*ARGSUSED*/
9661 static void
9662 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9663 {
9664 int i;
9665 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9666 char buf[128];
9667
9668 buf[0] = '\0';
9669 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9670 Tgt(cmd), Lun(cmd)));
9671 (void) sprintf(&buf[0], "\tcdb=[");
9672 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9673 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9674 }
9675 (void) sprintf(&buf[strlen(buf)], " ]");
9676 NDBG25(("?%s\n", buf));
9677 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9678 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9679 cmd->cmd_pkt->pkt_state));
9680 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
9681 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
9682 }
9683
/*
 * Build and issue a passthrough request that was prepared by
 * mptsas_do_passthru().  The caller's request image (pt->request) is copied
 * into the request frame for the command's slot; if data and/or data-out
 * buffers exist, 64-bit simple SGEs for them are appended immediately after
 * the request.  For SCSI IO / RAID passthrough requests the sense buffer
 * address and SGL offset are also filled in before the request descriptor
 * is posted to the IOC.
 *
 * Called with m_mutex held (the request frame for cmd->cmd_slot is owned by
 * this command).  Completion is reported asynchronously; the waiter in
 * mptsas_do_passthru() blocks on m_passthru_cv.
 */
static void
mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	caddr_t			memp;
	pMPI2RequestHeader_t	request_hdrp;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
	uint32_t		request_size, data_size, dataout_size;
	uint32_t		direction;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;
	uint32_t		request_desc_low, request_desc_high = 0;
	uint32_t		i, sense_bufp;
	uint8_t			desc_type;
	uint8_t			*request, function;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;

	/* Default descriptor; changed to SCSI IO below when appropriate. */
	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	request = pt->request;
	direction = pt->direction;
	request_size = pt->request_size;
	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/*
	 * Store the passthrough message in the memory location
	 * corresponding to our slot number.
	 */
	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
	request_hdrp = (pMPI2RequestHeader_t)memp;
	bzero(memp, mpt->m_req_frame_size);

	/* Copy the caller-built request image into the frame byte by byte. */
	for (i = 0; i < request_size; i++) {
		bcopy(request + i, memp + i, 1);
	}

	if (data_size || dataout_size) {
		pMpi2SGESimple64_t	sgep;
		uint32_t		sge_flags;

		/* SGEs go immediately after the request message. */
		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
		    request_size);
		if (dataout_size) {

			/*
			 * Data-out (host-to-IOC) SGE comes first when both
			 * directions are present.
			 */
			sge_flags = dataout_size |
			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_END_OF_BUFFER |
			    MPI2_SGE_FLAGS_HOST_TO_IOC |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);
			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
			ddi_put32(acc_hdl, &sgep->Address.Low,
			    (uint32_t)(dataout_cookie.dmac_laddress &
			    0xffffffffull));
			ddi_put32(acc_hdl, &sgep->Address.High,
			    (uint32_t)(dataout_cookie.dmac_laddress
			    >> 32));
			sgep++;
		}
		/* Final (data) SGE: last element, end of buffer and list. */
		sge_flags = data_size;
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sgep->FlagsLength,
		    sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(data_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(data_cookie.dmac_laddress >> 32));
	}

	function = request_hdrp->Function;
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		pMpi2SCSIIORequest_t	scsi_io_req;

		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
		/*
		 * Put SGE for data and data_out buffer at the end of
		 * scsi_io_request message header.(64 bytes in total)
		 * Following above SGEs, the residual space will be
		 * used by sense data.
		 */
		ddi_put8(acc_hdl,
		    &scsi_io_req->SenseBufferLength,
		    (uint8_t)(request_size - 64));

		/* Sense data lives 64 bytes into this slot's frame. */
		sense_bufp = mpt->m_req_frame_dma_addr +
		    (mpt->m_req_frame_size * cmd->cmd_slot);
		sense_bufp += 64;
		ddi_put32(acc_hdl,
		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);

		/*
		 * Set SGLOffset0 value (in units of 4-byte words).
		 */
		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);

		/*
		 * Setup descriptor info.  RAID passthrough must use the
		 * default request descriptor which is already set, so if this
		 * is a SCSI IO request, change the descriptor to SCSI IO.
		 */
		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
			request_desc_high = (ddi_get16(acc_hdl,
			    &scsi_io_req->DevHandle) << 16);
		}
	}

	/*
	 * We must wait till the message has been completed before
	 * beginning the next message so we wait for this one to
	 * finish.
	 */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
	/* FMA: flag a service impact if the DMA/access handles went bad. */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
9824
9825
9826
9827 static int
9828 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
9829 uint8_t *data, uint32_t request_size, uint32_t reply_size,
9830 uint32_t data_size, uint32_t direction, uint8_t *dataout,
9831 uint32_t dataout_size, short timeout, int mode)
9832 {
9833 mptsas_pt_request_t pt;
9834 mptsas_dma_alloc_state_t data_dma_state;
9835 mptsas_dma_alloc_state_t dataout_dma_state;
9836 caddr_t memp;
9837 mptsas_cmd_t *cmd = NULL;
9838 struct scsi_pkt *pkt;
9839 uint32_t reply_len = 0, sense_len = 0;
9840 pMPI2RequestHeader_t request_hdrp;
9841 pMPI2RequestHeader_t request_msg;
9842 pMPI2DefaultReply_t reply_msg;
9843 Mpi2SCSIIOReply_t rep_msg;
9844 int i, status = 0, pt_flags = 0, rv = 0;
9845 int rvalue;
9846 uint8_t function;
9847
9848 ASSERT(mutex_owned(&mpt->m_mutex));
9849
9850 reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
9851 bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
9852 request_msg = kmem_zalloc(request_size, KM_SLEEP);
9853
9854 mutex_exit(&mpt->m_mutex);
9855 /*
9856 * copy in the request buffer since it could be used by
9857 * another thread when the pt request into waitq
9858 */
9859 if (ddi_copyin(request, request_msg, request_size, mode)) {
9860 mutex_enter(&mpt->m_mutex);
9861 status = EFAULT;
9862 mptsas_log(mpt, CE_WARN, "failed to copy request data");
9863 goto out;
9864 }
9865 mutex_enter(&mpt->m_mutex);
9866
9867 function = request_msg->Function;
9868 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
9869 pMpi2SCSITaskManagementRequest_t task;
9870 task = (pMpi2SCSITaskManagementRequest_t)request_msg;
9871 mptsas_setup_bus_reset_delay(mpt);
9872 rv = mptsas_ioc_task_management(mpt, task->TaskType,
9873 task->DevHandle, (int)task->LUN[1], reply, reply_size,
9874 mode);
9875
9876 if (rv != TRUE) {
9877 status = EIO;
9878 mptsas_log(mpt, CE_WARN, "task management failed");
9879 }
9880 goto out;
9881 }
9882
9883 if (data_size != 0) {
9884 data_dma_state.size = data_size;
9885 if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
9886 status = ENOMEM;
9887 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9888 "resource");
9889 goto out;
9890 }
9891 pt_flags |= MPTSAS_DATA_ALLOCATED;
9892 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9893 mutex_exit(&mpt->m_mutex);
9894 for (i = 0; i < data_size; i++) {
9895 if (ddi_copyin(data + i, (uint8_t *)
9896 data_dma_state.memp + i, 1, mode)) {
9897 mutex_enter(&mpt->m_mutex);
9898 status = EFAULT;
9899 mptsas_log(mpt, CE_WARN, "failed to "
9900 "copy read data");
9901 goto out;
9902 }
9903 }
9904 mutex_enter(&mpt->m_mutex);
9905 }
9906 }
9907
9908 if (dataout_size != 0) {
9909 dataout_dma_state.size = dataout_size;
9910 if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
9911 status = ENOMEM;
9912 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9913 "resource");
9914 goto out;
9915 }
9916 pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
9917 mutex_exit(&mpt->m_mutex);
9918 for (i = 0; i < dataout_size; i++) {
9919 if (ddi_copyin(dataout + i, (uint8_t *)
9920 dataout_dma_state.memp + i, 1, mode)) {
9921 mutex_enter(&mpt->m_mutex);
9922 mptsas_log(mpt, CE_WARN, "failed to copy out"
9923 " data");
9924 status = EFAULT;
9925 goto out;
9926 }
9927 }
9928 mutex_enter(&mpt->m_mutex);
9929 }
9930
9931 if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
9932 status = EAGAIN;
9933 mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
9934 goto out;
9935 }
9936 pt_flags |= MPTSAS_REQUEST_POOL_CMD;
9937
9938 bzero((caddr_t)cmd, sizeof (*cmd));
9939 bzero((caddr_t)pkt, scsi_pkt_size());
9940 bzero((caddr_t)&pt, sizeof (pt));
9941
9942 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
9943
9944 pt.request = (uint8_t *)request_msg;
9945 pt.direction = direction;
9946 pt.request_size = request_size;
9947 pt.data_size = data_size;
9948 pt.dataout_size = dataout_size;
9949 pt.data_cookie = data_dma_state.cookie;
9950 pt.dataout_cookie = dataout_dma_state.cookie;
9951
9952 /*
9953 * Form a blank cmd/pkt to store the acknowledgement message
9954 */
9955 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
9956 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
9957 pkt->pkt_ha_private = (opaque_t)&pt;
9958 pkt->pkt_flags = FLAG_HEAD;
9959 pkt->pkt_time = timeout;
9960 cmd->cmd_pkt = pkt;
9961 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
9962
9963 /*
9964 * Save the command in a slot
9965 */
9966 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
9967 /*
9968 * Once passthru command get slot, set cmd_flags
9969 * CFLAG_PREPARED.
9970 */
9971 cmd->cmd_flags |= CFLAG_PREPARED;
9972 mptsas_start_passthru(mpt, cmd);
9973 } else {
9974 mptsas_waitq_add(mpt, cmd);
9975 }
9976
9977 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
9978 cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
9979 }
9980
9981 if (cmd->cmd_flags & CFLAG_PREPARED) {
9982 memp = mpt->m_req_frame + (mpt->m_req_frame_size *
9983 cmd->cmd_slot);
9984 request_hdrp = (pMPI2RequestHeader_t)memp;
9985 }
9986
9987 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
9988 status = ETIMEDOUT;
9989 mptsas_log(mpt, CE_WARN, "passthrough command timeout");
9990 pt_flags |= MPTSAS_CMD_TIMEOUT;
9991 goto out;
9992 }
9993
9994 if (cmd->cmd_rfm) {
9995 /*
9996 * cmd_rfm is zero means the command reply is a CONTEXT
9997 * reply and no PCI Write to post the free reply SMFA
9998 * because no reply message frame is used.
9999 * cmd_rfm is non-zero means the reply is a ADDRESS
10000 * reply and reply message frame is used.
10001 */
10002 pt_flags |= MPTSAS_ADDRESS_REPLY;
10003 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10004 DDI_DMA_SYNC_FORCPU);
10005 reply_msg = (pMPI2DefaultReply_t)
10006 (mpt->m_reply_frame + (cmd->cmd_rfm -
10007 mpt->m_reply_frame_dma_addr));
10008 }
10009
10010 mptsas_fma_check(mpt, cmd);
10011 if (pkt->pkt_reason == CMD_TRAN_ERR) {
10012 status = EAGAIN;
10013 mptsas_log(mpt, CE_WARN, "passthru fma error");
10014 goto out;
10015 }
10016 if (pkt->pkt_reason == CMD_RESET) {
10017 status = EAGAIN;
10018 mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
10019 goto out;
10020 }
10021
10022 if (pkt->pkt_reason == CMD_INCOMPLETE) {
10023 status = EIO;
10024 mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
10025 goto out;
10026 }
10027
10028 mutex_exit(&mpt->m_mutex);
10029 if (cmd->cmd_flags & CFLAG_PREPARED) {
10030 function = request_hdrp->Function;
10031 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10032 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10033 reply_len = sizeof (MPI2_SCSI_IO_REPLY);
10034 sense_len = reply_size - reply_len;
10035 } else {
10036 reply_len = reply_size;
10037 sense_len = 0;
10038 }
10039
10040 for (i = 0; i < reply_len; i++) {
10041 if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
10042 mode)) {
10043 mutex_enter(&mpt->m_mutex);
10044 status = EFAULT;
10045 mptsas_log(mpt, CE_WARN, "failed to copy out "
10046 "reply data");
10047 goto out;
10048 }
10049 }
10050 for (i = 0; i < sense_len; i++) {
10051 if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
10052 reply + reply_len + i, 1, mode)) {
10053 mutex_enter(&mpt->m_mutex);
10054 status = EFAULT;
10055 mptsas_log(mpt, CE_WARN, "failed to copy out "
10056 "sense data");
10057 goto out;
10058 }
10059 }
10060 }
10061
10062 if (data_size) {
10063 if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10064 (void) ddi_dma_sync(data_dma_state.handle, 0, 0,
10065 DDI_DMA_SYNC_FORCPU);
10066 for (i = 0; i < data_size; i++) {
10067 if (ddi_copyout((uint8_t *)(
10068 data_dma_state.memp + i), data + i, 1,
10069 mode)) {
10070 mutex_enter(&mpt->m_mutex);
10071 status = EFAULT;
10072 mptsas_log(mpt, CE_WARN, "failed to "
10073 "copy out the reply data");
10074 goto out;
10075 }
10076 }
10077 }
10078 }
10079 mutex_enter(&mpt->m_mutex);
10080 out:
10081 /*
10082 * Put the reply frame back on the free queue, increment the free
10083 * index, and write the new index to the free index register. But only
10084 * if this reply is an ADDRESS reply.
10085 */
10086 if (pt_flags & MPTSAS_ADDRESS_REPLY) {
10087 ddi_put32(mpt->m_acc_free_queue_hdl,
10088 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10089 cmd->cmd_rfm);
10090 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10091 DDI_DMA_SYNC_FORDEV);
10092 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10093 mpt->m_free_index = 0;
10094 }
10095 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10096 mpt->m_free_index);
10097 }
10098 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10099 mptsas_remove_cmd(mpt, cmd);
10100 pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10101 }
10102 if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
10103 mptsas_return_to_pool(mpt, cmd);
10104 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
10105 if (mptsas_check_dma_handle(data_dma_state.handle) !=
10106 DDI_SUCCESS) {
10107 ddi_fm_service_impact(mpt->m_dip,
10108 DDI_SERVICE_UNAFFECTED);
10109 status = EFAULT;
10110 }
10111 mptsas_dma_free(&data_dma_state);
10112 }
10113 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
10114 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
10115 DDI_SUCCESS) {
10116 ddi_fm_service_impact(mpt->m_dip,
10117 DDI_SERVICE_UNAFFECTED);
10118 status = EFAULT;
10119 }
10120 mptsas_dma_free(&dataout_dma_state);
10121 }
10122 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
10123 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10124 mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
10125 }
10126 }
10127 if (request_msg)
10128 kmem_free(request_msg, request_size);
10129
10130 return (status);
10131 }
10132
10133 static int
10134 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10135 {
10136 /*
10137 * If timeout is 0, set timeout to default of 60 seconds.
10138 */
10139 if (data->Timeout == 0) {
10140 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10141 }
10142
10143 if (((data->DataSize == 0) &&
10144 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10145 ((data->DataSize != 0) &&
10146 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10147 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10148 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10149 (data->DataOutSize != 0))))) {
10150 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10151 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10152 } else {
10153 data->DataOutSize = 0;
10154 }
10155 /*
10156 * Send passthru request messages
10157 */
10158 return (mptsas_do_passthru(mpt,
10159 (uint8_t *)((uintptr_t)data->PtrRequest),
10160 (uint8_t *)((uintptr_t)data->PtrReply),
10161 (uint8_t *)((uintptr_t)data->PtrData),
10162 data->RequestSize, data->ReplySize,
10163 data->DataSize, data->DataDirection,
10164 (uint8_t *)((uintptr_t)data->PtrDataOut),
10165 data->DataOutSize, data->Timeout, mode));
10166 } else {
10167 return (EINVAL);
10168 }
10169 }
10170
10171 static uint8_t
10172 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10173 {
10174 uint8_t index;
10175
10176 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10177 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10178 return (index);
10179 }
10180 }
10181
10182 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10183 }
10184
/*
 * Build and post a FW diagnostic request (buffer POST or RELEASE, chosen by
 * diag->function) in the request frame for this command's slot, then write
 * the request descriptor to start it.  The mptsas_diag_request_t is passed
 * via pkt->pkt_ha_private by mptsas_post_fw_diag_buffer() /
 * mptsas_release_fw_diag_buffer(), which wait for completion on
 * m_fw_diag_cv.  Called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* POST: hand the pre-allocated DMA buffer to the firmware. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the caller-supplied product-specific words. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE: ask the firmware to give the buffer back. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	/* FMA: flag a service impact if the DMA/access handles went bad. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10256
/*
 * Post a FW diagnostic buffer to the IOC.  Allocates a command from the
 * IOC command pool, issues a DIAG_BUFFER_POST via mptsas_start_diag(),
 * waits for completion on m_fw_diag_cv, and checks the reply's IOCStatus.
 * On success the buffer is marked valid and owned by firmware.
 *
 * *return_code receives an MPTSAS_FW_DIAG_ERROR_* code for the ioctl
 * caller; the function itself returns DDI_SUCCESS / DDI_FAILURE.
 * Called with m_mutex held (cv_wait below requires it).
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads this via pkt->pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private	= (opaque_t)&diag;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= 60;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion handler marks the command finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* A prepared command holds a slot; removing it frees that slot. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10406
/*
 * Ask the firmware to release a previously posted FW diagnostic buffer.
 * Allocates a command from the IOC pool, issues a DIAG_RELEASE via
 * mptsas_start_diag(), waits on m_fw_diag_cv, and checks the reply's
 * IOCStatus.  For MPTSAS_FW_DIAG_TYPE_UNREGISTER requests the buffer's
 * unique ID is also invalidated on success.
 *
 * *return_code receives an MPTSAS_FW_DIAG_ERROR_* code for the ioctl
 * caller; the function itself returns DDI_SUCCESS / DDI_FAILURE.
 * Called with m_mutex held (cv_wait below requires it).
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads this via pkt->pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private	= (opaque_t)&diag;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= 60;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion handler marks the command finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* A prepared command holds a slot; removing it frees that slot. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10565
/*
 * Handle a FW diagnostic REGISTER ioctl: validate the requested buffer
 * type and unique ID, allocate a contiguous DMA buffer of the requested
 * size, and post it to the firmware.  On any failure the DMA buffer is
 * freed and *return_code carries an MPTSAS_FW_DIAG_ERROR_* code.
 *
 * Returns DDI_SUCCESS / DDI_FAILURE.  Called with m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should not be found.  If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	/* The buffer list is indexed by buffer type, one entry per type. */
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled.  The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	/* Product-specific words only apply to TRACE buffers. */
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
10672
10673 static int
10674 mptsas_diag_unregister(mptsas_t *mpt,
10675 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10676 {
10677 mptsas_fw_diagnostic_buffer_t *pBuffer;
10678 uint8_t i;
10679 uint32_t unique_id;
10680 int status;
10681
10682 ASSERT(mutex_owned(&mpt->m_mutex));
10683
10684 unique_id = diag_unregister->UniqueId;
10685
10686 /*
10687 * Get the current buffer and look up the unique ID. The unique ID
10688 * should be there.
10689 */
10690 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10691 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10692 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10693 return (DDI_FAILURE);
10694 }
10695
10696 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10697
10698 /*
10699 * Try to release the buffer from FW before freeing it. If release
10700 * fails, don't free the DMA buffer in case FW tries to access it
10701 * later. If buffer is not owned by firmware, can't release it.
10702 */
10703 if (!pBuffer->owned_by_firmware) {
10704 status = DDI_SUCCESS;
10705 } else {
10706 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10707 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10708 }
10709
10710 /*
10711 * At this point, return the current status no matter what happens with
10712 * the DMA buffer.
10713 */
10714 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10715 if (status == DDI_SUCCESS) {
10716 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10717 DDI_SUCCESS) {
10718 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10719 "in mptsas_diag_unregister.");
10720 ddi_fm_service_impact(mpt->m_dip,
10721 DDI_SERVICE_UNAFFECTED);
10722 }
10723 mptsas_dma_free(&pBuffer->buffer_data);
10724 }
10725
10726 return (status);
10727 }
10728
10729 static int
10730 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10731 uint32_t *return_code)
10732 {
10733 mptsas_fw_diagnostic_buffer_t *pBuffer;
10734 uint8_t i;
10735 uint32_t unique_id;
10736
10737 ASSERT(mutex_owned(&mpt->m_mutex));
10738
10739 unique_id = diag_query->UniqueId;
10740
10741 /*
10742 * If ID is valid, query on ID.
10743 * If ID is invalid, query on buffer type.
10744 */
10745 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10746 i = diag_query->BufferType;
10747 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10748 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10749 return (DDI_FAILURE);
10750 }
10751 } else {
10752 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10753 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10754 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10755 return (DDI_FAILURE);
10756 }
10757 }
10758
10759 /*
10760 * Fill query structure with the diag buffer info.
10761 */
10762 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10763 diag_query->BufferType = pBuffer->buffer_type;
10764 diag_query->ExtendedType = pBuffer->extended_type;
10765 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10766 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10767 i++) {
10768 diag_query->ProductSpecific[i] =
10769 pBuffer->product_specific[i];
10770 }
10771 }
10772 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10773 diag_query->DriverAddedBufferSize = 0;
10774 diag_query->UniqueId = pBuffer->unique_id;
10775 diag_query->ApplicationFlags = 0;
10776 diag_query->DiagnosticFlags = 0;
10777
10778 /*
10779 * Set/Clear application flags
10780 */
10781 if (pBuffer->immediate) {
10782 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10783 } else {
10784 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10785 }
10786 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10787 diag_query->ApplicationFlags |=
10788 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10789 } else {
10790 diag_query->ApplicationFlags &=
10791 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10792 }
10793 if (pBuffer->owned_by_firmware) {
10794 diag_query->ApplicationFlags |=
10795 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10796 } else {
10797 diag_query->ApplicationFlags &=
10798 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10799 }
10800
10801 return (DDI_SUCCESS);
10802 }
10803
10804 static int
10805 mptsas_diag_read_buffer(mptsas_t *mpt,
10806 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10807 uint32_t *return_code, int ioctl_mode)
10808 {
10809 mptsas_fw_diagnostic_buffer_t *pBuffer;
10810 uint8_t i, *pData;
10811 uint32_t unique_id, byte;
10812 int status;
10813
10814 ASSERT(mutex_owned(&mpt->m_mutex));
10815
10816 unique_id = diag_read_buffer->UniqueId;
10817
10818 /*
10819 * Get the current buffer and look up the unique ID. The unique ID
10820 * should be there.
10821 */
10822 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10823 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10824 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10825 return (DDI_FAILURE);
10826 }
10827
10828 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10829
10830 /*
10831 * Make sure requested read is within limits
10832 */
10833 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10834 pBuffer->buffer_data.size) {
10835 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10836 return (DDI_FAILURE);
10837 }
10838
10839 /*
10840 * Copy the requested data from DMA to the diag_read_buffer. The DMA
10841 * buffer that was allocated is one contiguous buffer.
10842 */
10843 pData = (uint8_t *)(pBuffer->buffer_data.memp +
10844 diag_read_buffer->StartingOffset);
10845 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10846 DDI_DMA_SYNC_FORCPU);
10847 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10848 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10849 != 0) {
10850 return (DDI_FAILURE);
10851 }
10852 }
10853 diag_read_buffer->Status = 0;
10854
10855 /*
10856 * Set or clear the Force Release flag.
10857 */
10858 if (pBuffer->force_release) {
10859 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10860 } else {
10861 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10862 }
10863
10864 /*
10865 * If buffer is to be reregistered, make sure it's not already owned by
10866 * firmware first.
10867 */
10868 status = DDI_SUCCESS;
10869 if (!pBuffer->owned_by_firmware) {
10870 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10871 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10872 return_code);
10873 }
10874 }
10875
10876 return (status);
10877 }
10878
10879 static int
10880 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10881 uint32_t *return_code)
10882 {
10883 mptsas_fw_diagnostic_buffer_t *pBuffer;
10884 uint8_t i;
10885 uint32_t unique_id;
10886 int status;
10887
10888 ASSERT(mutex_owned(&mpt->m_mutex));
10889
10890 unique_id = diag_release->UniqueId;
10891
10892 /*
10893 * Get the current buffer and look up the unique ID. The unique ID
10894 * should be there.
10895 */
10896 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10897 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10898 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10899 return (DDI_FAILURE);
10900 }
10901
10902 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10903
10904 /*
10905 * If buffer is not owned by firmware, it's already been released.
10906 */
10907 if (!pBuffer->owned_by_firmware) {
10908 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10909 return (DDI_FAILURE);
10910 }
10911
10912 /*
10913 * Release the buffer.
10914 */
10915 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10916 MPTSAS_FW_DIAG_TYPE_RELEASE);
10917 return (status);
10918 }
10919
10920 static int
10921 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10922 uint32_t length, uint32_t *return_code, int ioctl_mode)
10923 {
10924 mptsas_fw_diag_register_t diag_register;
10925 mptsas_fw_diag_unregister_t diag_unregister;
10926 mptsas_fw_diag_query_t diag_query;
10927 mptsas_diag_read_buffer_t diag_read_buffer;
10928 mptsas_fw_diag_release_t diag_release;
10929 int status = DDI_SUCCESS;
10930 uint32_t original_return_code, read_buf_len;
10931
10932 ASSERT(mutex_owned(&mpt->m_mutex));
10933
10934 original_return_code = *return_code;
10935 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10936
10937 switch (action) {
10938 case MPTSAS_FW_DIAG_TYPE_REGISTER:
10939 if (!length) {
10940 *return_code =
10941 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10942 status = DDI_FAILURE;
10943 break;
10944 }
10945 if (ddi_copyin(diag_action, &diag_register,
10946 sizeof (diag_register), ioctl_mode) != 0) {
10947 return (DDI_FAILURE);
10948 }
10949 status = mptsas_diag_register(mpt, &diag_register,
10950 return_code);
10951 break;
10952
10953 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10954 if (length < sizeof (diag_unregister)) {
10955 *return_code =
10956 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10957 status = DDI_FAILURE;
10958 break;
10959 }
10960 if (ddi_copyin(diag_action, &diag_unregister,
10961 sizeof (diag_unregister), ioctl_mode) != 0) {
10962 return (DDI_FAILURE);
10963 }
10964 status = mptsas_diag_unregister(mpt, &diag_unregister,
10965 return_code);
10966 break;
10967
10968 case MPTSAS_FW_DIAG_TYPE_QUERY:
10969 if (length < sizeof (diag_query)) {
10970 *return_code =
10971 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10972 status = DDI_FAILURE;
10973 break;
10974 }
10975 if (ddi_copyin(diag_action, &diag_query,
10976 sizeof (diag_query), ioctl_mode) != 0) {
10977 return (DDI_FAILURE);
10978 }
10979 status = mptsas_diag_query(mpt, &diag_query,
10980 return_code);
10981 if (status == DDI_SUCCESS) {
10982 if (ddi_copyout(&diag_query, diag_action,
10983 sizeof (diag_query), ioctl_mode) != 0) {
10984 return (DDI_FAILURE);
10985 }
10986 }
10987 break;
10988
10989 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10990 if (ddi_copyin(diag_action, &diag_read_buffer,
10991 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10992 return (DDI_FAILURE);
10993 }
10994 read_buf_len = sizeof (diag_read_buffer) -
10995 sizeof (diag_read_buffer.DataBuffer) +
10996 diag_read_buffer.BytesToRead;
10997 if (length < read_buf_len) {
10998 *return_code =
10999 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11000 status = DDI_FAILURE;
11001 break;
11002 }
11003 status = mptsas_diag_read_buffer(mpt,
11004 &diag_read_buffer, diag_action +
11005 sizeof (diag_read_buffer) - 4, return_code,
11006 ioctl_mode);
11007 if (status == DDI_SUCCESS) {
11008 if (ddi_copyout(&diag_read_buffer, diag_action,
11009 sizeof (diag_read_buffer) - 4, ioctl_mode)
11010 != 0) {
11011 return (DDI_FAILURE);
11012 }
11013 }
11014 break;
11015
11016 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11017 if (length < sizeof (diag_release)) {
11018 *return_code =
11019 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11020 status = DDI_FAILURE;
11021 break;
11022 }
11023 if (ddi_copyin(diag_action, &diag_release,
11024 sizeof (diag_release), ioctl_mode) != 0) {
11025 return (DDI_FAILURE);
11026 }
11027 status = mptsas_diag_release(mpt, &diag_release,
11028 return_code);
11029 break;
11030
11031 default:
11032 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11033 status = DDI_FAILURE;
11034 break;
11035 }
11036
11037 if ((status == DDI_FAILURE) &&
11038 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11039 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11040 status = DDI_SUCCESS;
11041 }
11042
11043 return (status);
11044 }
11045
11046 static int
11047 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11048 {
11049 int status;
11050 mptsas_diag_action_t driver_data;
11051
11052 ASSERT(mutex_owned(&mpt->m_mutex));
11053
11054 /*
11055 * Copy the user data to a driver data buffer.
11056 */
11057 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11058 mode) == 0) {
11059 /*
11060 * Send diag action request if Action is valid
11061 */
11062 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11063 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11064 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11065 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11066 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11067 status = mptsas_do_diag_action(mpt, driver_data.Action,
11068 (void *)(uintptr_t)driver_data.PtrDiagAction,
11069 driver_data.Length, &driver_data.ReturnCode,
11070 mode);
11071 if (status == DDI_SUCCESS) {
11072 if (ddi_copyout(&driver_data.ReturnCode,
11073 &user_data->ReturnCode,
11074 sizeof (user_data->ReturnCode), mode)
11075 != 0) {
11076 status = EFAULT;
11077 } else {
11078 status = 0;
11079 }
11080 } else {
11081 status = EIO;
11082 }
11083 } else {
11084 status = EINVAL;
11085 }
11086 } else {
11087 status = EFAULT;
11088 }
11089
11090 return (status);
11091 }
11092
11093 /*
11094 * This routine handles the "event query" ioctl.
11095 */
11096 static int
11097 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11098 int *rval)
11099 {
11100 int status;
11101 mptsas_event_query_t driverdata;
11102 uint8_t i;
11103
11104 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11105
11106 mutex_enter(&mpt->m_mutex);
11107 for (i = 0; i < 4; i++) {
11108 driverdata.Types[i] = mpt->m_event_mask[i];
11109 }
11110 mutex_exit(&mpt->m_mutex);
11111
11112 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11113 status = EFAULT;
11114 } else {
11115 *rval = MPTIOCTL_STATUS_GOOD;
11116 status = 0;
11117 }
11118
11119 return (status);
11120 }
11121
11122 /*
11123 * This routine handles the "event enable" ioctl.
11124 */
11125 static int
11126 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11127 int *rval)
11128 {
11129 int status;
11130 mptsas_event_enable_t driverdata;
11131 uint8_t i;
11132
11133 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11134 mutex_enter(&mpt->m_mutex);
11135 for (i = 0; i < 4; i++) {
11136 mpt->m_event_mask[i] = driverdata.Types[i];
11137 }
11138 mutex_exit(&mpt->m_mutex);
11139
11140 *rval = MPTIOCTL_STATUS_GOOD;
11141 status = 0;
11142 } else {
11143 status = EFAULT;
11144 }
11145 return (status);
11146 }
11147
11148 /*
11149 * This routine handles the "event report" ioctl.
11150 */
11151 static int
11152 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11153 int *rval)
11154 {
11155 int status;
11156 mptsas_event_report_t driverdata;
11157
11158 mutex_enter(&mpt->m_mutex);
11159
11160 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11161 mode) == 0) {
11162 if (driverdata.Size >= sizeof (mpt->m_events)) {
11163 if (ddi_copyout(mpt->m_events, data->Events,
11164 sizeof (mpt->m_events), mode) != 0) {
11165 status = EFAULT;
11166 } else {
11167 if (driverdata.Size > sizeof (mpt->m_events)) {
11168 driverdata.Size =
11169 sizeof (mpt->m_events);
11170 if (ddi_copyout(&driverdata.Size,
11171 &data->Size,
11172 sizeof (driverdata.Size),
11173 mode) != 0) {
11174 status = EFAULT;
11175 } else {
11176 *rval = MPTIOCTL_STATUS_GOOD;
11177 status = 0;
11178 }
11179 } else {
11180 *rval = MPTIOCTL_STATUS_GOOD;
11181 status = 0;
11182 }
11183 }
11184 } else {
11185 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11186 status = 0;
11187 }
11188 } else {
11189 status = EFAULT;
11190 }
11191
11192 mutex_exit(&mpt->m_mutex);
11193 return (status);
11194 }
11195
11196 static void
11197 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11198 {
11199 int *reg_data;
11200 uint_t reglen;
11201
11202 /*
11203 * Lookup the 'reg' property and extract the other data
11204 */
11205 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11206 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11207 DDI_PROP_SUCCESS) {
11208 /*
11209 * Extract the PCI data from the 'reg' property first DWORD.
11210 * The entry looks like the following:
11211 * First DWORD:
11212 * Bits 0 - 7 8-bit Register number
11213 * Bits 8 - 10 3-bit Function number
11214 * Bits 11 - 15 5-bit Device number
11215 * Bits 16 - 23 8-bit Bus number
11216 * Bits 24 - 25 2-bit Address Space type identifier
11217 *
11218 */
11219 adapter_data->PciInformation.u.bits.BusNumber =
11220 (reg_data[0] & 0x00FF0000) >> 16;
11221 adapter_data->PciInformation.u.bits.DeviceNumber =
11222 (reg_data[0] & 0x0000F800) >> 11;
11223 adapter_data->PciInformation.u.bits.FunctionNumber =
11224 (reg_data[0] & 0x00000700) >> 8;
11225 ddi_prop_free((void *)reg_data);
11226 } else {
11227 /*
11228 * If we can't determine the PCI data then we fill in FF's for
11229 * the data to indicate this.
11230 */
11231 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11232 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11233 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11234 }
11235
11236 /*
11237 * Saved in the mpt->m_fwversion
11238 */
11239 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11240 }
11241
11242 static void
11243 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11244 {
11245 char *driver_verstr = MPTSAS_MOD_STRING;
11246
11247 mptsas_lookup_pci_data(mpt, adapter_data);
11248 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11249 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11250 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11251 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11252 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11253 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11254 adapter_data->BiosVersion = 0;
11255 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11256 }
11257
11258 static void
11259 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11260 {
11261 int *reg_data, i;
11262 uint_t reglen;
11263
11264 /*
11265 * Lookup the 'reg' property and extract the other data
11266 */
11267 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11268 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11269 DDI_PROP_SUCCESS) {
11270 /*
11271 * Extract the PCI data from the 'reg' property first DWORD.
11272 * The entry looks like the following:
11273 * First DWORD:
11274 * Bits 8 - 10 3-bit Function number
11275 * Bits 11 - 15 5-bit Device number
11276 * Bits 16 - 23 8-bit Bus number
11277 */
11278 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11279 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11280 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11281 ddi_prop_free((void *)reg_data);
11282 } else {
11283 /*
11284 * If we can't determine the PCI info then we fill in FF's for
11285 * the data to indicate this.
11286 */
11287 pci_info->BusNumber = 0xFFFFFFFF;
11288 pci_info->DeviceNumber = 0xFF;
11289 pci_info->FunctionNumber = 0xFF;
11290 }
11291
11292 /*
11293 * Now get the interrupt vector and the pci header. The vector can
11294 * only be 0 right now. The header is the first 256 bytes of config
11295 * space.
11296 */
11297 pci_info->InterruptVector = 0;
11298 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11299 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11300 i);
11301 }
11302 }
11303
11304 static int
11305 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
11306 {
11307 int status = 0;
11308 mptsas_reg_access_t driverdata;
11309
11310 mutex_enter(&mpt->m_mutex);
11311 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11312 switch (driverdata.Command) {
11313 /*
11314 * IO access is not supported.
11315 */
11316 case REG_IO_READ:
11317 case REG_IO_WRITE:
11318 mptsas_log(mpt, CE_WARN, "IO access is not "
11319 "supported. Use memory access.");
11320 status = EINVAL;
11321 break;
11322
11323 case REG_MEM_READ:
11324 driverdata.RegData = ddi_get32(mpt->m_datap,
11325 (uint32_t *)(void *)mpt->m_reg +
11326 driverdata.RegOffset);
11327 if (ddi_copyout(&driverdata.RegData,
11328 &data->RegData,
11329 sizeof (driverdata.RegData), mode) != 0) {
11330 mptsas_log(mpt, CE_WARN, "Register "
11331 "Read Failed");
11332 status = EFAULT;
11333 }
11334 break;
11335
11336 case REG_MEM_WRITE:
11337 ddi_put32(mpt->m_datap,
11338 (uint32_t *)(void *)mpt->m_reg +
11339 driverdata.RegOffset,
11340 driverdata.RegData);
11341 break;
11342
11343 default:
11344 status = EINVAL;
11345 break;
11346 }
11347 } else {
11348 status = EFAULT;
11349 }
11350
11351 mutex_exit(&mpt->m_mutex);
11352 return (status);
11353 }
11354
11355 static int
11356 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11357 int *rval)
11358 {
11359 int status = 0;
11360 mptsas_t *mpt;
11361 mptsas_update_flash_t flashdata;
11362 mptsas_pass_thru_t passthru_data;
11363 mptsas_adapter_data_t adapter_data;
11364 mptsas_pci_info_t pci_info;
11365 int copylen;
11366
11367 int iport_flag = 0;
11368 dev_info_t *dip = NULL;
11369 mptsas_phymask_t phymask = 0;
11370 struct devctl_iocdata *dcp = NULL;
11371 uint32_t slotstatus = 0;
11372 char *addr = NULL;
11373 mptsas_target_t *ptgt = NULL;
11374
11375 *rval = MPTIOCTL_STATUS_GOOD;
11376 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11377 return (EPERM);
11378 }
11379
11380 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11381 if (mpt == NULL) {
11382 /*
11383 * Called from iport node, get the states
11384 */
11385 iport_flag = 1;
11386 dip = mptsas_get_dip_from_dev(dev, &phymask);
11387 if (dip == NULL) {
11388 return (ENXIO);
11389 }
11390 mpt = DIP2MPT(dip);
11391 }
11392 /* Make sure power level is D0 before accessing registers */
11393 mutex_enter(&mpt->m_mutex);
11394 if (mpt->m_options & MPTSAS_OPT_PM) {
11395 (void) pm_busy_component(mpt->m_dip, 0);
11396 if (mpt->m_power_level != PM_LEVEL_D0) {
11397 mutex_exit(&mpt->m_mutex);
11398 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11399 DDI_SUCCESS) {
11400 mptsas_log(mpt, CE_WARN,
11401 "mptsas%d: mptsas_ioctl: Raise power "
11402 "request failed.", mpt->m_instance);
11403 (void) pm_idle_component(mpt->m_dip, 0);
11404 return (ENXIO);
11405 }
11406 } else {
11407 mutex_exit(&mpt->m_mutex);
11408 }
11409 } else {
11410 mutex_exit(&mpt->m_mutex);
11411 }
11412
11413 if (iport_flag) {
11414 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
11415 goto out;
11416 }
11417 switch (cmd) {
11418 case MPTIOCTL_UPDATE_FLASH:
11419 if (ddi_copyin((void *)data, &flashdata,
11420 sizeof (struct mptsas_update_flash), mode)) {
11421 status = EFAULT;
11422 break;
11423 }
11424
11425 mutex_enter(&mpt->m_mutex);
11426 if (mptsas_update_flash(mpt,
11427 (caddr_t)(long)flashdata.PtrBuffer,
11428 flashdata.ImageSize, flashdata.ImageType, mode)) {
11429 status = EFAULT;
11430 }
11431
11432 /*
11433 * Reset the chip to start using the new
11434 * firmware. Reset if failed also.
11435 */
11436 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11437 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
11438 status = EFAULT;
11439 }
11440 mutex_exit(&mpt->m_mutex);
11441 break;
11442 case MPTIOCTL_PASS_THRU:
11443 /*
11444 * The user has requested to pass through a command to
11445 * be executed by the MPT firmware. Call our routine
11446 * which does this. Only allow one passthru IOCTL at
11447 * one time. Other threads will block on
11448 * m_passthru_mutex, which is of adaptive variant.
11449 */
11450 if (ddi_copyin((void *)data, &passthru_data,
11451 sizeof (mptsas_pass_thru_t), mode)) {
11452 status = EFAULT;
11453 break;
11454 }
11455 mutex_enter(&mpt->m_passthru_mutex);
11456 mutex_enter(&mpt->m_mutex);
11457 status = mptsas_pass_thru(mpt, &passthru_data, mode);
11458 mutex_exit(&mpt->m_mutex);
11459 mutex_exit(&mpt->m_passthru_mutex);
11460
11461 break;
11462 case MPTIOCTL_GET_ADAPTER_DATA:
11463 /*
11464 * The user has requested to read adapter data. Call
11465 * our routine which does this.
11466 */
11467 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
11468 if (ddi_copyin((void *)data, (void *)&adapter_data,
11469 sizeof (mptsas_adapter_data_t), mode)) {
11470 status = EFAULT;
11471 break;
11472 }
11473 if (adapter_data.StructureLength >=
11474 sizeof (mptsas_adapter_data_t)) {
11475 adapter_data.StructureLength = (uint32_t)
11476 sizeof (mptsas_adapter_data_t);
11477 copylen = sizeof (mptsas_adapter_data_t);
11478 mutex_enter(&mpt->m_mutex);
11479 mptsas_read_adapter_data(mpt, &adapter_data);
11480 mutex_exit(&mpt->m_mutex);
11481 } else {
11482 adapter_data.StructureLength = (uint32_t)
11483 sizeof (mptsas_adapter_data_t);
11484 copylen = sizeof (adapter_data.StructureLength);
11485 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11486 }
11487 if (ddi_copyout((void *)(&adapter_data), (void *)data,
11488 copylen, mode) != 0) {
11489 status = EFAULT;
11490 }
11491 break;
11492 case MPTIOCTL_GET_PCI_INFO:
11493 /*
11494 * The user has requested to read pci info. Call
11495 * our routine which does this.
11496 */
11497 bzero(&pci_info, sizeof (mptsas_pci_info_t));
11498 mutex_enter(&mpt->m_mutex);
11499 mptsas_read_pci_info(mpt, &pci_info);
11500 mutex_exit(&mpt->m_mutex);
11501 if (ddi_copyout((void *)(&pci_info), (void *)data,
11502 sizeof (mptsas_pci_info_t), mode) != 0) {
11503 status = EFAULT;
11504 }
11505 break;
11506 case MPTIOCTL_RESET_ADAPTER:
11507 mutex_enter(&mpt->m_mutex);
11508 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11509 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11510 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
11511 "failed");
11512 status = EFAULT;
11513 }
11514 mutex_exit(&mpt->m_mutex);
11515 break;
11516 case MPTIOCTL_DIAG_ACTION:
11517 /*
11518 * The user has done a diag buffer action. Call our
11519 * routine which does this. Only allow one diag action
11520 * at one time.
11521 */
11522 mutex_enter(&mpt->m_mutex);
11523 if (mpt->m_diag_action_in_progress) {
11524 mutex_exit(&mpt->m_mutex);
11525 status = EBUSY;
11526 goto out;
11527 }
11528 mpt->m_diag_action_in_progress = 1;
11529 status = mptsas_diag_action(mpt,
11530 (mptsas_diag_action_t *)data, mode);
11531 mpt->m_diag_action_in_progress = 0;
11532 mutex_exit(&mpt->m_mutex);
11533 break;
11534 case MPTIOCTL_EVENT_QUERY:
11535 /*
11536 * The user has done an event query. Call our routine
11537 * which does this.
11538 */
11539 status = mptsas_event_query(mpt,
11540 (mptsas_event_query_t *)data, mode, rval);
11541 break;
11542 case MPTIOCTL_EVENT_ENABLE:
11543 /*
11544 * The user has done an event enable. Call our routine
11545 * which does this.
11546 */
11547 status = mptsas_event_enable(mpt,
11548 (mptsas_event_enable_t *)data, mode, rval);
11549 break;
11550 case MPTIOCTL_EVENT_REPORT:
11551 /*
11552 * The user has done an event report. Call our routine
11553 * which does this.
11554 */
11555 status = mptsas_event_report(mpt,
11556 (mptsas_event_report_t *)data, mode, rval);
11557 break;
11558 case MPTIOCTL_REG_ACCESS:
11559 /*
11560 * The user has requested register access. Call our
11561 * routine which does this.
11562 */
11563 status = mptsas_reg_access(mpt,
11564 (mptsas_reg_access_t *)data, mode);
11565 break;
11566 default:
11567 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
11568 rval);
11569 break;
11570 }
11571
11572 out:
11573 return (status);
11574 }
11575
11576 int
11577 mptsas_restart_ioc(mptsas_t *mpt)
11578 {
11579 int rval = DDI_SUCCESS;
11580 mptsas_target_t *ptgt = NULL;
11581
11582 ASSERT(mutex_owned(&mpt->m_mutex));
11583
11584 /*
11585 * Set a flag telling I/O path that we're processing a reset. This is
11586 * needed because after the reset is complete, the hash table still
11587 * needs to be rebuilt. If I/Os are started before the hash table is
11588 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
11589 * so that they can be retried.
11590 */
11591 mpt->m_in_reset = TRUE;
11592
11593 /*
11594 * Set all throttles to HOLD
11595 */
11596 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11597 MPTSAS_HASH_FIRST);
11598 while (ptgt != NULL) {
11599 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
11600
11601 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11602 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11603 }
11604
11605 /*
11606 * Disable interrupts
11607 */
11608 MPTSAS_DISABLE_INTR(mpt);
11609
11610 /*
11611 * Abort all commands: outstanding commands, commands in waitq and
11612 * tx_waitq.
11613 */
11614 mptsas_flush_hba(mpt);
11615
11616 /*
11617 * Reinitialize the chip.
11618 */
11619 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
11620 rval = DDI_FAILURE;
11621 }
11622
11623 /*
11624 * Enable interrupts again
11625 */
11626 MPTSAS_ENABLE_INTR(mpt);
11627
11628 /*
11629 * If mptsas_init_chip was successful, update the driver data.
11630 */
11631 if (rval == DDI_SUCCESS) {
11632 mptsas_update_driver_data(mpt);
11633 }
11634
11635 /*
11636 * Reset the throttles
11637 */
11638 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11639 MPTSAS_HASH_FIRST);
11640 while (ptgt != NULL) {
11641 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
11642
11643 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11644 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11645 }
11646
11647 mptsas_doneq_empty(mpt);
11648 mptsas_restart_hba(mpt);
11649
11650 if (rval != DDI_SUCCESS) {
11651 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
11652 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
11653 }
11654
11655 /*
11656 * Clear the reset flag so that I/Os can continue.
11657 */
11658 mpt->m_in_reset = FALSE;
11659
11660 return (rval);
11661 }
11662
11663 static int
11664 mptsas_init_chip(mptsas_t *mpt, int first_time)
11665 {
11666 ddi_dma_cookie_t cookie;
11667 uint32_t i;
11668 int rval;
11669
11670 /*
11671 * Check to see if the firmware image is valid
11672 */
11673 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
11674 MPI2_DIAG_FLASH_BAD_SIG) {
11675 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
11676 goto fail;
11677 }
11678
11679 /*
11680 * Reset the chip
11681 */
11682 rval = mptsas_ioc_reset(mpt, first_time);
11683 if (rval == MPTSAS_RESET_FAIL) {
11684 mptsas_log(mpt, CE_WARN, "hard reset failed!");
11685 goto fail;
11686 }
11687
11688 if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
11689 goto mur;
11690 }
11691 /*
11692 * Setup configuration space
11693 */
11694 if (mptsas_config_space_init(mpt) == FALSE) {
11695 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
11696 "failed!");
11697 goto fail;
11698 }
11699
11700 /*
11701 * IOC facts can change after a diag reset so all buffers that are
11702 * based on these numbers must be de-allocated and re-allocated. Get
11703 * new IOC facts each time chip is initialized.
11704 */
11705 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
11706 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
11707 goto fail;
11708 }
11709
11710 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
11711 goto fail;
11712 }
11713 /*
11714 * Allocate request message frames, reply free queue, reply descriptor
11715 * post queue, and reply message frames using latest IOC facts.
11716 */
11717 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
11718 mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
11719 goto fail;
11720 }
11721 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
11722 mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
11723 goto fail;
11724 }
11725 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
11726 mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
11727 goto fail;
11728 }
11729 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
11730 mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
11731 goto fail;
11732 }
11733
11734 mur:
11735 /*
11736 * Re-Initialize ioc to operational state
11737 */
11738 if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
11739 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
11740 goto fail;
11741 }
11742
11743 mptsas_alloc_reply_args(mpt);
11744
11745 /*
11746 * Initialize reply post index. Reply free index is initialized after
11747 * the next loop.
11748 */
11749 mpt->m_post_index = 0;
11750
11751 /*
11752 * Initialize the Reply Free Queue with the physical addresses of our
11753 * reply frames.
11754 */
11755 cookie.dmac_address = mpt->m_reply_frame_dma_addr;
11756 for (i = 0; i < mpt->m_max_replies; i++) {
11757 ddi_put32(mpt->m_acc_free_queue_hdl,
11758 &((uint32_t *)(void *)mpt->m_free_queue)[i],
11759 cookie.dmac_address);
11760 cookie.dmac_address += mpt->m_reply_frame_size;
11761 }
11762 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11763 DDI_DMA_SYNC_FORDEV);
11764
11765 /*
11766 * Initialize the reply free index to one past the last frame on the
11767 * queue. This will signify that the queue is empty to start with.
11768 */
11769 mpt->m_free_index = i;
11770 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
11771
11772 /*
11773 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
11774 */
11775 for (i = 0; i < mpt->m_post_queue_depth; i++) {
11776 ddi_put64(mpt->m_acc_post_queue_hdl,
11777 &((uint64_t *)(void *)mpt->m_post_queue)[i],
11778 0xFFFFFFFFFFFFFFFF);
11779 }
11780 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
11781 DDI_DMA_SYNC_FORDEV);
11782
11783 /*
11784 * Enable ports
11785 */
11786 if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
11787 mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
11788 goto fail;
11789 }
11790
11791 /*
11792 * enable events
11793 */
11794 if (mptsas_ioc_enable_event_notification(mpt)) {
11795 goto fail;
11796 }
11797
11798 /*
11799 * We need checks in attach and these.
11800 * chip_init is called in mult. places
11801 */
11802
11803 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
11804 DDI_SUCCESS) ||
11805 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
11806 DDI_SUCCESS) ||
11807 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
11808 DDI_SUCCESS) ||
11809 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
11810 DDI_SUCCESS) ||
11811 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
11812 DDI_SUCCESS)) {
11813 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11814 goto fail;
11815 }
11816
11817 /* Check all acc handles */
11818 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
11819 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
11820 DDI_SUCCESS) ||
11821 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
11822 DDI_SUCCESS) ||
11823 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
11824 DDI_SUCCESS) ||
11825 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
11826 DDI_SUCCESS) ||
11827 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
11828 DDI_SUCCESS) ||
11829 (mptsas_check_acc_handle(mpt->m_config_handle) !=
11830 DDI_SUCCESS)) {
11831 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11832 goto fail;
11833 }
11834
11835 return (DDI_SUCCESS);
11836
11837 fail:
11838 return (DDI_FAILURE);
11839 }
11840
/*
 * mptsas_get_pci_cap --
 *	Walk the adapter's PCI capabilities list.  When the power-management
 *	capability is found, set MPTSAS_OPT_PM in m_options and record the
 *	PMCSR register offset for later use.  Other known capability IDs are
 *	accepted silently; unrecognized ones are merely logged.
 *
 *	Returns FALSE only when config space has not been mapped; TRUE
 *	otherwise (including when the device exposes no capabilities list).
 */
static int
mptsas_get_pci_cap(mptsas_t *mpt)
{
	ushort_t caps_ptr, cap, cap_count;

	/* Config space access must have been set up already. */
	if (mpt->m_config_handle == NULL)
		return (FALSE);
	/*
	 * Check if capabilities list is supported and if so,
	 * get initial capabilities pointer and clear bits 0,1.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
	    & PCI_STAT_CAP) {
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    PCI_CONF_CAP_PTR), 4);
	} else {
		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
	}

	/*
	 * Walk capabilities if supported.
	 */
	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {

		/*
		 * Check that we haven't exceeded the maximum number of
		 * capabilities and that the pointer is in a valid range.
		 * The cap on 48 entries guards against a corrupt (looping)
		 * capability chain.
		 */
		if (++cap_count > 48) {
			mptsas_log(mpt, CE_WARN,
			    "too many device capabilities.\n");
			break;
		}
		/* Capabilities cannot live inside the standard header. */
		if (caps_ptr < 64) {
			mptsas_log(mpt, CE_WARN,
			    "capabilities pointer 0x%x out of range.\n",
			    caps_ptr);
			break;
		}

		/*
		 * Get next capability and check that it is valid.
		 * For now, we only support power management.
		 */
		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
		switch (cap) {
		case PCI_CAP_ID_PM:
			mptsas_log(mpt, CE_NOTE,
			    "?mptsas%d supports power management.\n",
			    mpt->m_instance);
			mpt->m_options |= MPTSAS_OPT_PM;

			/* Save PMCSR offset */
			mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
			break;
		/*
		 * The following capabilities are valid.  Any others
		 * will cause a message to be logged.
		 */
		case PCI_CAP_ID_VPD:
		case PCI_CAP_ID_MSI:
		case PCI_CAP_ID_PCIX:
		case PCI_CAP_ID_PCI_E:
		case PCI_CAP_ID_MSI_X:
			break;
		default:
			mptsas_log(mpt, CE_NOTE,
			    "?mptsas%d unrecognized capability "
			    "0x%x.\n", mpt->m_instance, cap);
			break;
		}

		/*
		 * Get next capabilities pointer and clear bits 0,1.
		 */
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
	}
	return (TRUE);
}
11921
11922 static int
11923 mptsas_init_pm(mptsas_t *mpt)
11924 {
11925 char pmc_name[16];
11926 char *pmc[] = {
11927 NULL,
11928 "0=Off (PCI D3 State)",
11929 "3=On (PCI D0 State)",
11930 NULL
11931 };
11932 uint16_t pmcsr_stat;
11933
11934 if (mptsas_get_pci_cap(mpt) == FALSE) {
11935 return (DDI_FAILURE);
11936 }
11937 /*
11938 * If PCI's capability does not support PM, then don't need
11939 * to registe the pm-components
11940 */
11941 if (!(mpt->m_options & MPTSAS_OPT_PM))
11942 return (DDI_SUCCESS);
11943 /*
11944 * If power management is supported by this chip, create
11945 * pm-components property for the power management framework
11946 */
11947 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
11948 pmc[0] = pmc_name;
11949 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
11950 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
11951 mpt->m_options &= ~MPTSAS_OPT_PM;
11952 mptsas_log(mpt, CE_WARN,
11953 "mptsas%d: pm-component property creation failed.",
11954 mpt->m_instance);
11955 return (DDI_FAILURE);
11956 }
11957
11958 /*
11959 * Power on device.
11960 */
11961 (void) pm_busy_component(mpt->m_dip, 0);
11962 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
11963 mpt->m_pmcsr_offset);
11964 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
11965 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
11966 mpt->m_instance);
11967 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
11968 PCI_PMCSR_D0);
11969 }
11970 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
11971 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
11972 return (DDI_FAILURE);
11973 }
11974 mpt->m_power_level = PM_LEVEL_D0;
11975 /*
11976 * Set pm idle delay.
11977 */
11978 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
11979 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
11980
11981 return (DDI_SUCCESS);
11982 }
11983
11984 static int
11985 mptsas_register_intrs(mptsas_t *mpt)
11986 {
11987 dev_info_t *dip;
11988 int intr_types;
11989
11990 dip = mpt->m_dip;
11991
11992 /* Get supported interrupt types */
11993 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
11994 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
11995 "failed\n");
11996 return (FALSE);
11997 }
11998
11999 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12000
12001 /*
12002 * Try MSI, but fall back to FIXED
12003 */
12004 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12005 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12006 NDBG0(("Using MSI interrupt type"));
12007 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12008 return (TRUE);
12009 }
12010 }
12011 if (intr_types & DDI_INTR_TYPE_FIXED) {
12012 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12013 NDBG0(("Using FIXED interrupt type"));
12014 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12015 return (TRUE);
12016 } else {
12017 NDBG0(("FIXED interrupt registration failed"));
12018 return (FALSE);
12019 }
12020 }
12021
12022 return (FALSE);
12023 }
12024
/*
 * mptsas_unregister_intrs --
 *	Thin wrapper that tears down all interrupt handlers and vectors
 *	set up by mptsas_register_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12030
12031 /*
12032 * mptsas_add_intrs:
12033 *
12034 * Register FIXED or MSI interrupts.
12035 */
12036 static int
12037 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12038 {
12039 dev_info_t *dip = mpt->m_dip;
12040 int avail, actual, count = 0;
12041 int i, flag, ret;
12042
12043 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12044
12045 /* Get number of interrupts */
12046 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12047 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12048 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12049 "ret %d count %d\n", ret, count);
12050
12051 return (DDI_FAILURE);
12052 }
12053
12054 /* Get number of available interrupts */
12055 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12056 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12057 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12058 "ret %d avail %d\n", ret, avail);
12059
12060 return (DDI_FAILURE);
12061 }
12062
12063 if (avail < count) {
12064 mptsas_log(mpt, CE_CONT, "!ddi_intr_get_nvail returned %d, "
12065 "navail() returned %d", count, avail);
12066 }
12067
12068 /* Mpt only have one interrupt routine */
12069 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12070 count = 1;
12071 }
12072
12073 /* Allocate an array of interrupt handles */
12074 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12075 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12076
12077 flag = DDI_INTR_ALLOC_NORMAL;
12078
12079 /* call ddi_intr_alloc() */
12080 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12081 count, &actual, flag);
12082
12083 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12084 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12085 ret);
12086 kmem_free(mpt->m_htable, mpt->m_intr_size);
12087 return (DDI_FAILURE);
12088 }
12089
12090 /* use interrupt count returned or abort? */
12091 if (actual < count) {
12092 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12093 count, actual);
12094 }
12095
12096 mpt->m_intr_cnt = actual;
12097
12098 /*
12099 * Get priority for first msi, assume remaining are all the same
12100 */
12101 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12102 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12103 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12104
12105 /* Free already allocated intr */
12106 for (i = 0; i < actual; i++) {
12107 (void) ddi_intr_free(mpt->m_htable[i]);
12108 }
12109
12110 kmem_free(mpt->m_htable, mpt->m_intr_size);
12111 return (DDI_FAILURE);
12112 }
12113
12114 /* Test for high level mutex */
12115 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12116 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12117 "Hi level interrupt not supported\n");
12118
12119 /* Free already allocated intr */
12120 for (i = 0; i < actual; i++) {
12121 (void) ddi_intr_free(mpt->m_htable[i]);
12122 }
12123
12124 kmem_free(mpt->m_htable, mpt->m_intr_size);
12125 return (DDI_FAILURE);
12126 }
12127
12128 /* Call ddi_intr_add_handler() */
12129 for (i = 0; i < actual; i++) {
12130 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12131 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12132 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12133 "failed %d\n", ret);
12134
12135 /* Free already allocated intr */
12136 for (i = 0; i < actual; i++) {
12137 (void) ddi_intr_free(mpt->m_htable[i]);
12138 }
12139
12140 kmem_free(mpt->m_htable, mpt->m_intr_size);
12141 return (DDI_FAILURE);
12142 }
12143 }
12144
12145 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12146 != DDI_SUCCESS) {
12147 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12148
12149 /* Free already allocated intr */
12150 for (i = 0; i < actual; i++) {
12151 (void) ddi_intr_free(mpt->m_htable[i]);
12152 }
12153
12154 kmem_free(mpt->m_htable, mpt->m_intr_size);
12155 return (DDI_FAILURE);
12156 }
12157
12158 /*
12159 * Enable interrupts
12160 */
12161 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12162 /* Call ddi_intr_block_enable() for MSI interrupts */
12163 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12164 } else {
12165 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12166 for (i = 0; i < mpt->m_intr_cnt; i++) {
12167 (void) ddi_intr_enable(mpt->m_htable[i]);
12168 }
12169 }
12170 return (DDI_SUCCESS);
12171 }
12172
12173 /*
12174 * mptsas_rem_intrs:
12175 *
12176 * Unregister FIXED or MSI interrupts
12177 */
12178 static void
12179 mptsas_rem_intrs(mptsas_t *mpt)
12180 {
12181 int i;
12182
12183 NDBG6(("mptsas_rem_intrs"));
12184
12185 /* Disable all interrupts */
12186 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12187 /* Call ddi_intr_block_disable() */
12188 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12189 } else {
12190 for (i = 0; i < mpt->m_intr_cnt; i++) {
12191 (void) ddi_intr_disable(mpt->m_htable[i]);
12192 }
12193 }
12194
12195 /* Call ddi_intr_remove_handler() */
12196 for (i = 0; i < mpt->m_intr_cnt; i++) {
12197 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12198 (void) ddi_intr_free(mpt->m_htable[i]);
12199 }
12200
12201 kmem_free(mpt->m_htable, mpt->m_intr_size);
12202 }
12203
12204 /*
12205 * The IO fault service error handling callback function
12206 */
12207 /*ARGSUSED*/
12208 static int
12209 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
12210 {
12211 /*
12212 * as the driver can always deal with an error in any dma or
12213 * access handle, we can just return the fme_status value.
12214 */
12215 pci_ereport_post(dip, err, NULL);
12216 return (err->fme_status);
12217 }
12218
12219 /*
12220 * mptsas_fm_init - initialize fma capabilities and register with IO
12221 * fault services.
12222 */
12223 static void
12224 mptsas_fm_init(mptsas_t *mpt)
12225 {
12226 /*
12227 * Need to change iblock to priority for new MSI intr
12228 */
12229 ddi_iblock_cookie_t fm_ibc;
12230
12231 /* Only register with IO Fault Services if we have some capability */
12232 if (mpt->m_fm_capabilities) {
12233 /* Adjust access and dma attributes for FMA */
12234 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12235 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12236 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12237
12238 /*
12239 * Register capabilities with IO Fault Services.
12240 * mpt->m_fm_capabilities will be updated to indicate
12241 * capabilities actually supported (not requested.)
12242 */
12243 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12244
12245 /*
12246 * Initialize pci ereport capabilities if ereport
12247 * capable (should always be.)
12248 */
12249 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12250 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12251 pci_ereport_setup(mpt->m_dip);
12252 }
12253
12254 /*
12255 * Register error callback if error callback capable.
12256 */
12257 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12258 ddi_fm_handler_register(mpt->m_dip,
12259 mptsas_fm_error_cb, (void *) mpt);
12260 }
12261 }
12262 }
12263
12264 /*
12265 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12266 * fault services.
12267 *
12268 */
12269 static void
12270 mptsas_fm_fini(mptsas_t *mpt)
12271 {
12272 /* Only unregister FMA capabilities if registered */
12273 if (mpt->m_fm_capabilities) {
12274
12275 /*
12276 * Un-register error callback if error callback capable.
12277 */
12278
12279 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12280 ddi_fm_handler_unregister(mpt->m_dip);
12281 }
12282
12283 /*
12284 * Release any resources allocated by pci_ereport_setup()
12285 */
12286
12287 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12288 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12289 pci_ereport_teardown(mpt->m_dip);
12290 }
12291
12292 /* Unregister from IO Fault Services */
12293 ddi_fm_fini(mpt->m_dip);
12294
12295 /* Adjust access and dma attributes for FMA */
12296 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12297 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12298 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12299
12300 }
12301 }
12302
12303 int
12304 mptsas_check_acc_handle(ddi_acc_handle_t handle)
12305 {
12306 ddi_fm_error_t de;
12307
12308 if (handle == NULL)
12309 return (DDI_FAILURE);
12310 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
12311 return (de.fme_status);
12312 }
12313
12314 int
12315 mptsas_check_dma_handle(ddi_dma_handle_t handle)
12316 {
12317 ddi_fm_error_t de;
12318
12319 if (handle == NULL)
12320 return (DDI_FAILURE);
12321 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
12322 return (de.fme_status);
12323 }
12324
/*
 * mptsas_fm_ereport --
 *	Post an FMA ereport whose class is built from DDI_FM_DEVICE and
 *	the caller-supplied detail string.  The report is only posted when
 *	the instance registered ereport capability with the FM framework.
 */
void
mptsas_fm_ereport(mptsas_t *mpt, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	/* Build the full class string, then generate an ENA for the event. */
	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
12338
12339 static int
12340 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12341 uint16_t *dev_handle, mptsas_target_t **pptgt)
12342 {
12343 int rval;
12344 uint32_t dev_info;
12345 uint64_t sas_wwn;
12346 mptsas_phymask_t phymask;
12347 uint8_t physport, phynum, config, disk;
12348 mptsas_slots_t *slots = mpt->m_active;
12349 uint64_t devicename;
12350 uint16_t pdev_hdl;
12351 mptsas_target_t *tmp_tgt = NULL;
12352 uint16_t bay_num, enclosure;
12353
12354 ASSERT(*pptgt == NULL);
12355
12356 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12357 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12358 &bay_num, &enclosure);
12359 if (rval != DDI_SUCCESS) {
12360 rval = DEV_INFO_FAIL_PAGE0;
12361 return (rval);
12362 }
12363
12364 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12365 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12366 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12367 rval = DEV_INFO_WRONG_DEVICE_TYPE;
12368 return (rval);
12369 }
12370
12371 /*
12372 * Check if the dev handle is for a Phys Disk. If so, set return value
12373 * and exit. Don't add Phys Disks to hash.
12374 */
12375 for (config = 0; config < slots->m_num_raid_configs; config++) {
12376 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12377 if (*dev_handle == slots->m_raidconfig[config].
12378 m_physdisk_devhdl[disk]) {
12379 rval = DEV_INFO_PHYS_DISK;
12380 return (rval);
12381 }
12382 }
12383 }
12384
12385 /*
12386 * Get SATA Device Name from SAS device page0 for
12387 * sata device, if device name doesn't exist, set m_sas_wwn to
12388 * 0 for direct attached SATA. For the device behind the expander
12389 * we still can use STP address assigned by expander.
12390 */
12391 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12392 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12393 mutex_exit(&mpt->m_mutex);
12394 /* alloc a tmp_tgt to send the cmd */
12395 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12396 KM_SLEEP);
12397 tmp_tgt->m_devhdl = *dev_handle;
12398 tmp_tgt->m_deviceinfo = dev_info;
12399 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12400 tmp_tgt->m_qfull_retry_interval =
12401 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12402 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12403 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12404 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12405 mutex_enter(&mpt->m_mutex);
12406 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12407 sas_wwn = devicename;
12408 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12409 sas_wwn = 0;
12410 }
12411 }
12412
12413 phymask = mptsas_physport_to_phymask(mpt, physport);
12414 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12415 dev_info, phymask, phynum);
12416 if (*pptgt == NULL) {
12417 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
12418 "structure!");
12419 rval = DEV_INFO_FAIL_ALLOC;
12420 return (rval);
12421 }
12422 (*pptgt)->m_enclosure = enclosure;
12423 (*pptgt)->m_slot_num = bay_num;
12424 return (DEV_INFO_SUCCESS);
12425 }
12426
/*
 * mptsas_get_sata_guid --
 *	Fetch a world wide name for a SATA device by issuing an INQUIRY
 *	for VPD page 0x83 (device identification) and extracting the first
 *	designator.  Retries up to 3 times when the data is not yet ready.
 *
 *	Returns the NAA-5 WWN on success, or 0 when the device has no NAA
 *	format GUID or the inquiry fails.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* Association field (bits 5:4 of byte 1) must be 0 (LUN). */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	/* High nibble 0x5 indicates an NAA-5 identifier. */
	if ((dblk[4] & 0xf0) == 0x50) {
		/* Identifier is big-endian on the wire; convert to host. */
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* ASCII identifier: the drive has no NAA format GUID. */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		/* Sleep one second before retrying. */
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
12475
12476 static int
12477 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12478 unsigned char *buf, int len, int *reallen, uchar_t evpd)
12479 {
12480 uchar_t cdb[CDB_GROUP0];
12481 struct scsi_address ap;
12482 struct buf *data_bp = NULL;
12483 int resid = 0;
12484 int ret = DDI_FAILURE;
12485
12486 ASSERT(len <= 0xffff);
12487
12488 ap.a_target = MPTSAS_INVALID_DEVHDL;
12489 ap.a_lun = (uchar_t)(lun);
12490 ap.a_hba_tran = mpt->m_tran;
12491
12492 data_bp = scsi_alloc_consistent_buf(&ap,
12493 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12494 if (data_bp == NULL) {
12495 return (ret);
12496 }
12497 bzero(cdb, CDB_GROUP0);
12498 cdb[0] = SCMD_INQUIRY;
12499 cdb[1] = evpd;
12500 cdb[2] = page;
12501 cdb[3] = (len & 0xff00) >> 8;
12502 cdb[4] = (len & 0x00ff);
12503 cdb[5] = 0;
12504
12505 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12506 &resid);
12507 if (ret == DDI_SUCCESS) {
12508 if (reallen) {
12509 *reallen = len - resid;
12510 }
12511 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12512 }
12513 if (data_bp) {
12514 scsi_free_consistent_buf(data_bp);
12515 }
12516 return (ret);
12517 }
12518
/*
 * mptsas_send_scsi_cmd --
 *	Build and synchronously execute (via scsi_poll) a SCSI command for
 *	a target that may not yet have a devinfo node.  A clone of the HBA
 *	tran with a private tgt structure is used to carry the target
 *	address into scsi_init_pkt, simulating a command issued by sd.
 *	On success *resid (if non-NULL) receives the packet residual.
 *	Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	/* KM_SLEEP allocations do not fail; this check is defensive. */
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	/* Allocate a packet with room for auto-request-sense status. */
	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Execute the command synchronously. */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A check-condition status means the command did not succeed. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Common cleanup for all exit paths. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
/*
 * mptsas_parse_address --
 *	Parse a unit-address string of the form "w<WWN>,<LUN>" or
 *	"p<PHY>,<LUN>" (the leading 'w'/'p' selects WWN vs. PHY
 *	addressing; both numeric fields are hexadecimal).  On success
 *	*lun is always set, and either *wwid ('w' form) or *phy ('p'
 *	form) is filled in.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char	*cp = NULL;
	char	*ptr = NULL;
	size_t	s = 0;
	char	*wwid_str = NULL;
	char	*lun_str = NULL;
	long	lunnum;
	long	phyid = -1;
	int	rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	/* The address and LUN fields are separated by a comma. */
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	/* Copy out the WWN/PHY portion (between the prefix and comma). */
	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/* Locate the end of the string to bound the LUN field. */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	/* Convert the first field: a PHY id for 'p', a WWN for 'w'. */
	if (name[0] == 'p') {
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	/* The LUN field is hexadecimal as well. */
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
12644
12645 /*
12646 * mptsas_parse_smp_name() is to parse sas wwn string
12647 * which format is "wWWN"
12648 */
12649 static int
12650 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12651 {
12652 char *ptr = name;
12653
12654 if (*ptr != 'w') {
12655 return (DDI_FAILURE);
12656 }
12657
12658 ptr++;
12659 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12660 return (DDI_FAILURE);
12661 }
12662 return (DDI_SUCCESS);
12663 }
12664
/*
 * mptsas_bus_config --
 *	Bus-config entry point for an mptsas iport.  Handles
 *	BUS_CONFIG_ONE (configure one SMP, WWN- or PHY-addressed device)
 *	and BUS_CONFIG_DRIVER/BUS_CONFIG_ALL (enumerate everything).
 *	The scsi_vhci and iport nodes are held across the operation.
 *	Returns an NDI_* status.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (which have a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* Append ",0" to form a canonical address. */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			/* Dispatch on addressing style: WWN vs. PHY. */
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				/* Hold the child for the caller. */
				ndi_hold_devi(*childp);
			}
		} else {
			/* Neither SMP, WWN, nor PHY addressing: reject. */
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the framework finish, using the canonicalized name if any. */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
12782
12783 static int
12784 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12785 mptsas_target_t *ptgt)
12786 {
12787 int rval = DDI_FAILURE;
12788 struct scsi_inquiry *sd_inq = NULL;
12789 mptsas_t *mpt = DIP2MPT(pdip);
12790
12791 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12792
12793 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12794 SUN_INQSIZE, 0, (uchar_t)0);
12795
12796 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12797 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12798 } else {
12799 rval = DDI_FAILURE;
12800 }
12801
12802 kmem_free(sd_inq, SUN_INQSIZE);
12803 return (rval);
12804 }
12805
/*
 * mptsas_config_one_addr --
 *	Configure the device addressed by SAS address + LUN under the
 *	given iport.  Returns an existing child node when one is already
 *	present, configures an IR volume when the iport's phymask is 0,
 *	and otherwise probes the LUN.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_sas_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
12866
12867 static int
12868 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
12869 dev_info_t **lundip)
12870 {
12871 int rval;
12872 mptsas_t *mpt = DIP2MPT(pdip);
12873 int phymask;
12874 mptsas_target_t *ptgt = NULL;
12875
12876 /*
12877 * Get the physical port associated to the iport
12878 */
12879 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12880 "phymask", 0);
12881
12882 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
12883 if (ptgt == NULL) {
12884 /*
12885 * didn't match any device by searching
12886 */
12887 return (DDI_FAILURE);
12888 }
12889
12890 /*
12891 * If the LUN already exists and the status is online,
12892 * we just return the pointer to dev_info_t directly.
12893 * For the mdi_pathinfo node, we'll handle it in
12894 * mptsas_create_virt_lun().
12895 */
12896
12897 *lundip = mptsas_find_child_phy(pdip, phy);
12898 if (*lundip != NULL) {
12899 return (DDI_SUCCESS);
12900 }
12901
12902 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12903
12904 return (rval);
12905 }
12906
12907 static int
12908 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
12909 uint8_t *lun_addr_type)
12910 {
12911 uint32_t lun_idx = 0;
12912
12913 ASSERT(lun_num != NULL);
12914 ASSERT(lun_addr_type != NULL);
12915
12916 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
12917 /* determine report luns addressing type */
12918 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
12919 /*
12920 * Vendors in the field have been found to be concatenating
12921 * bus/target/lun to equal the complete lun value instead
12922 * of switching to flat space addressing
12923 */
12924 /* 00b - peripheral device addressing method */
12925 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
12926 /* FALLTHRU */
12927 /* 10b - logical unit addressing method */
12928 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
12929 /* FALLTHRU */
12930 /* 01b - flat space addressing method */
12931 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
12932 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
12933 *lun_addr_type = (buf[lun_idx] &
12934 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
12935 *lun_num = (buf[lun_idx] & 0x3F) << 8;
12936 *lun_num |= buf[lun_idx + 1];
12937 return (DDI_SUCCESS);
12938 default:
12939 return (DDI_FAILURE);
12940 }
12941 }
12942
12943 static int
12944 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
12945 {
12946 struct buf *repluns_bp = NULL;
12947 struct scsi_address ap;
12948 uchar_t cdb[CDB_GROUP5];
12949 int ret = DDI_FAILURE;
12950 int retry = 0;
12951 int lun_list_len = 0;
12952 uint16_t lun_num = 0;
12953 uint8_t lun_addr_type = 0;
12954 uint32_t lun_cnt = 0;
12955 uint32_t lun_total = 0;
12956 dev_info_t *cdip = NULL;
12957 uint16_t *saved_repluns = NULL;
12958 char *buffer = NULL;
12959 int buf_len = 128;
12960 mptsas_t *mpt = DIP2MPT(pdip);
12961 uint64_t sas_wwn = 0;
12962 uint8_t phy = 0xFF;
12963 uint32_t dev_info = 0;
12964
12965 mutex_enter(&mpt->m_mutex);
12966 sas_wwn = ptgt->m_sas_wwn;
12967 phy = ptgt->m_phynum;
12968 dev_info = ptgt->m_deviceinfo;
12969 mutex_exit(&mpt->m_mutex);
12970
12971 if (sas_wwn == 0) {
12972 /*
12973 * It's a SATA without Device Name
12974 * So don't try multi-LUNs
12975 */
12976 if (mptsas_find_child_phy(pdip, phy)) {
12977 return (DDI_SUCCESS);
12978 } else {
12979 /*
12980 * need configure and create node
12981 */
12982 return (DDI_FAILURE);
12983 }
12984 }
12985
12986 /*
12987 * WWN (SAS address or Device Name exist)
12988 */
12989 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12990 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12991 /*
12992 * SATA device with Device Name
12993 * So don't try multi-LUNs
12994 */
12995 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
12996 return (DDI_SUCCESS);
12997 } else {
12998 return (DDI_FAILURE);
12999 }
13000 }
13001
13002 do {
13003 ap.a_target = MPTSAS_INVALID_DEVHDL;
13004 ap.a_lun = 0;
13005 ap.a_hba_tran = mpt->m_tran;
13006 repluns_bp = scsi_alloc_consistent_buf(&ap,
13007 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13008 if (repluns_bp == NULL) {
13009 retry++;
13010 continue;
13011 }
13012 bzero(cdb, CDB_GROUP5);
13013 cdb[0] = SCMD_REPORT_LUNS;
13014 cdb[6] = (buf_len & 0xff000000) >> 24;
13015 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13016 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13017 cdb[9] = (buf_len & 0x000000ff);
13018
13019 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13020 repluns_bp, NULL);
13021 if (ret != DDI_SUCCESS) {
13022 scsi_free_consistent_buf(repluns_bp);
13023 retry++;
13024 continue;
13025 }
13026 lun_list_len = BE_32(*(int *)((void *)(
13027 repluns_bp->b_un.b_addr)));
13028 if (buf_len >= lun_list_len + 8) {
13029 ret = DDI_SUCCESS;
13030 break;
13031 }
13032 scsi_free_consistent_buf(repluns_bp);
13033 buf_len = lun_list_len + 8;
13034
13035 } while (retry < 3);
13036
13037 if (ret != DDI_SUCCESS)
13038 return (ret);
13039 buffer = (char *)repluns_bp->b_un.b_addr;
13040 /*
13041 * find out the number of luns returned by the SCSI ReportLun call
13042 * and allocate buffer space
13043 */
13044 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13045 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13046 if (saved_repluns == NULL) {
13047 scsi_free_consistent_buf(repluns_bp);
13048 return (DDI_FAILURE);
13049 }
13050 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13051 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13052 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13053 continue;
13054 }
13055 saved_repluns[lun_cnt] = lun_num;
13056 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13057 ret = DDI_SUCCESS;
13058 else
13059 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13060 ptgt);
13061 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13062 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13063 MPTSAS_DEV_GONE);
13064 }
13065 }
13066 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13067 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13068 scsi_free_consistent_buf(repluns_bp);
13069 return (DDI_SUCCESS);
13070 }
13071
13072 static int
13073 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13074 {
13075 int rval = DDI_FAILURE;
13076 struct scsi_inquiry *sd_inq = NULL;
13077 mptsas_t *mpt = DIP2MPT(pdip);
13078 mptsas_target_t *ptgt = NULL;
13079
13080 mutex_enter(&mpt->m_mutex);
13081 ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13082 mutex_exit(&mpt->m_mutex);
13083 if (ptgt == NULL) {
13084 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13085 "not found.", target);
13086 return (rval);
13087 }
13088
13089 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13090 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13091 SUN_INQSIZE, 0, (uchar_t)0);
13092
13093 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13094 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13095 0);
13096 } else {
13097 rval = DDI_FAILURE;
13098 }
13099
13100 kmem_free(sd_inq, SUN_INQSIZE);
13101 return (rval);
13102 }
13103
13104 /*
13105 * configure all RAID volumes for virtual iport
13106 */
13107 static void
13108 mptsas_config_all_viport(dev_info_t *pdip)
13109 {
13110 mptsas_t *mpt = DIP2MPT(pdip);
13111 int config, vol;
13112 int target;
13113 dev_info_t *lundip = NULL;
13114 mptsas_slots_t *slots = mpt->m_active;
13115
13116 /*
13117 * Get latest RAID info and search for any Volume DevHandles. If any
13118 * are found, configure the volume.
13119 */
13120 mutex_enter(&mpt->m_mutex);
13121 for (config = 0; config < slots->m_num_raid_configs; config++) {
13122 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13123 if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13124 == 1) {
13125 target = slots->m_raidconfig[config].
13126 m_raidvol[vol].m_raidhandle;
13127 mutex_exit(&mpt->m_mutex);
13128 (void) mptsas_config_raid(pdip, target,
13129 &lundip);
13130 mutex_enter(&mpt->m_mutex);
13131 }
13132 }
13133 }
13134 mutex_exit(&mpt->m_mutex);
13135 }
13136
/*
 * Offline every child dev_info node and mdi_pathinfo node under pdip
 * that belongs to target ptgt (matched by SAS WWN in the unit address)
 * but whose LUN number is absent from repluns[0..lun_cnt-1], i.e. it
 * was not reported by the latest REPORT LUNS response.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/* Snapshot the target's WWN under the driver mutex. */
	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain dev_info children.  The save-then-advance idiom
	 * is used because offlining savechild may unlink it from the
	 * sibling list.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP (expander) nodes are not LUNs; skip them. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		/* Parse "w<wwn>,<lun>" / "p<phy>,<lun>" unit address. */
		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			/* Same target: is its LUN still reported? */
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			/* Child belongs to a different target. */
			continue;
		}
		if (find == 0) {
			/*
			 * The LUN is no longer there; offline and
			 * remove the node.
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi_pathinfo (scsi_vhci) paths, same save-then-advance
	 * idiom and same matching logic.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The LUN is no longer there; offline and
			 * remove the path.
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
13233
/*
 * Refresh the driver's device tables from the IOC: update RAID info,
 * then walk all SMP expanders and all SAS device handles using the
 * GetNextHandle forms of the config pages, recording each in the
 * active SMP/target tables.  m_smp_devhdl and m_dev_handle persist the
 * traversal position across calls; the m_done_traverse_* flags are
 * presumably set by the page-fetch helpers once the firmware reports
 * the end of each list -- NOTE(review): setters are outside this file
 * view, confirm.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk SMP expanders by GetNextHandle from the saved position. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Record the expander and advance the saved position. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		/* Stop on page-read or allocation failure. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
13287
13288 void
13289 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
13290 {
13291 mptsas_hash_data_t *data;
13292 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13293 while (data != NULL) {
13294 data->devhdl = MPTSAS_INVALID_DEVHDL;
13295 data->device_info = 0;
13296 /*
13297 * For tgttbl, clear dr_flag.
13298 */
13299 data->dr_flag = MPTSAS_DR_INACTIVE;
13300 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13301 }
13302 }
13303
/*
 * Rebuild driver state after a hard reset: refresh the port/phymask
 * mapping, invalidate all cached target and SMP entries, rewind the
 * device/SMP traversal state, and re-enumerate from the IOC.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalidate all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalidate the existing entries in both tables.
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* Restart both traversals from the invalid (initial) handle. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
13325
/*
 * Configure every device on one iport: RAID volumes for the virtual
 * iport (phymask == 0), otherwise all SMP expanders and targets whose
 * phymask matches this iport.  The driver mutex is dropped around the
 * online/config calls, which can sleep.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Make sure the device/SMP tables are fully populated. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/* Online each SMP expander that belongs to this iport. */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			/* mptsas_online_smp() may sleep; drop the mutex. */
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* Configure each target that belongs to this iport. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			/* mptsas_config_target() may sleep; drop the mutex. */
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
13385
13386 static int
13387 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13388 {
13389 int rval = DDI_FAILURE;
13390 dev_info_t *tdip;
13391
13392 rval = mptsas_config_luns(pdip, ptgt);
13393 if (rval != DDI_SUCCESS) {
13394 /*
13395 * The return value means the SCMD_REPORT_LUNS
13396 * did not execute successfully. The target maybe
13397 * doesn't support such command.
13398 */
13399 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13400 }
13401 return (rval);
13402 }
13403
13404 /*
13405 * Return fail if not all the childs/paths are freed.
13406 * if there is any path under the HBA, the return value will be always fail
13407 * because we didn't call mdi_pi_free for path
13408 */
13409 static int
13410 mptsas_offline_target(dev_info_t *pdip, char *name)
13411 {
13412 dev_info_t *child = NULL, *prechild = NULL;
13413 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
13414 int tmp_rval, rval = DDI_SUCCESS;
13415 char *addr, *cp;
13416 size_t s;
13417 mptsas_t *mpt = DIP2MPT(pdip);
13418
13419 child = ddi_get_child(pdip);
13420 while (child) {
13421 addr = ddi_get_name_addr(child);
13422 prechild = child;
13423 child = ddi_get_next_sibling(child);
13424
13425 if (addr == NULL) {
13426 continue;
13427 }
13428 if ((cp = strchr(addr, ',')) == NULL) {
13429 continue;
13430 }
13431
13432 s = (uintptr_t)cp - (uintptr_t)addr;
13433
13434 if (strncmp(addr, name, s) != 0) {
13435 continue;
13436 }
13437
13438 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
13439 NDI_DEVI_REMOVE);
13440 if (tmp_rval != DDI_SUCCESS) {
13441 rval = DDI_FAILURE;
13442 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
13443 prechild, MPTSAS_DEV_GONE) !=
13444 DDI_PROP_SUCCESS) {
13445 mptsas_log(mpt, CE_WARN, "mptsas driver "
13446 "unable to create property for "
13447 "SAS %s (MPTSAS_DEV_GONE)", addr);
13448 }
13449 }
13450 }
13451
13452 pip = mdi_get_next_client_path(pdip, NULL);
13453 while (pip) {
13454 addr = MDI_PI(pip)->pi_addr;
13455 savepip = pip;
13456 pip = mdi_get_next_client_path(pdip, pip);
13457 if (addr == NULL) {
13458 continue;
13459 }
13460
13461 if ((cp = strchr(addr, ',')) == NULL) {
13462 continue;
13463 }
13464
13465 s = (uintptr_t)cp - (uintptr_t)addr;
13466
13467 if (strncmp(addr, name, s) != 0) {
13468 continue;
13469 }
13470
13471 (void) mptsas_offline_lun(pdip, NULL, savepip,
13472 NDI_DEVI_REMOVE);
13473 /*
13474 * driver will not invoke mdi_pi_free, so path will not
13475 * be freed forever, return DDI_FAILURE.
13476 */
13477 rval = DDI_FAILURE;
13478 }
13479 return (rval);
13480 }
13481
13482 static int
13483 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13484 mdi_pathinfo_t *rpip, uint_t flags)
13485 {
13486 int rval = DDI_FAILURE;
13487 char *devname;
13488 dev_info_t *cdip, *parent;
13489
13490 if (rpip != NULL) {
13491 parent = scsi_vhci_dip;
13492 cdip = mdi_pi_get_client(rpip);
13493 } else if (rdip != NULL) {
13494 parent = pdip;
13495 cdip = rdip;
13496 } else {
13497 return (DDI_FAILURE);
13498 }
13499
13500 /*
13501 * Make sure node is attached otherwise
13502 * it won't have related cache nodes to
13503 * clean up. i_ddi_devi_attached is
13504 * similiar to i_ddi_node_state(cdip) >=
13505 * DS_ATTACHED.
13506 */
13507 if (i_ddi_devi_attached(cdip)) {
13508
13509 /* Get full devname */
13510 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13511 (void) ddi_deviname(cdip, devname);
13512 /* Clean cache */
13513 (void) devfs_clean(parent, devname + 1,
13514 DV_CLEAN_FORCE);
13515 kmem_free(devname, MAXNAMELEN + 1);
13516 }
13517 if (rpip != NULL) {
13518 if (MDI_PI_IS_OFFLINE(rpip)) {
13519 rval = DDI_SUCCESS;
13520 } else {
13521 rval = mdi_pi_offline(rpip, 0);
13522 }
13523 } else {
13524 rval = ndi_devi_offline(cdip, flags);
13525 }
13526
13527 return (rval);
13528 }
13529
13530 static dev_info_t *
13531 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13532 {
13533 dev_info_t *child = NULL;
13534 char *smp_wwn = NULL;
13535
13536 child = ddi_get_child(parent);
13537 while (child) {
13538 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13539 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13540 != DDI_SUCCESS) {
13541 child = ddi_get_next_sibling(child);
13542 continue;
13543 }
13544
13545 if (strcmp(smp_wwn, str_wwn) == 0) {
13546 ddi_prop_free(smp_wwn);
13547 break;
13548 }
13549 child = ddi_get_next_sibling(child);
13550 ddi_prop_free(smp_wwn);
13551 }
13552 return (child);
13553 }
13554
13555 static int
13556 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13557 {
13558 int rval = DDI_FAILURE;
13559 char *devname;
13560 char wwn_str[MPTSAS_WWN_STRLEN];
13561 dev_info_t *cdip;
13562
13563 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
13564
13565 cdip = mptsas_find_smp_child(pdip, wwn_str);
13566
13567 if (cdip == NULL)
13568 return (DDI_SUCCESS);
13569
13570 /*
13571 * Make sure node is attached otherwise
13572 * it won't have related cache nodes to
13573 * clean up. i_ddi_devi_attached is
13574 * similiar to i_ddi_node_state(cdip) >=
13575 * DS_ATTACHED.
13576 */
13577 if (i_ddi_devi_attached(cdip)) {
13578
13579 /* Get full devname */
13580 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13581 (void) ddi_deviname(cdip, devname);
13582 /* Clean cache */
13583 (void) devfs_clean(pdip, devname + 1,
13584 DV_CLEAN_FORCE);
13585 kmem_free(devname, MAXNAMELEN + 1);
13586 }
13587
13588 rval = ndi_devi_offline(cdip, flags);
13589
13590 return (rval);
13591 }
13592
13593 static dev_info_t *
13594 mptsas_find_child(dev_info_t *pdip, char *name)
13595 {
13596 dev_info_t *child = NULL;
13597 char *rname = NULL;
13598 int rval = DDI_FAILURE;
13599
13600 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13601
13602 child = ddi_get_child(pdip);
13603 while (child) {
13604 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13605 if (rval != DDI_SUCCESS) {
13606 child = ddi_get_next_sibling(child);
13607 bzero(rname, SCSI_MAXNAMELEN);
13608 continue;
13609 }
13610
13611 if (strcmp(rname, name) == 0) {
13612 break;
13613 }
13614 child = ddi_get_next_sibling(child);
13615 bzero(rname, SCSI_MAXNAMELEN);
13616 }
13617
13618 kmem_free(rname, SCSI_MAXNAMELEN);
13619
13620 return (child);
13621 }
13622
13623
13624 static dev_info_t *
13625 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13626 {
13627 dev_info_t *child = NULL;
13628 char *name = NULL;
13629 char *addr = NULL;
13630
13631 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13632 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13633 (void) sprintf(name, "%016"PRIx64, sasaddr);
13634 (void) sprintf(addr, "w%s,%x", name, lun);
13635 child = mptsas_find_child(pdip, addr);
13636 kmem_free(name, SCSI_MAXNAMELEN);
13637 kmem_free(addr, SCSI_MAXNAMELEN);
13638 return (child);
13639 }
13640
13641 static dev_info_t *
13642 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13643 {
13644 dev_info_t *child;
13645 char *addr;
13646
13647 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13648 (void) sprintf(addr, "p%x,0", phy);
13649 child = mptsas_find_child(pdip, addr);
13650 kmem_free(addr, SCSI_MAXNAMELEN);
13651 return (child);
13652 }
13653
13654 static mdi_pathinfo_t *
13655 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13656 {
13657 mdi_pathinfo_t *path;
13658 char *addr = NULL;
13659
13660 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13661 (void) sprintf(addr, "p%x,0", phy);
13662 path = mdi_pi_find(pdip, NULL, addr);
13663 kmem_free(addr, SCSI_MAXNAMELEN);
13664 return (path);
13665 }
13666
13667 static mdi_pathinfo_t *
13668 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13669 {
13670 mdi_pathinfo_t *path;
13671 char *name = NULL;
13672 char *addr = NULL;
13673
13674 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13675 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13676 (void) sprintf(name, "%016"PRIx64, sasaddr);
13677 (void) sprintf(addr, "w%s,%x", name, lun);
13678 path = mdi_pi_find(parent, NULL, addr);
13679 kmem_free(name, SCSI_MAXNAMELEN);
13680 kmem_free(addr, SCSI_MAXNAMELEN);
13681
13682 return (path);
13683 }
13684
/*
 * Create the child node for one (target, lun): query VPD page 0x83 to
 * derive a devid and GUID, then create either a virtual (scsi_vhci)
 * LUN when MPxIO is enabled and a valid GUID was obtained, or a
 * physical LUN node otherwise.  Returns the result of the node
 * creation, or DDI_FAILURE when page-0x83 retrieval fails outright.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int		i = 0;
	uchar_t		*inq83 = NULL;
	int		inq83_len1 = 0xFF;
	int		inq83_len = 0;
	int		rval = DDI_FAILURE;
	ddi_devid_t	devid;
	char		*guid = NULL;
	int		target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall through to
			 * a physical-node bind even when page 0x83 fails.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constraint would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it is worth
			 * retrying inquiry page 0x83 to get the GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/* rval is recomputed below by the node-creation calls. */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer a virtual (scsi_vhci) LUN when MPxIO is on and GUID known. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical LUN node. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
13808
13809 static int
13810 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13811 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13812 {
13813 int target;
13814 char *nodename = NULL;
13815 char **compatible = NULL;
13816 int ncompatible = 0;
13817 int mdi_rtn = MDI_FAILURE;
13818 int rval = DDI_FAILURE;
13819 char *old_guid = NULL;
13820 mptsas_t *mpt = DIP2MPT(pdip);
13821 char *lun_addr = NULL;
13822 char *wwn_str = NULL;
13823 char *attached_wwn_str = NULL;
13824 char *component = NULL;
13825 uint8_t phy = 0xFF;
13826 uint64_t sas_wwn;
13827 int64_t lun64 = 0;
13828 uint32_t devinfo;
13829 uint16_t dev_hdl;
13830 uint16_t pdev_hdl;
13831 uint64_t dev_sas_wwn;
13832 uint64_t pdev_sas_wwn;
13833 uint32_t pdev_info;
13834 uint8_t physport;
13835 uint8_t phy_id;
13836 uint32_t page_address;
13837 uint16_t bay_num, enclosure;
13838 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
13839 uint32_t dev_info;
13840
13841 mutex_enter(&mpt->m_mutex);
13842 target = ptgt->m_devhdl;
13843 sas_wwn = ptgt->m_sas_wwn;
13844 devinfo = ptgt->m_deviceinfo;
13845 phy = ptgt->m_phynum;
13846 mutex_exit(&mpt->m_mutex);
13847
13848 if (sas_wwn) {
13849 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13850 } else {
13851 *pip = mptsas_find_path_phy(pdip, phy);
13852 }
13853
13854 if (*pip != NULL) {
13855 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13856 ASSERT(*lun_dip != NULL);
13857 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
13858 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
13859 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
13860 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
13861 /*
13862 * Same path back online again.
13863 */
13864 (void) ddi_prop_free(old_guid);
13865 if ((!MDI_PI_IS_ONLINE(*pip)) &&
13866 (!MDI_PI_IS_STANDBY(*pip)) &&
13867 (ptgt->m_tgt_unconfigured == 0)) {
13868 rval = mdi_pi_online(*pip, 0);
13869 } else {
13870 rval = DDI_SUCCESS;
13871 }
13872 if (rval != DDI_SUCCESS) {
13873 mptsas_log(mpt, CE_WARN, "path:target: "
13874 "%x, lun:%x online failed!", target,
13875 lun);
13876 *pip = NULL;
13877 *lun_dip = NULL;
13878 }
13879 return (rval);
13880 } else {
13881 /*
13882 * The GUID of the LUN has changed which maybe
13883 * because customer mapped another volume to the
13884 * same LUN.
13885 */
13886 mptsas_log(mpt, CE_WARN, "The GUID of the "
13887 "target:%x, lun:%x was changed, maybe "
13888 "because someone mapped another volume "
13889 "to the same LUN", target, lun);
13890 (void) ddi_prop_free(old_guid);
13891 if (!MDI_PI_IS_OFFLINE(*pip)) {
13892 rval = mdi_pi_offline(*pip, 0);
13893 if (rval != MDI_SUCCESS) {
13894 mptsas_log(mpt, CE_WARN, "path:"
13895 "target:%x, lun:%x offline "
13896 "failed!", target, lun);
13897 *pip = NULL;
13898 *lun_dip = NULL;
13899 return (DDI_FAILURE);
13900 }
13901 }
13902 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
13903 mptsas_log(mpt, CE_WARN, "path:target:"
13904 "%x, lun:%x free failed!", target,
13905 lun);
13906 *pip = NULL;
13907 *lun_dip = NULL;
13908 return (DDI_FAILURE);
13909 }
13910 }
13911 } else {
13912 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
13913 "property for path:target:%x, lun:%x", target, lun);
13914 *pip = NULL;
13915 *lun_dip = NULL;
13916 return (DDI_FAILURE);
13917 }
13918 }
13919 scsi_hba_nodename_compatible_get(inq, NULL,
13920 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
13921
13922 /*
13923 * if nodename can't be determined then print a message and skip it
13924 */
13925 if (nodename == NULL) {
13926 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
13927 "driver for target%d lun %d dtype:0x%02x", target, lun,
13928 inq->inq_dtype);
13929 return (DDI_FAILURE);
13930 }
13931
13932 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
13933 /* The property is needed by MPAPI */
13934 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
13935
13936 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13937 if (guid) {
13938 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
13939 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
13940 } else {
13941 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
13942 (void) sprintf(wwn_str, "p%x", phy);
13943 }
13944
13945 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
13946 guid, lun_addr, compatible, ncompatible,
13947 0, pip);
13948 if (mdi_rtn == MDI_SUCCESS) {
13949
13950 if (mdi_prop_update_string(*pip, MDI_GUID,
13951 guid) != DDI_SUCCESS) {
13952 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13953 "create prop for target %d lun %d (MDI_GUID)",
13954 target, lun);
13955 mdi_rtn = MDI_FAILURE;
13956 goto virt_create_done;
13957 }
13958
13959 if (mdi_prop_update_int(*pip, LUN_PROP,
13960 lun) != DDI_SUCCESS) {
13961 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13962 "create prop for target %d lun %d (LUN_PROP)",
13963 target, lun);
13964 mdi_rtn = MDI_FAILURE;
13965 goto virt_create_done;
13966 }
13967 lun64 = (int64_t)lun;
13968 if (mdi_prop_update_int64(*pip, LUN64_PROP,
13969 lun64) != DDI_SUCCESS) {
13970 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13971 "create prop for target %d (LUN64_PROP)",
13972 target);
13973 mdi_rtn = MDI_FAILURE;
13974 goto virt_create_done;
13975 }
13976 if (mdi_prop_update_string_array(*pip, "compatible",
13977 compatible, ncompatible) !=
13978 DDI_PROP_SUCCESS) {
13979 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13980 "create prop for target %d lun %d (COMPATIBLE)",
13981 target, lun);
13982 mdi_rtn = MDI_FAILURE;
13983 goto virt_create_done;
13984 }
13985 if (sas_wwn && (mdi_prop_update_string(*pip,
13986 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
13987 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13988 "create prop for target %d lun %d "
13989 "(target-port)", target, lun);
13990 mdi_rtn = MDI_FAILURE;
13991 goto virt_create_done;
13992 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
13993 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
13994 /*
13995 * Direct attached SATA device without DeviceName
13996 */
13997 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13998 "create prop for SAS target %d lun %d "
13999 "(sata-phy)", target, lun);
14000 mdi_rtn = MDI_FAILURE;
14001 goto virt_create_done;
14002 }
14003 mutex_enter(&mpt->m_mutex);
14004
14005 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14006 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14007 (uint32_t)ptgt->m_devhdl;
14008 rval = mptsas_get_sas_device_page0(mpt, page_address,
14009 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14010 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14011 if (rval != DDI_SUCCESS) {
14012 mutex_exit(&mpt->m_mutex);
14013 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14014 "parent device for handle %d", page_address);
14015 mdi_rtn = MDI_FAILURE;
14016 goto virt_create_done;
14017 }
14018
14019 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14020 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14021 rval = mptsas_get_sas_device_page0(mpt, page_address,
14022 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14023 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14024 if (rval != DDI_SUCCESS) {
14025 mutex_exit(&mpt->m_mutex);
14026 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14027 "device info for handle %d", page_address);
14028 mdi_rtn = MDI_FAILURE;
14029 goto virt_create_done;
14030 }
14031
14032 mutex_exit(&mpt->m_mutex);
14033
14034 /*
14035 * If this device direct attached to the controller
14036 * set the attached-port to the base wwid
14037 */
14038 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14039 != DEVINFO_DIRECT_ATTACHED) {
14040 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14041 pdev_sas_wwn);
14042 } else {
14043 /*
14044 * Update the iport's attached-port to guid
14045 */
14046 if (sas_wwn == 0) {
14047 (void) sprintf(wwn_str, "p%x", phy);
14048 } else {
14049 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14050 }
14051 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14052 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14053 DDI_PROP_SUCCESS) {
14054 mptsas_log(mpt, CE_WARN,
14055 "mptsas unable to create "
14056 "property for iport target-port"
14057 " %s (sas_wwn)",
14058 wwn_str);
14059 mdi_rtn = MDI_FAILURE;
14060 goto virt_create_done;
14061 }
14062
14063 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14064 mpt->un.m_base_wwid);
14065 }
14066
14067 if (mdi_prop_update_string(*pip,
14068 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14069 DDI_PROP_SUCCESS) {
14070 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14071 "property for iport attached-port %s (sas_wwn)",
14072 attached_wwn_str);
14073 mdi_rtn = MDI_FAILURE;
14074 goto virt_create_done;
14075 }
14076
14077
14078 if (inq->inq_dtype == 0) {
14079 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14080 /*
14081 * set obp path for pathinfo
14082 */
14083 (void) snprintf(component, MAXPATHLEN,
14084 "disk@%s", lun_addr);
14085
14086 if (mdi_pi_pathname_obp_set(*pip, component) !=
14087 DDI_SUCCESS) {
14088 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14089 "unable to set obp-path for object %s",
14090 component);
14091 mdi_rtn = MDI_FAILURE;
14092 goto virt_create_done;
14093 }
14094 }
14095
14096 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14097 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14098 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14099 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14100 "pm-capable", 1)) !=
14101 DDI_PROP_SUCCESS) {
14102 mptsas_log(mpt, CE_WARN, "mptsas driver"
14103 "failed to create pm-capable "
14104 "property, target %d", target);
14105 mdi_rtn = MDI_FAILURE;
14106 goto virt_create_done;
14107 }
14108 }
14109 /*
14110 * Create the phy-num property
14111 */
14112 if (mdi_prop_update_int(*pip, "phy-num",
14113 ptgt->m_phynum) != DDI_SUCCESS) {
14114 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14115 "create phy-num property for target %d lun %d",
14116 target, lun);
14117 mdi_rtn = MDI_FAILURE;
14118 goto virt_create_done;
14119 }
14120 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14121 mdi_rtn = mdi_pi_online(*pip, 0);
14122 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14123 mdi_rtn = MDI_FAILURE;
14124 }
14125 virt_create_done:
14126 if (*pip && mdi_rtn != MDI_SUCCESS) {
14127 (void) mdi_pi_free(*pip, 0);
14128 *pip = NULL;
14129 *lun_dip = NULL;
14130 }
14131 }
14132
14133 scsi_hba_nodename_compatible_free(nodename, compatible);
14134 if (lun_addr != NULL) {
14135 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14136 }
14137 if (wwn_str != NULL) {
14138 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14139 }
14140 if (component != NULL) {
14141 kmem_free(component, MAXPATHLEN);
14142 }
14143
14144 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14145 }
14146
14147 static int
14148 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14149 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14150 {
14151 int target;
14152 int rval;
14153 int ndi_rtn = NDI_FAILURE;
14154 uint64_t be_sas_wwn;
14155 char *nodename = NULL;
14156 char **compatible = NULL;
14157 int ncompatible = 0;
14158 int instance = 0;
14159 mptsas_t *mpt = DIP2MPT(pdip);
14160 char *wwn_str = NULL;
14161 char *component = NULL;
14162 char *attached_wwn_str = NULL;
14163 uint8_t phy = 0xFF;
14164 uint64_t sas_wwn;
14165 uint32_t devinfo;
14166 uint16_t dev_hdl;
14167 uint16_t pdev_hdl;
14168 uint64_t pdev_sas_wwn;
14169 uint64_t dev_sas_wwn;
14170 uint32_t pdev_info;
14171 uint8_t physport;
14172 uint8_t phy_id;
14173 uint32_t page_address;
14174 uint16_t bay_num, enclosure;
14175 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14176 uint32_t dev_info;
14177 int64_t lun64 = 0;
14178
14179 mutex_enter(&mpt->m_mutex);
14180 target = ptgt->m_devhdl;
14181 sas_wwn = ptgt->m_sas_wwn;
14182 devinfo = ptgt->m_deviceinfo;
14183 phy = ptgt->m_phynum;
14184 mutex_exit(&mpt->m_mutex);
14185
14186 /*
14187 * generate compatible property with binding-set "mpt"
14188 */
14189 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14190 &nodename, &compatible, &ncompatible);
14191
14192 /*
14193 * if nodename can't be determined then print a message and skip it
14194 */
14195 if (nodename == NULL) {
14196 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14197 "for target %d lun %d", target, lun);
14198 return (DDI_FAILURE);
14199 }
14200
14201 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14202 DEVI_SID_NODEID, lun_dip);
14203
14204 /*
14205 * if lun alloc success, set props
14206 */
14207 if (ndi_rtn == NDI_SUCCESS) {
14208
14209 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14210 *lun_dip, LUN_PROP, lun) !=
14211 DDI_PROP_SUCCESS) {
14212 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14213 "property for target %d lun %d (LUN_PROP)",
14214 target, lun);
14215 ndi_rtn = NDI_FAILURE;
14216 goto phys_create_done;
14217 }
14218
14219 lun64 = (int64_t)lun;
14220 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14221 *lun_dip, LUN64_PROP, lun64) !=
14222 DDI_PROP_SUCCESS) {
14223 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14224 "property for target %d lun64 %d (LUN64_PROP)",
14225 target, lun);
14226 ndi_rtn = NDI_FAILURE;
14227 goto phys_create_done;
14228 }
14229 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14230 *lun_dip, "compatible", compatible, ncompatible)
14231 != DDI_PROP_SUCCESS) {
14232 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14233 "property for target %d lun %d (COMPATIBLE)",
14234 target, lun);
14235 ndi_rtn = NDI_FAILURE;
14236 goto phys_create_done;
14237 }
14238
14239 /*
14240 * We need the SAS WWN for non-multipath devices, so
14241 * we'll use the same property as that multipathing
14242 * devices need to present for MPAPI. If we don't have
14243 * a WWN (e.g. parallel SCSI), don't create the prop.
14244 */
14245 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14246 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14247 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14248 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14249 != DDI_PROP_SUCCESS) {
14250 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14251 "create property for SAS target %d lun %d "
14252 "(target-port)", target, lun);
14253 ndi_rtn = NDI_FAILURE;
14254 goto phys_create_done;
14255 }
14256
14257 be_sas_wwn = BE_64(sas_wwn);
14258 if (sas_wwn && ndi_prop_update_byte_array(
14259 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14260 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14261 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14262 "create property for SAS target %d lun %d "
14263 "(port-wwn)", target, lun);
14264 ndi_rtn = NDI_FAILURE;
14265 goto phys_create_done;
14266 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
14267 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14268 DDI_PROP_SUCCESS)) {
14269 /*
14270 * Direct attached SATA device without DeviceName
14271 */
14272 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14273 "create property for SAS target %d lun %d "
14274 "(sata-phy)", target, lun);
14275 ndi_rtn = NDI_FAILURE;
14276 goto phys_create_done;
14277 }
14278
14279 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14280 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14281 mptsas_log(mpt, CE_WARN, "mptsas unable to"
14282 "create property for SAS target %d lun %d"
14283 " (SAS_PROP)", target, lun);
14284 ndi_rtn = NDI_FAILURE;
14285 goto phys_create_done;
14286 }
14287 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14288 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14289 mptsas_log(mpt, CE_WARN, "mptsas unable "
14290 "to create guid property for target %d "
14291 "lun %d", target, lun);
14292 ndi_rtn = NDI_FAILURE;
14293 goto phys_create_done;
14294 }
14295
14296 /*
14297 * The following code is to set properties for SM-HBA support,
14298 * it doesn't apply to RAID volumes
14299 */
14300 if (ptgt->m_phymask == 0)
14301 goto phys_raid_lun;
14302
14303 mutex_enter(&mpt->m_mutex);
14304
14305 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14306 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14307 (uint32_t)ptgt->m_devhdl;
14308 rval = mptsas_get_sas_device_page0(mpt, page_address,
14309 &dev_hdl, &dev_sas_wwn, &dev_info,
14310 &physport, &phy_id, &pdev_hdl,
14311 &bay_num, &enclosure);
14312 if (rval != DDI_SUCCESS) {
14313 mutex_exit(&mpt->m_mutex);
14314 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14315 "parent device for handle %d.", page_address);
14316 ndi_rtn = NDI_FAILURE;
14317 goto phys_create_done;
14318 }
14319
14320 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14321 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14322 rval = mptsas_get_sas_device_page0(mpt, page_address,
14323 &dev_hdl, &pdev_sas_wwn, &pdev_info,
14324 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14325 if (rval != DDI_SUCCESS) {
14326 mutex_exit(&mpt->m_mutex);
14327 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14328 "device for handle %d.", page_address);
14329 ndi_rtn = NDI_FAILURE;
14330 goto phys_create_done;
14331 }
14332
14333 mutex_exit(&mpt->m_mutex);
14334
14335 /*
14336 * If this device direct attached to the controller
14337 * set the attached-port to the base wwid
14338 */
14339 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14340 != DEVINFO_DIRECT_ATTACHED) {
14341 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14342 pdev_sas_wwn);
14343 } else {
14344 /*
14345 * Update the iport's attached-port to guid
14346 */
14347 if (sas_wwn == 0) {
14348 (void) sprintf(wwn_str, "p%x", phy);
14349 } else {
14350 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14351 }
14352 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14353 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14354 DDI_PROP_SUCCESS) {
14355 mptsas_log(mpt, CE_WARN,
14356 "mptsas unable to create "
14357 "property for iport target-port"
14358 " %s (sas_wwn)",
14359 wwn_str);
14360 ndi_rtn = NDI_FAILURE;
14361 goto phys_create_done;
14362 }
14363
14364 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14365 mpt->un.m_base_wwid);
14366 }
14367
14368 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14369 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14370 DDI_PROP_SUCCESS) {
14371 mptsas_log(mpt, CE_WARN,
14372 "mptsas unable to create "
14373 "property for iport attached-port %s (sas_wwn)",
14374 attached_wwn_str);
14375 ndi_rtn = NDI_FAILURE;
14376 goto phys_create_done;
14377 }
14378
14379 if (IS_SATA_DEVICE(dev_info)) {
14380 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14381 *lun_dip, MPTSAS_VARIANT, "sata") !=
14382 DDI_PROP_SUCCESS) {
14383 mptsas_log(mpt, CE_WARN,
14384 "mptsas unable to create "
14385 "property for device variant ");
14386 ndi_rtn = NDI_FAILURE;
14387 goto phys_create_done;
14388 }
14389 }
14390
14391 if (IS_ATAPI_DEVICE(dev_info)) {
14392 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14393 *lun_dip, MPTSAS_VARIANT, "atapi") !=
14394 DDI_PROP_SUCCESS) {
14395 mptsas_log(mpt, CE_WARN,
14396 "mptsas unable to create "
14397 "property for device variant ");
14398 ndi_rtn = NDI_FAILURE;
14399 goto phys_create_done;
14400 }
14401 }
14402
14403 phys_raid_lun:
14404 /*
14405 * if this is a SAS controller, and the target is a SATA
14406 * drive, set the 'pm-capable' property for sd and if on
14407 * an OPL platform, also check if this is an ATAPI
14408 * device.
14409 */
14410 instance = ddi_get_instance(mpt->m_dip);
14411 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14412 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14413 NDBG2(("mptsas%d: creating pm-capable property, "
14414 "target %d", instance, target));
14415
14416 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
14417 *lun_dip, "pm-capable", 1)) !=
14418 DDI_PROP_SUCCESS) {
14419 mptsas_log(mpt, CE_WARN, "mptsas "
14420 "failed to create pm-capable "
14421 "property, target %d", target);
14422 ndi_rtn = NDI_FAILURE;
14423 goto phys_create_done;
14424 }
14425
14426 }
14427
14428 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
14429 /*
14430 * add 'obp-path' properties for devinfo
14431 */
14432 bzero(wwn_str, sizeof (wwn_str));
14433 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14434 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14435 if (guid) {
14436 (void) snprintf(component, MAXPATHLEN,
14437 "disk@w%s,%x", wwn_str, lun);
14438 } else {
14439 (void) snprintf(component, MAXPATHLEN,
14440 "disk@p%x,%x", phy, lun);
14441 }
14442 if (ddi_pathname_obp_set(*lun_dip, component)
14443 != DDI_SUCCESS) {
14444 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14445 "unable to set obp-path for SAS "
14446 "object %s", component);
14447 ndi_rtn = NDI_FAILURE;
14448 goto phys_create_done;
14449 }
14450 }
14451 /*
14452 * Create the phy-num property for non-raid disk
14453 */
14454 if (ptgt->m_phymask != 0) {
14455 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14456 *lun_dip, "phy-num", ptgt->m_phynum) !=
14457 DDI_PROP_SUCCESS) {
14458 mptsas_log(mpt, CE_WARN, "mptsas driver "
14459 "failed to create phy-num property for "
14460 "target %d", target);
14461 ndi_rtn = NDI_FAILURE;
14462 goto phys_create_done;
14463 }
14464 }
14465 phys_create_done:
14466 /*
14467 * If props were setup ok, online the lun
14468 */
14469 if (ndi_rtn == NDI_SUCCESS) {
14470 /*
14471 * Try to online the new node
14472 */
14473 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14474 }
14475
14476 /*
14477 * If success set rtn flag, else unwire alloc'd lun
14478 */
14479 if (ndi_rtn != NDI_SUCCESS) {
14480 NDBG12(("mptsas driver unable to online "
14481 "target %d lun %d", target, lun));
14482 ndi_prop_remove_all(*lun_dip);
14483 (void) ndi_devi_free(*lun_dip);
14484 *lun_dip = NULL;
14485 }
14486 }
14487
14488 scsi_hba_nodename_compatible_free(nodename, compatible);
14489
14490 if (wwn_str != NULL) {
14491 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14492 }
14493 if (component != NULL) {
14494 kmem_free(component, MAXPATHLEN);
14495 }
14496
14497
14498 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14499 }
14500
14501 static int
14502 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14503 {
14504 mptsas_t *mpt = DIP2MPT(pdip);
14505 struct smp_device smp_sd;
14506
14507 /* XXX An HBA driver should not be allocating an smp_device. */
14508 bzero(&smp_sd, sizeof (struct smp_device));
14509 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14510 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14511
14512 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14513 return (NDI_FAILURE);
14514 return (NDI_SUCCESS);
14515 }
14516
14517 static int
14518 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14519 {
14520 mptsas_t *mpt = DIP2MPT(pdip);
14521 mptsas_smp_t *psmp = NULL;
14522 int rval;
14523 int phymask;
14524
14525 /*
14526 * Get the physical port associated to the iport
14527 * PHYMASK TODO
14528 */
14529 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14530 "phymask", 0);
14531 /*
14532 * Find the smp node in hash table with specified sas address and
14533 * physical port
14534 */
14535 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14536 if (psmp == NULL) {
14537 return (DDI_FAILURE);
14538 }
14539
14540 rval = mptsas_online_smp(pdip, psmp, smp_dip);
14541
14542 return (rval);
14543 }
14544
/*
 * Allocate, decorate and online an NDI child node of node type "smp" for
 * the expander described by smp_node under the iport pdip.  The node gets
 * the SMP_WWN, target-port, attached-port and SMP_PROP properties; for a
 * directly-attached expander on an iport that has not yet been set up for
 * SM-HBA, the iport's virtual-port/num-phys/per-phy/attached-port
 * properties are (re)created as well.  Returns DDI_SUCCESS if the node
 * was onlined, DDI_FAILURE otherwise.
 *
 * NOTE(review): on the failure path below, *smp_dip is freed via
 * ndi_devi_free() but is not reset to NULL (unlike the phys-lun code
 * path, which clears *lun_dip) — callers must not dereference *smp_dip
 * after a DDI_FAILURE return; confirm whether clearing it here is safe.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char		wwn_str[MPTSAS_WWN_STRLEN];
	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
	int		ndi_rtn = NDI_FAILURE;
	int		rval = 0;
	mptsas_smp_t	dev_info;
	uint32_t	page_address;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint16_t	dev_hdl;
	uint64_t	sas_wwn;
	uint64_t	smp_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint16_t	pdev_hdl;
	uint8_t		numphys = 0;
	uint16_t	i = 0;
	char		phymask[MPTSAS_MAX_PHYS];
	char		*iport = NULL;
	mptsas_phymask_t	phy_mask = 0;
	uint16_t	attached_devhdl;
	uint16_t	bay_num, enclosure;

	/* Bare hex SAS address (no "w" prefix) — used for SMP_WWN lookup */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured succesfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured?  Return the existing child node. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* target-port uses the "w"-prefixed unit-address form */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page0: learn this expander's parent handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 of the parent: its SAS address (sas_wwn). */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 of the expander itself: its device info. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		/*
		 * numphys is uint8_t, so the -1 "property absent" default
		 * becomes 255 and also takes the early-out below; only an
		 * iport with an explicit num-phys of 0 is reconfigured.
		 * NOTE(review): confirm that treating "absent" like
		 * "already configured" is intended.
		 */
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Recover this iport's phy mask by matching its unit
		 * address string against the per-phy masks, then count
		 * the phys in that mask.
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_phy_props(mpt, iport, pdip,
		    numphys, &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
14797
/*
 * smp transport routine
 *
 * SMP transport entry point: translate the framework's smp_pkt into an
 * MPI2 SMP_PASSTHROUGH request, execute it synchronously through
 * mptsas_do_passthru(), and map the IOCStatus/SASStatus of the reply to
 * an errno in smp_pkt_reason.  Returns DDI_SUCCESS only when both the
 * passthrough and the SMP-level status indicate success.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * The 4-byte adjustments below presumably account for the SMP CRC,
	 * which the controller generates/strips itself — TODO confirm
	 * against the MPI2 SMP passthrough description.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	/* Synchronous passthrough; m_mutex held across the exchange. */
	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	/* IOC level OK — also require success at the SAS/SMP level. */
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
14882
14883 /*
14884 * If we didn't get a match, we need to get sas page0 for each device, and
14885 * untill we get a match. If failed, return NULL
14886 */
14887 static mptsas_target_t *
14888 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
14889 {
14890 int i, j = 0;
14891 int rval = 0;
14892 uint16_t cur_handle;
14893 uint32_t page_address;
14894 mptsas_target_t *ptgt = NULL;
14895
14896 /*
14897 * PHY named device must be direct attached and attaches to
14898 * narrow port, if the iport is not parent of the device which
14899 * we are looking for.
14900 */
14901 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14902 if ((1 << i) & phymask)
14903 j++;
14904 }
14905
14906 if (j > 1)
14907 return (NULL);
14908
14909 /*
14910 * Must be a narrow port and single device attached to the narrow port
14911 * So the physical port num of device which is equal to the iport's
14912 * port num is the device what we are looking for.
14913 */
14914
14915 if (mpt->m_phy_info[phy].phy_mask != phymask)
14916 return (NULL);
14917
14918 mutex_enter(&mpt->m_mutex);
14919
14920 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
14921 MPTSAS_HASH_FIRST);
14922 while (ptgt != NULL) {
14923 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14924 mutex_exit(&mpt->m_mutex);
14925 return (ptgt);
14926 }
14927
14928 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
14929 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
14930 }
14931
14932 if (mpt->m_done_traverse_dev) {
14933 mutex_exit(&mpt->m_mutex);
14934 return (NULL);
14935 }
14936
14937 /* If didn't get a match, come here */
14938 cur_handle = mpt->m_dev_handle;
14939 for (; ; ) {
14940 ptgt = NULL;
14941 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14942 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
14943 rval = mptsas_get_target_device_info(mpt, page_address,
14944 &cur_handle, &ptgt);
14945 if ((rval == DEV_INFO_FAIL_PAGE0) ||
14946 (rval == DEV_INFO_FAIL_ALLOC)) {
14947 break;
14948 }
14949 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
14950 (rval == DEV_INFO_PHYS_DISK)) {
14951 continue;
14952 }
14953 mpt->m_dev_handle = cur_handle;
14954
14955 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14956 break;
14957 }
14958 }
14959
14960 mutex_exit(&mpt->m_mutex);
14961 return (ptgt);
14962 }
14963
14964 /*
14965 * The ptgt->m_sas_wwn contains the wwid for each disk.
14966 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
14967 * If we didn't get a match, we need to get sas page0 for each device, and
14968 * untill we get a match
14969 * If failed, return NULL
14970 */
14971 static mptsas_target_t *
14972 mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
14973 {
14974 int rval = 0;
14975 uint16_t cur_handle;
14976 uint32_t page_address;
14977 mptsas_target_t *tmp_tgt = NULL;
14978
14979 mutex_enter(&mpt->m_mutex);
14980 tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
14981 &mpt->m_active->m_tgttbl, wwid, phymask);
14982 if (tmp_tgt != NULL) {
14983 mutex_exit(&mpt->m_mutex);
14984 return (tmp_tgt);
14985 }
14986
14987 if (phymask == 0) {
14988 /*
14989 * It's IR volume
14990 */
14991 rval = mptsas_get_raid_info(mpt);
14992 if (rval) {
14993 tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
14994 &mpt->m_active->m_tgttbl, wwid, phymask);
14995 }
14996 mutex_exit(&mpt->m_mutex);
14997 return (tmp_tgt);
14998 }
14999
15000 if (mpt->m_done_traverse_dev) {
15001 mutex_exit(&mpt->m_mutex);
15002 return (NULL);
15003 }
15004
15005 /* If didn't get a match, come here */
15006 cur_handle = mpt->m_dev_handle;
15007 for (; ; ) {
15008 tmp_tgt = NULL;
15009 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15010 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
15011 rval = mptsas_get_target_device_info(mpt, page_address,
15012 &cur_handle, &tmp_tgt);
15013 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15014 (rval == DEV_INFO_FAIL_ALLOC)) {
15015 tmp_tgt = NULL;
15016 break;
15017 }
15018 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15019 (rval == DEV_INFO_PHYS_DISK)) {
15020 continue;
15021 }
15022 mpt->m_dev_handle = cur_handle;
15023 if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
15024 (tmp_tgt->m_phymask == phymask)) {
15025 break;
15026 }
15027 }
15028
15029 mutex_exit(&mpt->m_mutex);
15030 return (tmp_tgt);
15031 }
15032
/*
 * Look up the SMP (expander) node with the given SAS address on the
 * given phymask.  If the hash lookup misses and SMP traversal is not
 * yet complete, walk expander page0 for the remaining handles until a
 * match is found; return NULL on failure.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: the expander has already been discovered and hashed. */
	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
	    wwid, phymask);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* All expander handles already traversed; nothing left to find. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (; ; ) {
		psmp = NULL;
		/* Ask the IOC for the next expander handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			/* No more expanders (or page0 failed): give up. */
			break;
		}
		/* Persist traversal position and hash the new node. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
		    (psmp->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15077
15078 /* helper functions using hash */
15079
15080 /*
15081 * Can't have duplicate entries for same devhdl,
15082 * if there are invalid entries, the devhdl should be set to 0xffff
15083 */
15084 static void *
15085 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15086 {
15087 mptsas_hash_data_t *data;
15088
15089 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15090 while (data != NULL) {
15091 if (data->devhdl == devhdl) {
15092 break;
15093 }
15094 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15095 }
15096 return (data);
15097 }
15098
15099 mptsas_target_t *
15100 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15101 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15102 {
15103 mptsas_target_t *tmp_tgt = NULL;
15104
15105 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15106 if (tmp_tgt != NULL) {
15107 NDBG20(("Hash item already exist"));
15108 tmp_tgt->m_deviceinfo = devinfo;
15109 tmp_tgt->m_devhdl = devhdl;
15110 return (tmp_tgt);
15111 }
15112 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15113 if (tmp_tgt == NULL) {
15114 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15115 return (NULL);
15116 }
15117 tmp_tgt->m_devhdl = devhdl;
15118 tmp_tgt->m_sas_wwn = wwid;
15119 tmp_tgt->m_deviceinfo = devinfo;
15120 tmp_tgt->m_phymask = phymask;
15121 tmp_tgt->m_phynum = phynum;
15122 /* Initialized the tgt structure */
15123 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15124 tmp_tgt->m_qfull_retry_interval =
15125 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15126 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15127
15128 mptsas_hash_add(hashtab, tmp_tgt);
15129
15130 return (tmp_tgt);
15131 }
15132
15133 static void
15134 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15135 mptsas_phymask_t phymask)
15136 {
15137 mptsas_target_t *tmp_tgt;
15138 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15139 if (tmp_tgt == NULL) {
15140 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15141 } else {
15142 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15143 }
15144 }
15145
15146 /*
15147 * Return the entry in the hash table
15148 */
15149 static mptsas_smp_t *
15150 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15151 {
15152 uint64_t key1 = data->m_sasaddr;
15153 mptsas_phymask_t key2 = data->m_phymask;
15154 mptsas_smp_t *ret_data;
15155
15156 ret_data = mptsas_hash_search(hashtab, key1, key2);
15157 if (ret_data != NULL) {
15158 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15159 return (ret_data);
15160 }
15161
15162 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15163 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15164 mptsas_hash_add(hashtab, ret_data);
15165 return (ret_data);
15166 }
15167
15168 static void
15169 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15170 mptsas_phymask_t phymask)
15171 {
15172 mptsas_smp_t *tmp_smp;
15173 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15174 if (tmp_smp == NULL) {
15175 cmn_err(CE_WARN, "Smp element not found, nothing to free");
15176 } else {
15177 kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15178 }
15179 }
15180
15181 /*
15182 * Hash operation functions
15183 * key1 is the sas_wwn, key2 is the phymask
15184 */
15185 static void
15186 mptsas_hash_init(mptsas_hash_table_t *hashtab)
15187 {
15188 if (hashtab == NULL) {
15189 return;
15190 }
15191 bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
15192 MPTSAS_HASH_ARRAY_SIZE);
15193 hashtab->cur = NULL;
15194 hashtab->line = 0;
15195 }
15196
15197 static void
15198 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15199 {
15200 uint16_t line = 0;
15201 mptsas_hash_node_t *cur = NULL, *last = NULL;
15202
15203 if (hashtab == NULL) {
15204 return;
15205 }
15206 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15207 cur = hashtab->head[line];
15208 while (cur != NULL) {
15209 last = cur;
15210 cur = cur->next;
15211 kmem_free(last->data, datalen);
15212 kmem_free(last, sizeof (mptsas_hash_node_t));
15213 }
15214 }
15215 }
15216
15217 /*
15218 * You must guarantee the element doesn't exist in the hash table
15219 * before you call mptsas_hash_add()
15220 */
15221 static void
15222 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
15223 {
15224 uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
15225 mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
15226 mptsas_hash_node_t **head = NULL;
15227 mptsas_hash_node_t *node = NULL;
15228
15229 if (hashtab == NULL) {
15230 return;
15231 }
15232 ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
15233 node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
15234 node->data = data;
15235
15236 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15237 if (*head == NULL) {
15238 *head = node;
15239 } else {
15240 node->next = *head;
15241 *head = node;
15242 }
15243 }
15244
15245 static void *
15246 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
15247 mptsas_phymask_t key2)
15248 {
15249 mptsas_hash_node_t **head = NULL;
15250 mptsas_hash_node_t *last = NULL, *cur = NULL;
15251 mptsas_hash_data_t *data;
15252 if (hashtab == NULL) {
15253 return (NULL);
15254 }
15255 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15256 cur = *head;
15257 while (cur != NULL) {
15258 data = cur->data;
15259 if ((data->key1 == key1) && (data->key2 == key2)) {
15260 if (last == NULL) {
15261 (*head) = cur->next;
15262 } else {
15263 last->next = cur->next;
15264 }
15265 kmem_free(cur, sizeof (mptsas_hash_node_t));
15266 return (data);
15267 } else {
15268 last = cur;
15269 cur = cur->next;
15270 }
15271 }
15272 return (NULL);
15273 }
15274
15275 static void *
15276 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
15277 mptsas_phymask_t key2)
15278 {
15279 mptsas_hash_node_t *cur = NULL;
15280 mptsas_hash_data_t *data;
15281 if (hashtab == NULL) {
15282 return (NULL);
15283 }
15284 cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
15285 while (cur != NULL) {
15286 data = cur->data;
15287 if ((data->key1 == key1) && (data->key2 == key2)) {
15288 return (data);
15289 } else {
15290 cur = cur->next;
15291 }
15292 }
15293 return (NULL);
15294 }
15295
15296 static void *
15297 mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
15298 {
15299 mptsas_hash_node_t *this = NULL;
15300
15301 if (hashtab == NULL) {
15302 return (NULL);
15303 }
15304
15305 if (pos == MPTSAS_HASH_FIRST) {
15306 hashtab->line = 0;
15307 hashtab->cur = NULL;
15308 this = hashtab->head[0];
15309 } else {
15310 if (hashtab->cur == NULL) {
15311 return (NULL);
15312 } else {
15313 this = hashtab->cur->next;
15314 }
15315 }
15316
15317 while (this == NULL) {
15318 hashtab->line++;
15319 if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
15320 /* the traverse reaches the end */
15321 hashtab->cur = NULL;
15322 return (NULL);
15323 } else {
15324 this = hashtab->head[hashtab->line];
15325 }
15326 }
15327 hashtab->cur = this;
15328 return (this->data);
15329 }
15330
/*
 * Functions for SGPIO LED support
 */

/*
 * Resolve a dev_t to its dev_info node and return the node's integer
 * "phymask" property through *phymask (0 when the property is absent).
 * Returns NULL when no node corresponds to dev.
 *
 * NOTE(review): the hold taken by e_ddi_hold_devi_by_dev() is released
 * before dip is returned, so the caller receives an unheld pointer —
 * presumably safe because the device stays attached while open, but
 * confirm against the callers.
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	dev_info_t *dip;
	int prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	ddi_release_devi(dip);
	return (dip);
}
15348 static mptsas_target_t *
15349 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
15350 {
15351 uint8_t phynum;
15352 uint64_t wwn;
15353 int lun;
15354 mptsas_target_t *ptgt = NULL;
15355
15356 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
15357 return (NULL);
15358 }
15359 if (addr[0] == 'w') {
15360 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
15361 } else {
15362 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
15363 }
15364 return (ptgt);
15365 }
15366
15367 int
15368 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15369 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15370 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15371 {
15372 ddi_dma_cookie_t new_cookie;
15373 size_t alloc_len;
15374 uint_t ncookie;
15375
15376 if (cookiep == NULL)
15377 cookiep = &new_cookie;
15378
15379 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15380 NULL, dma_hdp) != DDI_SUCCESS) {
15381 dma_hdp = NULL;
15382 return (FALSE);
15383 }
15384
15385 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15386 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
15387 acc_hdp) != DDI_SUCCESS) {
15388 ddi_dma_free_handle(dma_hdp);
15389 dma_hdp = NULL;
15390 return (FALSE);
15391 }
15392
15393 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
15394 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
15395 cookiep, &ncookie) != DDI_DMA_MAPPED) {
15396 (void) ddi_dma_mem_free(acc_hdp);
15397 ddi_dma_free_handle(dma_hdp);
15398 dma_hdp = NULL;
15399 return (FALSE);
15400 }
15401
15402 return (TRUE);
15403 }
15404
15405 void
15406 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15407 {
15408 if (*dma_hdp == NULL)
15409 return;
15410
15411 (void) ddi_dma_unbind_handle(*dma_hdp);
15412 (void) ddi_dma_mem_free(acc_hdp);
15413 ddi_dma_free_handle(dma_hdp);
15414 dma_hdp = NULL;
15415 }