1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 */
28
29 /*
30 * Copyright (c) 2000 to 2010, LSI Corporation.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms of all code within
34 * this file that is exclusively owned by LSI, with or without
35 * modification, is permitted provided that, in addition to the CDDL 1.0
36 * License requirements, the following conditions are met:
37 *
38 * Neither the name of the author nor the names of its contributors may be
39 * used to endorse or promote products derived from this software without
40 * specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
45 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
46 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
48 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
49 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
50 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
51 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
52 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
53 * DAMAGE.
54 */
55
56 /*
57 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
58 *
59 */
60
61 #if defined(lint) || defined(DEBUG)
62 #define MPTSAS_DEBUG
63 #endif
64
65 /*
66 * standard header files.
67 */
68 #include <sys/note.h>
69 #include <sys/scsi/scsi.h>
70 #include <sys/pci.h>
71 #include <sys/file.h>
72 #include <sys/policy.h>
73 #include <sys/model.h>
74 #include <sys/sysevent.h>
75 #include <sys/sysevent/eventdefs.h>
76 #include <sys/sysevent/dr.h>
77 #include <sys/sata/sata_defs.h>
78 #include <sys/scsi/generic/sas.h>
79 #include <sys/scsi/impl/scsi_sas.h>
80
81 #pragma pack(1)
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
90 #pragma pack()
91
92 /*
93 * private header files.
94 *
95 */
96 #include <sys/scsi/impl/scsi_reset_notify.h>
97 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
98 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
99 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
100 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
101 #include <sys/raidioctl.h>
102
103 #include <sys/fs/dv_node.h> /* devfs_clean */
104
105 /*
106 * FMA header files
107 */
108 #include <sys/ddifm.h>
109 #include <sys/fm/protocol.h>
110 #include <sys/fm/util.h>
111 #include <sys/fm/io/ddi.h>
112
113 /*
114 * autoconfiguration data and routines.
115 */
116 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
117 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
118 static int mptsas_power(dev_info_t *dip, int component, int level);
119
120 /*
121 * cb_ops function
122 */
123 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
124 cred_t *credp, int *rval);
125 #ifdef __sparc
126 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
127 #else /* __sparc */
128 static int mptsas_quiesce(dev_info_t *devi);
129 #endif /* __sparc */
130
131 /*
132 * Resource initilaization for hardware
133 */
134 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
135 static void mptsas_disable_bus_master(mptsas_t *mpt);
136 static void mptsas_hba_fini(mptsas_t *mpt);
137 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
138 static int mptsas_hba_setup(mptsas_t *mpt);
139 static void mptsas_hba_teardown(mptsas_t *mpt);
140 static int mptsas_config_space_init(mptsas_t *mpt);
141 static void mptsas_config_space_fini(mptsas_t *mpt);
142 static void mptsas_iport_register(mptsas_t *mpt);
143 static int mptsas_smp_setup(mptsas_t *mpt);
144 static void mptsas_smp_teardown(mptsas_t *mpt);
145 static int mptsas_cache_create(mptsas_t *mpt);
146 static void mptsas_cache_destroy(mptsas_t *mpt);
147 static int mptsas_alloc_request_frames(mptsas_t *mpt);
148 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
149 static int mptsas_alloc_free_queue(mptsas_t *mpt);
150 static int mptsas_alloc_post_queue(mptsas_t *mpt);
151 static void mptsas_alloc_reply_args(mptsas_t *mpt);
152 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
153 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
154 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
155
156 /*
157 * SCSA function prototypes
158 */
159 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
160 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
161 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
162 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
163 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
164 int tgtonly);
165 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
166 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
167 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
168 int tgtlen, int flags, int (*callback)(), caddr_t arg);
169 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
170 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
171 struct scsi_pkt *pkt);
172 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
173 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
174 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
175 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
176 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
177 void (*callback)(caddr_t), caddr_t arg);
178 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
179 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
180 static int mptsas_scsi_quiesce(dev_info_t *dip);
181 static int mptsas_scsi_unquiesce(dev_info_t *dip);
182 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
183 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
184
185 /*
186 * SMP functions
187 */
188 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
189
190 /*
191 * internal function prototypes.
192 */
193 static void mptsas_list_add(mptsas_t *mpt);
194 static void mptsas_list_del(mptsas_t *mpt);
195
196 static int mptsas_quiesce_bus(mptsas_t *mpt);
197 static int mptsas_unquiesce_bus(mptsas_t *mpt);
198
199 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
200 static void mptsas_free_handshake_msg(mptsas_t *mpt);
201
202 static void mptsas_ncmds_checkdrain(void *arg);
203
204 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
205 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
206 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
207 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
208
209 static int mptsas_do_detach(dev_info_t *dev);
210 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
211 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
212 struct scsi_pkt *pkt);
213 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
214
215 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
216 static void mptsas_handle_event(void *args);
217 static int mptsas_handle_event_sync(void *args);
218 static void mptsas_handle_dr(void *args);
219 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
220 dev_info_t *pdip);
221
222 static void mptsas_restart_cmd(void *);
223
224 static void mptsas_flush_hba(mptsas_t *mpt);
225 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
226 uint8_t tasktype);
227 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
228 uchar_t reason, uint_t stat);
229
230 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
231 static void mptsas_process_intr(mptsas_t *mpt,
232 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
233 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
234 pMpi2ReplyDescriptorsUnion_t reply_desc);
235 static void mptsas_handle_address_reply(mptsas_t *mpt,
236 pMpi2ReplyDescriptorsUnion_t reply_desc);
237 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
238 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
239 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
240
241 static void mptsas_watch(void *arg);
242 static void mptsas_watchsubr(mptsas_t *mpt);
243 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
244
245 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
246 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
247 uint8_t *data, uint32_t request_size, uint32_t reply_size,
248 uint32_t data_size, uint32_t direction, uint8_t *dataout,
249 uint32_t dataout_size, short timeout, int mode);
250 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
251
252 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
253 uint32_t unique_id);
254 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
255 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
256 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
257 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
258 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
259 uint32_t diag_type);
260 static int mptsas_diag_register(mptsas_t *mpt,
261 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
262 static int mptsas_diag_unregister(mptsas_t *mpt,
263 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
264 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
265 uint32_t *return_code);
266 static int mptsas_diag_read_buffer(mptsas_t *mpt,
267 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
268 uint32_t *return_code, int ioctl_mode);
269 static int mptsas_diag_release(mptsas_t *mpt,
270 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
271 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
272 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
273 int ioctl_mode);
274 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
275 int mode);
276
277 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
278 int cmdlen, int tgtlen, int statuslen, int kf);
279 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
280
281 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
282 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
283
284 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
285 int kmflags);
286 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
287
288 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
289 mptsas_cmd_t *cmd);
290 static void mptsas_check_task_mgt(mptsas_t *mpt,
291 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
292 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
293 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
294 int *resid);
295
296 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
297 static void mptsas_free_active_slots(mptsas_t *mpt);
298 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
299
300 static void mptsas_restart_hba(mptsas_t *mpt);
301 static void mptsas_restart_waitq(mptsas_t *mpt);
302
303 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
304 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
305 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
306
307 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
308 static void mptsas_doneq_empty(mptsas_t *mpt);
309 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
310
311 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
312 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
313 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
314 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
315
316
317 static void mptsas_start_watch_reset_delay();
318 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
319 static void mptsas_watch_reset_delay(void *arg);
320 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
321
322 /*
323 * helper functions
324 */
325 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
326
327 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
328 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
329 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
330 int lun);
331 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
332 int lun);
333 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
334 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
335
336 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
337 int *lun);
338 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
339
340 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
341 mptsas_phymask_t phymask, uint8_t phy);
342 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
343 mptsas_phymask_t phymask, uint64_t wwid);
344 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
345 mptsas_phymask_t phymask, uint64_t wwid);
346
347 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
348 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
349
350 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
351 uint16_t *handle, mptsas_target_t **pptgt);
352 static void mptsas_update_phymask(mptsas_t *mpt);
353
354 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
355 uint32_t *status, uint8_t cmd);
356 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
357 mptsas_phymask_t *phymask);
358 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
359 mptsas_phymask_t phymask);
360 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
361
362
363 /*
364 * Enumeration / DR functions
365 */
366 static void mptsas_config_all(dev_info_t *pdip);
367 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
368 dev_info_t **lundip);
369 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
370 dev_info_t **lundip);
371
372 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
373 static int mptsas_offline_target(dev_info_t *pdip, char *name);
374
375 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
376 dev_info_t **dip);
377
378 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
379 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
380 dev_info_t **dip, mptsas_target_t *ptgt);
381
382 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
383 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
384
385 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
386 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
387 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
388 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
389 int lun);
390
391 static void mptsas_offline_missed_luns(dev_info_t *pdip,
392 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
393 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
394 mdi_pathinfo_t *rpip, uint_t flags);
395
396 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
397 dev_info_t **smp_dip);
398 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
399 uint_t flags);
400
401 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
402 int mode, int *rval);
403 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
404 int mode, int *rval);
405 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
406 int mode, int *rval);
407 static void mptsas_record_event(void *args);
408 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
409 int mode);
410
411 mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
412 uint32_t, mptsas_phymask_t, uint8_t);
413 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
414 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
415 dev_info_t **smp_dip);
416
417 /*
418 * Power management functions
419 */
420 static int mptsas_get_pci_cap(mptsas_t *mpt);
421 static int mptsas_init_pm(mptsas_t *mpt);
422
423 /*
424 * MPT MSI tunable:
425 *
426 * By default MSI is enabled on all supported platforms.
427 */
428 boolean_t mptsas_enable_msi = B_TRUE;
429 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
430
431 static int mptsas_register_intrs(mptsas_t *);
432 static void mptsas_unregister_intrs(mptsas_t *);
433 static int mptsas_add_intrs(mptsas_t *, int);
434 static void mptsas_rem_intrs(mptsas_t *);
435
436 /*
437 * FMA Prototypes
438 */
439 static void mptsas_fm_init(mptsas_t *mpt);
440 static void mptsas_fm_fini(mptsas_t *mpt);
441 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
442
443 extern pri_t minclsyspri, maxclsyspri;
444
445 /*
446 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
447 * under this device that the paths to a physical device are created when
448 * MPxIO is used.
449 */
450 extern dev_info_t *scsi_vhci_dip;
451
452 /*
453 * Tunable timeout value for Inquiry VPD page 0x83
454 * By default the value is 30 seconds.
455 */
456 int mptsas_inq83_retry_timeout = 30;
457
458 /*
459 * This is used to allocate memory for message frame storage, not for
460 * data I/O DMA. All message frames must be stored in the first 4G of
461 * physical memory.
462 */
/*
 * DMA attributes for message-frame (control) memory.  The address-high
 * limit of 0xffffffff keeps all message frames below 4GB, as required
 * by the comment above.  Used for request/reply frames and queues, not
 * for data I/O.
 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0		/* flags, set to 0 */
};
477
478 /*
479 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
480 * physical addresses are supported.)
481 */
/*
 * DMA attributes for data I/O buffers.  Identical to mptsas_dma_attrs
 * except that the full 64-bit address range is allowed and relaxed
 * ordering is enabled for throughput.
 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
};
496
/*
 * Device register access attributes: little-endian structural access
 * with strict ordering and default access protection.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
503
/*
 * Character device entry points.  open/close are provided by the SCSA
 * framework; mptsas_ioctl handles the driver-specific ioctls.  All other
 * slots are unsupported (nodev/nochpoll).
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,	/* open */
	scsi_hba_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	mptsas_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* streamtab */
	D_MP,		/* cb_flag */
	CB_REV,		/* rev */
	nodev,		/* aread */
	nodev		/* awrite */
};
524
/*
 * Device operations vector.  Note the platform split: SPARC supplies a
 * legacy reset entry point and does not need quiesce, while other
 * platforms support fast reboot via mptsas_quiesce().
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,	/* devo_rev, */
	0,		/* refcnt */
	ddi_no_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	mptsas_attach,	/* attach */
	mptsas_detach,	/* detach */
#ifdef	__sparc
	mptsas_reset,
#else
	nodev,		/* reset */
#endif  /* __sparc */
	&mptsas_cb_ops,	/* driver operations */
	NULL,		/* bus operations */
	mptsas_power,	/* power management */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce	/* quiesce */
#endif	/* __sparc */
};
547
548
/* Module identification string; also exported as mptsas_driver_rev below. */
#define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"

/* Loadable-module linkage for mod_install()/mod_remove(). */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING,	/* Name of the module. */
	&mptsas_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
560 #define TARGET_PROP "target"
561 #define LUN_PROP "lun"
562 #define LUN64_PROP "lun64"
563 #define SAS_PROP "sas-mpt"
564 #define MDI_GUID "wwn"
565 #define NDI_GUID "guid"
566 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
567
568 /*
569 * Local static data
570 */
571 #if defined(MPTSAS_DEBUG)
572 uint32_t mptsas_debug_flags = 0;
573 #endif /* defined(MPTSAS_DEBUG) */
574 uint32_t mptsas_debug_resets = 0;
575
576 static kmutex_t mptsas_global_mutex;
577 static void *mptsas_state; /* soft state ptr */
578 static krwlock_t mptsas_global_rwlock;
579
580 static kmutex_t mptsas_log_mutex;
581 static char mptsas_log_buf[256];
582 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
583
584 static mptsas_t *mptsas_head, *mptsas_tail;
585 static clock_t mptsas_scsi_watchdog_tick;
586 static clock_t mptsas_tick;
587 static timeout_id_t mptsas_reset_watch;
588 static timeout_id_t mptsas_timeout_id;
589 static int mptsas_timeouts_enabled = 0;
590 /*
591 * warlock directives
592 */
593 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
594 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
595 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
596 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
597 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
598 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
599
600 /*
601 * SM - HBA statics
602 */
603 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
604
605 #ifdef MPTSAS_DEBUG
606 void debug_enter(char *);
607 #endif
608
609 /*
610 * Notes:
611 * - scsi_hba_init(9F) initializes SCSI HBA modules
612 * - must call scsi_hba_fini(9F) if modload() fails
613 */
614 int
615 _init(void)
616 {
617 int status;
618 /* CONSTCOND */
619 ASSERT(NO_COMPETING_THREADS);
620
621 NDBG0(("_init"));
622
623 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
624 MPTSAS_INITIAL_SOFT_SPACE);
625 if (status != 0) {
626 return (status);
627 }
628
629 if ((status = scsi_hba_init(&modlinkage)) != 0) {
630 ddi_soft_state_fini(&mptsas_state);
631 return (status);
632 }
633
634 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
635 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
636 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
637
638 if ((status = mod_install(&modlinkage)) != 0) {
639 mutex_destroy(&mptsas_log_mutex);
640 rw_destroy(&mptsas_global_rwlock);
641 mutex_destroy(&mptsas_global_mutex);
642 ddi_soft_state_fini(&mptsas_state);
643 scsi_hba_fini(&modlinkage);
644 }
645
646 return (status);
647 }
648
649 /*
650 * Notes:
651 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
652 */
653 int
654 _fini(void)
655 {
656 int status;
657 /* CONSTCOND */
658 ASSERT(NO_COMPETING_THREADS);
659
660 NDBG0(("_fini"));
661
662 if ((status = mod_remove(&modlinkage)) == 0) {
663 ddi_soft_state_fini(&mptsas_state);
664 scsi_hba_fini(&modlinkage);
665 mutex_destroy(&mptsas_global_mutex);
666 rw_destroy(&mptsas_global_rwlock);
667 mutex_destroy(&mptsas_log_mutex);
668 }
669 return (status);
670 }
671
672 /*
673 * The loadable-module _info(9E) entry point
674 */
675 int
676 _info(struct modinfo *modinfop)
677 {
678 /* CONSTCOND */
679 ASSERT(NO_COMPETING_THREADS);
680 NDBG0(("mptsas _info"));
681
682 return (mod_info(&modlinkage, modinfop));
683 }
684
685 static int
686 mptsas_target_eval_devhdl(const void *op, void *arg)
687 {
688 uint16_t dh = *(uint16_t *)arg;
689 const mptsas_target_t *tp = op;
690
691 return ((int)tp->m_devhdl - (int)dh);
692 }
693
694 static int
695 mptsas_target_eval_slot(const void *op, void *arg)
696 {
697 mptsas_led_control_t *lcp = arg;
698 const mptsas_target_t *tp = op;
699
700 if (tp->m_enclosure != lcp->Enclosure)
701 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
702
703 return ((int)tp->m_slot_num - (int)lcp->Slot);
704 }
705
706 static int
707 mptsas_target_eval_nowwn(const void *op, void *arg)
708 {
709 uint8_t phy = *(uint8_t *)arg;
710 const mptsas_target_t *tp = op;
711
712 if (tp->m_addr.mta_wwn != 0)
713 return (-1);
714
715 return ((int)tp->m_phynum - (int)phy);
716 }
717
718 static int
719 mptsas_smp_eval_devhdl(const void *op, void *arg)
720 {
721 uint16_t dh = *(uint16_t *)arg;
722 const mptsas_smp_t *sp = op;
723
724 return ((int)sp->m_devhdl - (int)dh);
725 }
726
727 static uint64_t
728 mptsas_target_addr_hash(const void *tp)
729 {
730 const mptsas_target_addr_t *tap = tp;
731
732 return ((tap->mta_wwn & 0xffffffffffffULL) |
733 ((uint64_t)tap->mta_phymask << 48));
734 }
735
736 static int
737 mptsas_target_addr_cmp(const void *a, const void *b)
738 {
739 const mptsas_target_addr_t *aap = a;
740 const mptsas_target_addr_t *bap = b;
741
742 if (aap->mta_wwn < bap->mta_wwn)
743 return (-1);
744 if (aap->mta_wwn > bap->mta_wwn)
745 return (1);
746 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
747 }
748
749 static void
750 mptsas_target_free(void *op)
751 {
752 kmem_free(op, sizeof (mptsas_target_t));
753 }
754
755 static void
756 mptsas_smp_free(void *op)
757 {
758 kmem_free(op, sizeof (mptsas_smp_t));
759 }
760
/*
 * Empty and destroy the per-instance target and SMP-target refhash
 * tables, then clear the stale pointers so later teardown paths cannot
 * reuse them.
 *
 * NOTE(review): each loop calls refhash_remove() on the element the
 * iterator currently holds and then advances with refhash_next(); this
 * presumes refhash defers freeing a held object until the iterator's
 * hold is released -- confirm against the refhash implementation.
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
780
781 static int
782 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
783 {
784 dev_info_t *pdip;
785 mptsas_t *mpt;
786 scsi_hba_tran_t *hba_tran;
787 char *iport = NULL;
788 char phymask[MPTSAS_MAX_PHYS];
789 mptsas_phymask_t phy_mask = 0;
790 int dynamic_port = 0;
791 uint32_t page_address;
792 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
793 int rval = DDI_FAILURE;
794 int i = 0;
795 uint8_t numphys = 0;
796 uint8_t phy_id;
797 uint8_t phy_port = 0;
798 uint16_t attached_devhdl = 0;
799 uint32_t dev_info;
800 uint64_t attached_sas_wwn;
801 uint16_t dev_hdl;
802 uint16_t pdev_hdl;
803 uint16_t bay_num, enclosure;
804 char attached_wwnstr[MPTSAS_WWN_STRLEN];
805
806 /* CONSTCOND */
807 ASSERT(NO_COMPETING_THREADS);
808
809 switch (cmd) {
810 case DDI_ATTACH:
811 break;
812
813 case DDI_RESUME:
814 /*
815 * If this a scsi-iport node, nothing to do here.
816 */
817 return (DDI_SUCCESS);
818
819 default:
820 return (DDI_FAILURE);
821 }
822
823 pdip = ddi_get_parent(dip);
824
825 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
826 NULL) {
827 cmn_err(CE_WARN, "Failed attach iport because fail to "
828 "get tran vector for the HBA node");
829 return (DDI_FAILURE);
830 }
831
832 mpt = TRAN2MPT(hba_tran);
833 ASSERT(mpt != NULL);
834 if (mpt == NULL)
835 return (DDI_FAILURE);
836
837 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
838 NULL) {
839 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
840 "get tran vector for the iport node");
841 return (DDI_FAILURE);
842 }
843
844 /*
845 * Overwrite parent's tran_hba_private to iport's tran vector
846 */
847 hba_tran->tran_hba_private = mpt;
848
849 ddi_report_dev(dip);
850
851 /*
852 * Get SAS address for initiator port according dev_handle
853 */
854 iport = ddi_get_name_addr(dip);
855 if (iport && strncmp(iport, "v0", 2) == 0) {
856 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
857 MPTSAS_VIRTUAL_PORT, 1) !=
858 DDI_PROP_SUCCESS) {
859 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
860 MPTSAS_VIRTUAL_PORT);
861 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
862 "prop update failed");
863 return (DDI_FAILURE);
864 }
865 return (DDI_SUCCESS);
866 }
867
868 mutex_enter(&mpt->m_mutex);
869 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
870 bzero(phymask, sizeof (phymask));
871 (void) sprintf(phymask,
872 "%x", mpt->m_phy_info[i].phy_mask);
873 if (strcmp(phymask, iport) == 0) {
874 break;
875 }
876 }
877
878 if (i == MPTSAS_MAX_PHYS) {
879 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
880 "seems not exist", iport);
881 mutex_exit(&mpt->m_mutex);
882 return (DDI_FAILURE);
883 }
884
885 phy_mask = mpt->m_phy_info[i].phy_mask;
886
887 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
888 dynamic_port = 1;
889 else
890 dynamic_port = 0;
891
892 /*
893 * Update PHY info for smhba
894 */
895 if (mptsas_smhba_phy_init(mpt)) {
896 mutex_exit(&mpt->m_mutex);
897 mptsas_log(mpt, CE_WARN, "mptsas phy update "
898 "failed");
899 return (DDI_FAILURE);
900 }
901
902 mutex_exit(&mpt->m_mutex);
903
904 numphys = 0;
905 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
906 if ((phy_mask >> i) & 0x01) {
907 numphys++;
908 }
909 }
910
911 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
912 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
913 mpt->un.m_base_wwid);
914
915 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
916 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
917 DDI_PROP_SUCCESS) {
918 (void) ddi_prop_remove(DDI_DEV_T_NONE,
919 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
920 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
921 "prop update failed");
922 return (DDI_FAILURE);
923 }
924 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
925 MPTSAS_NUM_PHYS, numphys) !=
926 DDI_PROP_SUCCESS) {
927 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
928 return (DDI_FAILURE);
929 }
930
931 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
932 "phymask", phy_mask) !=
933 DDI_PROP_SUCCESS) {
934 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
935 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
936 "prop update failed");
937 return (DDI_FAILURE);
938 }
939
940 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
941 "dynamic-port", dynamic_port) !=
942 DDI_PROP_SUCCESS) {
943 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
944 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
945 "prop update failed");
946 return (DDI_FAILURE);
947 }
948 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
949 MPTSAS_VIRTUAL_PORT, 0) !=
950 DDI_PROP_SUCCESS) {
951 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
952 MPTSAS_VIRTUAL_PORT);
953 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
954 "prop update failed");
955 return (DDI_FAILURE);
956 }
957 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
958 &attached_devhdl);
959
960 mutex_enter(&mpt->m_mutex);
961 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
962 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
963 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
964 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
965 &pdev_hdl, &bay_num, &enclosure);
966 if (rval != DDI_SUCCESS) {
967 mptsas_log(mpt, CE_WARN,
968 "Failed to get device page0 for handle:%d",
969 attached_devhdl);
970 mutex_exit(&mpt->m_mutex);
971 return (DDI_FAILURE);
972 }
973
974 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
975 bzero(phymask, sizeof (phymask));
976 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
977 if (strcmp(phymask, iport) == 0) {
978 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
979 "%x",
980 mpt->m_phy_info[i].phy_mask);
981 }
982 }
983 mutex_exit(&mpt->m_mutex);
984
985 bzero(attached_wwnstr, sizeof (attached_wwnstr));
986 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
987 attached_sas_wwn);
988 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
989 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
990 DDI_PROP_SUCCESS) {
991 (void) ddi_prop_remove(DDI_DEV_T_NONE,
992 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
993 return (DDI_FAILURE);
994 }
995
996 /* Create kstats for each phy on this iport */
997
998 mptsas_create_phy_stats(mpt, iport, dip);
999
1000 /*
1001 * register sas hba iport with mdi (MPxIO/vhci)
1002 */
1003 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1004 dip, 0) == MDI_SUCCESS) {
1005 mpt->m_mpxio_enable = TRUE;
1006 }
1007 return (DDI_SUCCESS);
1008 }
1009
1010 /*
1011 * Notes:
1012 * Set up all device state and allocate data structures,
1013 * mutexes, condition variables, etc. for device operation.
1014 * Add interrupts needed.
1015 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1016 */
1017 static int
1018 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1019 {
1020 mptsas_t *mpt = NULL;
1021 int instance, i, j;
1022 int doneq_thread_num;
1023 char intr_added = 0;
1024 char map_setup = 0;
1025 char config_setup = 0;
1026 char hba_attach_setup = 0;
1027 char smp_attach_setup = 0;
1028 char mutex_init_done = 0;
1029 char event_taskq_create = 0;
1030 char dr_taskq_create = 0;
1031 char doneq_thread_create = 0;
1032 scsi_hba_tran_t *hba_tran;
1033 uint_t mem_bar = MEM_SPACE;
1034 int rval = DDI_FAILURE;
1035
1036 /* CONSTCOND */
1037 ASSERT(NO_COMPETING_THREADS);
1038
1039 if (scsi_hba_iport_unit_address(dip)) {
1040 return (mptsas_iport_attach(dip, cmd));
1041 }
1042
1043 switch (cmd) {
1044 case DDI_ATTACH:
1045 break;
1046
1047 case DDI_RESUME:
1048 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1049 return (DDI_FAILURE);
1050
1051 mpt = TRAN2MPT(hba_tran);
1052
1053 if (!mpt) {
1054 return (DDI_FAILURE);
1055 }
1056
1057 /*
1058 * Reset hardware and softc to "no outstanding commands"
1059 * Note that a check condition can result on first command
1060 * to a target.
1061 */
1062 mutex_enter(&mpt->m_mutex);
1063
1064 /*
1065 * raise power.
1066 */
1067 if (mpt->m_options & MPTSAS_OPT_PM) {
1068 mutex_exit(&mpt->m_mutex);
1069 (void) pm_busy_component(dip, 0);
1070 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1071 if (rval == DDI_SUCCESS) {
1072 mutex_enter(&mpt->m_mutex);
1073 } else {
1074 /*
1075 * The pm_raise_power() call above failed,
1076 * and that can only occur if we were unable
1077 * to reset the hardware. This is probably
1078 * due to unhealty hardware, and because
1079 * important filesystems(such as the root
1080 * filesystem) could be on the attached disks,
1081 * it would not be a good idea to continue,
1082 * as we won't be entirely certain we are
1083 * writing correct data. So we panic() here
1084 * to not only prevent possible data corruption,
1085 * but to give developers or end users a hope
1086 * of identifying and correcting any problems.
1087 */
1088 fm_panic("mptsas could not reset hardware "
1089 "during resume");
1090 }
1091 }
1092
1093 mpt->m_suspended = 0;
1094
1095 /*
1096 * Reinitialize ioc
1097 */
1098 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1099 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1100 mutex_exit(&mpt->m_mutex);
1101 if (mpt->m_options & MPTSAS_OPT_PM) {
1102 (void) pm_idle_component(dip, 0);
1103 }
1104 fm_panic("mptsas init chip fail during resume");
1105 }
1106 /*
1107 * mptsas_update_driver_data needs interrupts so enable them
1108 * first.
1109 */
1110 MPTSAS_ENABLE_INTR(mpt);
1111 mptsas_update_driver_data(mpt);
1112
1113 /* start requests, if possible */
1114 mptsas_restart_hba(mpt);
1115
1116 mutex_exit(&mpt->m_mutex);
1117
1118 /*
1119 * Restart watch thread
1120 */
1121 mutex_enter(&mptsas_global_mutex);
1122 if (mptsas_timeout_id == 0) {
1123 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1124 mptsas_tick);
1125 mptsas_timeouts_enabled = 1;
1126 }
1127 mutex_exit(&mptsas_global_mutex);
1128
1129 /* report idle status to pm framework */
1130 if (mpt->m_options & MPTSAS_OPT_PM) {
1131 (void) pm_idle_component(dip, 0);
1132 }
1133
1134 return (DDI_SUCCESS);
1135
1136 default:
1137 return (DDI_FAILURE);
1138
1139 }
1140
1141 instance = ddi_get_instance(dip);
1142
1143 /*
1144 * Allocate softc information.
1145 */
1146 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1147 mptsas_log(NULL, CE_WARN,
1148 "mptsas%d: cannot allocate soft state", instance);
1149 goto fail;
1150 }
1151
1152 mpt = ddi_get_soft_state(mptsas_state, instance);
1153
1154 if (mpt == NULL) {
1155 mptsas_log(NULL, CE_WARN,
1156 "mptsas%d: cannot get soft state", instance);
1157 goto fail;
1158 }
1159
1160 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1161 scsi_size_clean(dip);
1162
1163 mpt->m_dip = dip;
1164 mpt->m_instance = instance;
1165
1166 /* Make a per-instance copy of the structures */
1167 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1168 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1169 mpt->m_reg_acc_attr = mptsas_dev_attr;
1170 mpt->m_dev_acc_attr = mptsas_dev_attr;
1171
1172 /*
1173 * Initialize FMA
1174 */
1175 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1176 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1177 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1178 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1179
1180 mptsas_fm_init(mpt);
1181
1182 if (mptsas_alloc_handshake_msg(mpt,
1183 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1184 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1185 goto fail;
1186 }
1187
1188 /*
1189 * Setup configuration space
1190 */
1191 if (mptsas_config_space_init(mpt) == FALSE) {
1192 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1193 goto fail;
1194 }
1195 config_setup++;
1196
1197 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1198 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1199 mptsas_log(mpt, CE_WARN, "map setup failed");
1200 goto fail;
1201 }
1202 map_setup++;
1203
1204 /*
1205 * A taskq is created for dealing with the event handler
1206 */
1207 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1208 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1209 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1210 goto fail;
1211 }
1212 event_taskq_create++;
1213
1214 /*
1215 * A taskq is created for dealing with dr events
1216 */
1217 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1218 "mptsas_dr_taskq",
1219 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1220 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1221 "failed");
1222 goto fail;
1223 }
1224 dr_taskq_create++;
1225
1226 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1227 0, "mptsas_doneq_thread_threshold_prop", 10);
1228 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1229 0, "mptsas_doneq_length_threshold_prop", 8);
1230 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1231 0, "mptsas_doneq_thread_n_prop", 8);
1232
1233 if (mpt->m_doneq_thread_n) {
1234 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1235 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1236
1237 mutex_enter(&mpt->m_doneq_mutex);
1238 mpt->m_doneq_thread_id =
1239 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1240 * mpt->m_doneq_thread_n, KM_SLEEP);
1241
1242 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1243 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1244 CV_DRIVER, NULL);
1245 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1246 MUTEX_DRIVER, NULL);
1247 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1248 mpt->m_doneq_thread_id[j].flag |=
1249 MPTSAS_DONEQ_THREAD_ACTIVE;
1250 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1251 mpt->m_doneq_thread_id[j].arg.t = j;
1252 mpt->m_doneq_thread_id[j].threadp =
1253 thread_create(NULL, 0, mptsas_doneq_thread,
1254 &mpt->m_doneq_thread_id[j].arg,
1255 0, &p0, TS_RUN, minclsyspri);
1256 mpt->m_doneq_thread_id[j].donetail =
1257 &mpt->m_doneq_thread_id[j].doneq;
1258 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1259 }
1260 mutex_exit(&mpt->m_doneq_mutex);
1261 doneq_thread_create++;
1262 }
1263
1264 /* Initialize mutex used in interrupt handler */
1265 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1266 DDI_INTR_PRI(mpt->m_intr_pri));
1267 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1268 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1269 DDI_INTR_PRI(mpt->m_intr_pri));
1270 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1271 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1272 NULL, MUTEX_DRIVER,
1273 DDI_INTR_PRI(mpt->m_intr_pri));
1274 }
1275
1276 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1277 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1278 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1279 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1280 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1281 mutex_init_done++;
1282
1283 /*
1284 * Disable hardware interrupt since we're not ready to
1285 * handle it yet.
1286 */
1287 MPTSAS_DISABLE_INTR(mpt);
1288 if (mptsas_register_intrs(mpt) == FALSE)
1289 goto fail;
1290 intr_added++;
1291
1292 mutex_enter(&mpt->m_mutex);
1293 /*
1294 * Initialize power management component
1295 */
1296 if (mpt->m_options & MPTSAS_OPT_PM) {
1297 if (mptsas_init_pm(mpt)) {
1298 mutex_exit(&mpt->m_mutex);
1299 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1300 "failed");
1301 goto fail;
1302 }
1303 }
1304
1305 /*
1306 * Initialize chip using Message Unit Reset, if allowed
1307 */
1308 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1309 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1310 mutex_exit(&mpt->m_mutex);
1311 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1312 goto fail;
1313 }
1314
1315 /*
1316 * Fill in the phy_info structure and get the base WWID
1317 */
1318 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1319 mptsas_log(mpt, CE_WARN,
1320 "mptsas_get_manufacture_page5 failed!");
1321 goto fail;
1322 }
1323
1324 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1325 mptsas_log(mpt, CE_WARN,
1326 "mptsas_get_sas_io_unit_page_hndshk failed!");
1327 goto fail;
1328 }
1329
1330 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1331 mptsas_log(mpt, CE_WARN,
1332 "mptsas_get_manufacture_page0 failed!");
1333 goto fail;
1334 }
1335
1336 mutex_exit(&mpt->m_mutex);
1337
1338 /*
1339 * Register the iport for multiple port HBA
1340 */
1341 mptsas_iport_register(mpt);
1342
1343 /*
1344 * initialize SCSI HBA transport structure
1345 */
1346 if (mptsas_hba_setup(mpt) == FALSE)
1347 goto fail;
1348 hba_attach_setup++;
1349
1350 if (mptsas_smp_setup(mpt) == FALSE)
1351 goto fail;
1352 smp_attach_setup++;
1353
1354 if (mptsas_cache_create(mpt) == FALSE)
1355 goto fail;
1356
1357 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1358 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1359 if (mpt->m_scsi_reset_delay == 0) {
1360 mptsas_log(mpt, CE_NOTE,
1361 "scsi_reset_delay of 0 is not recommended,"
1362 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1363 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1364 }
1365
1366 /*
1367 * Initialize the wait and done FIFO queue
1368 */
1369 mpt->m_donetail = &mpt->m_doneq;
1370 mpt->m_waitqtail = &mpt->m_waitq;
1371 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1372 mpt->m_tx_draining = 0;
1373
1374 /*
1375 * ioc cmd queue initialize
1376 */
1377 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1378 mpt->m_dev_handle = 0xFFFF;
1379
1380 MPTSAS_ENABLE_INTR(mpt);
1381
1382 /*
1383 * enable event notification
1384 */
1385 mutex_enter(&mpt->m_mutex);
1386 if (mptsas_ioc_enable_event_notification(mpt)) {
1387 mutex_exit(&mpt->m_mutex);
1388 goto fail;
1389 }
1390 mutex_exit(&mpt->m_mutex);
1391
1392 /*
1393 * Initialize PHY info for smhba
1394 */
1395 if (mptsas_smhba_setup(mpt)) {
1396 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1397 "failed");
1398 goto fail;
1399 }
1400
1401 /* Check all dma handles allocated in attach */
1402 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1403 != DDI_SUCCESS) ||
1404 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1405 != DDI_SUCCESS) ||
1406 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1407 != DDI_SUCCESS) ||
1408 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1409 != DDI_SUCCESS) ||
1410 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1411 != DDI_SUCCESS)) {
1412 goto fail;
1413 }
1414
1415 /* Check all acc handles allocated in attach */
1416 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1417 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1418 != DDI_SUCCESS) ||
1419 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1420 != DDI_SUCCESS) ||
1421 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1422 != DDI_SUCCESS) ||
1423 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1424 != DDI_SUCCESS) ||
1425 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1426 != DDI_SUCCESS) ||
1427 (mptsas_check_acc_handle(mpt->m_config_handle)
1428 != DDI_SUCCESS)) {
1429 goto fail;
1430 }
1431
1432 /*
1433 * After this point, we are not going to fail the attach.
1434 */
1435 /*
1436 * used for mptsas_watch
1437 */
1438 mptsas_list_add(mpt);
1439
1440 mutex_enter(&mptsas_global_mutex);
1441 if (mptsas_timeouts_enabled == 0) {
1442 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1443 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1444
1445 mptsas_tick = mptsas_scsi_watchdog_tick *
1446 drv_usectohz((clock_t)1000000);
1447
1448 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1449 mptsas_timeouts_enabled = 1;
1450 }
1451 mutex_exit(&mptsas_global_mutex);
1452
1453 /* Print message of HBA present */
1454 ddi_report_dev(dip);
1455
1456 /* report idle status to pm framework */
1457 if (mpt->m_options & MPTSAS_OPT_PM) {
1458 (void) pm_idle_component(dip, 0);
1459 }
1460
1461 return (DDI_SUCCESS);
1462
1463 fail:
1464 mptsas_log(mpt, CE_WARN, "attach failed");
1465 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1466 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1467 if (mpt) {
1468 mutex_enter(&mptsas_global_mutex);
1469
1470 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1471 timeout_id_t tid = mptsas_timeout_id;
1472 mptsas_timeouts_enabled = 0;
1473 mptsas_timeout_id = 0;
1474 mutex_exit(&mptsas_global_mutex);
1475 (void) untimeout(tid);
1476 mutex_enter(&mptsas_global_mutex);
1477 }
1478 mutex_exit(&mptsas_global_mutex);
1479 /* deallocate in reverse order */
1480 mptsas_cache_destroy(mpt);
1481
1482 if (smp_attach_setup) {
1483 mptsas_smp_teardown(mpt);
1484 }
1485 if (hba_attach_setup) {
1486 mptsas_hba_teardown(mpt);
1487 }
1488
1489 if (mpt->m_targets)
1490 refhash_destroy(mpt->m_targets);
1491 if (mpt->m_smp_targets)
1492 refhash_destroy(mpt->m_smp_targets);
1493
1494 if (mpt->m_active) {
1495 mptsas_free_active_slots(mpt);
1496 }
1497 if (intr_added) {
1498 mptsas_unregister_intrs(mpt);
1499 }
1500
1501 if (doneq_thread_create) {
1502 mutex_enter(&mpt->m_doneq_mutex);
1503 doneq_thread_num = mpt->m_doneq_thread_n;
1504 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1505 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1506 mpt->m_doneq_thread_id[j].flag &=
1507 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1508 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1509 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1510 }
1511 while (mpt->m_doneq_thread_n) {
1512 cv_wait(&mpt->m_doneq_thread_cv,
1513 &mpt->m_doneq_mutex);
1514 }
1515 for (j = 0; j < doneq_thread_num; j++) {
1516 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1517 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1518 }
1519 kmem_free(mpt->m_doneq_thread_id,
1520 sizeof (mptsas_doneq_thread_list_t)
1521 * doneq_thread_num);
1522 mutex_exit(&mpt->m_doneq_mutex);
1523 cv_destroy(&mpt->m_doneq_thread_cv);
1524 mutex_destroy(&mpt->m_doneq_mutex);
1525 }
1526 if (event_taskq_create) {
1527 ddi_taskq_destroy(mpt->m_event_taskq);
1528 }
1529 if (dr_taskq_create) {
1530 ddi_taskq_destroy(mpt->m_dr_taskq);
1531 }
1532 if (mutex_init_done) {
1533 mutex_destroy(&mpt->m_tx_waitq_mutex);
1534 mutex_destroy(&mpt->m_passthru_mutex);
1535 mutex_destroy(&mpt->m_mutex);
1536 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1537 mutex_destroy(
1538 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1539 }
1540 cv_destroy(&mpt->m_cv);
1541 cv_destroy(&mpt->m_passthru_cv);
1542 cv_destroy(&mpt->m_fw_cv);
1543 cv_destroy(&mpt->m_config_cv);
1544 cv_destroy(&mpt->m_fw_diag_cv);
1545 }
1546
1547 if (map_setup) {
1548 mptsas_cfg_fini(mpt);
1549 }
1550 if (config_setup) {
1551 mptsas_config_space_fini(mpt);
1552 }
1553 mptsas_free_handshake_msg(mpt);
1554 mptsas_hba_fini(mpt);
1555
1556 mptsas_fm_fini(mpt);
1557 ddi_soft_state_free(mptsas_state, instance);
1558 ddi_prop_remove_all(dip);
1559 }
1560 return (DDI_FAILURE);
1561 }
1562
/*
 * DDI_SUSPEND handler.
 *
 * Quiesces one mpt instance: cancels its per-instance timeouts, and if
 * this was the last active instance, cancels the global watch/reset
 * timeouts as well.  If the device is at full power (or PM is not in
 * use), interrupts are masked and a RAID action system shutdown is sent
 * to sync IR before the taskqs are drained.  Returns DDI_SUCCESS in all
 * cases; iport nodes and already-suspended instances are no-ops.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport children carry no hardware state of their own. */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* m_suspended is a count; only the first suspend does the work. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt.  The mutex is dropped
	 * around untimeout() so a concurrently-firing callback that
	 * needs m_mutex cannot deadlock against us.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	/* g == NULL means no instance is still active. */
	if (g == NULL) {
		timeout_id_t	tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			/* Drop the lock around untimeout(), as above. */
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1669
1670 #ifdef __sparc
1671 /*ARGSUSED*/
1672 static int
1673 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1674 {
1675 mptsas_t *mpt;
1676 scsi_hba_tran_t *tran;
1677
1678 /*
1679 * If this call is for iport, just return.
1680 */
1681 if (scsi_hba_iport_unit_address(devi))
1682 return (DDI_SUCCESS);
1683
1684 if ((tran = ddi_get_driver_private(devi)) == NULL)
1685 return (DDI_SUCCESS);
1686
1687 if ((mpt = TRAN2MPT(tran)) == NULL)
1688 return (DDI_SUCCESS);
1689
1690 /*
1691 * Send RAID action system shutdown to sync IR. Disable HBA
1692 * interrupts in hardware first.
1693 */
1694 MPTSAS_DISABLE_INTR(mpt);
1695 mptsas_raid_action_system_shutdown(mpt);
1696
1697 return (DDI_SUCCESS);
1698 }
1699 #else /* __sparc */
1700 /*
1701 * quiesce(9E) entry point.
1702 *
1703 * This function is called when the system is single-threaded at high
1704 * PIL with preemption disabled. Therefore, this function must not be
1705 * blocked.
1706 *
1707 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1708 * DDI_FAILURE indicates an error condition and should almost never happen.
1709 */
1710 static int
1711 mptsas_quiesce(dev_info_t *devi)
1712 {
1713 mptsas_t *mpt;
1714 scsi_hba_tran_t *tran;
1715
1716 /*
1717 * If this call is for iport, just return.
1718 */
1719 if (scsi_hba_iport_unit_address(devi))
1720 return (DDI_SUCCESS);
1721
1722 if ((tran = ddi_get_driver_private(devi)) == NULL)
1723 return (DDI_SUCCESS);
1724
1725 if ((mpt = TRAN2MPT(tran)) == NULL)
1726 return (DDI_SUCCESS);
1727
1728 /* Disable HBA interrupts in hardware */
1729 MPTSAS_DISABLE_INTR(mpt);
1730 /* Send RAID action system shutdonw to sync IR */
1731 mptsas_raid_action_system_shutdown(mpt);
1732
1733 return (DDI_SUCCESS);
1734 }
1735 #endif /* __sparc */
1736
1737 /*
1738 * detach(9E). Remove all device allocations and system resources;
1739 * disable device interrupts.
1740 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1741 */
1742 static int
1743 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1744 {
1745 /* CONSTCOND */
1746 ASSERT(NO_COMPETING_THREADS);
1747 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1748
1749 switch (cmd) {
1750 case DDI_DETACH:
1751 return (mptsas_do_detach(devi));
1752
1753 case DDI_SUSPEND:
1754 return (mptsas_suspend(devi));
1755
1756 default:
1757 return (DDI_FAILURE);
1758 }
1759 /* NOTREACHED */
1760 }
1761
/*
 * Worker for DDI_DETACH: undo everything mptsas_attach() set up, in
 * roughly reverse order.  For iport nodes this only frees MPxIO path
 * info and unregisters the pHCI; for the HBA node it resets the IOC,
 * tears down interrupts, taskqs, doneq threads, timeouts, kstats,
 * hashes, caches, mutexes/CVs, and finally the soft state.
 * Returns DDI_SUCCESS, or DDI_FAILURE if detach must be refused
 * (e.g. outstanding path info on an iport).
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* Free each client path; any busy path aborts. */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Stop the doneq worker threads: clear each thread's ACTIVE flag,
	 * wake it, then wait until all threads have exited (they decrement
	 * m_doneq_thread_n and signal m_doneq_thread_cv on the way out).
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt.  The mutex is dropped
	 * around untimeout() so a firing callback that needs m_mutex
	 * cannot deadlock against us.
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1977
1978 static void
1979 mptsas_list_add(mptsas_t *mpt)
1980 {
1981 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1982
1983 if (mptsas_head == NULL) {
1984 mptsas_head = mpt;
1985 } else {
1986 mptsas_tail->m_next = mpt;
1987 }
1988 mptsas_tail = mpt;
1989 rw_exit(&mptsas_global_rwlock);
1990 }
1991
1992 static void
1993 mptsas_list_del(mptsas_t *mpt)
1994 {
1995 mptsas_t *m;
1996 /*
1997 * Remove device instance from the global linked list
1998 */
1999 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2000 if (mptsas_head == mpt) {
2001 m = mptsas_head = mpt->m_next;
2002 } else {
2003 for (m = mptsas_head; m != NULL; m = m->m_next) {
2004 if (m->m_next == mpt) {
2005 m->m_next = mpt->m_next;
2006 break;
2007 }
2008 }
2009 if (m == NULL) {
2010 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2011 }
2012 }
2013
2014 if (mptsas_tail == mpt) {
2015 mptsas_tail = m;
2016 }
2017 rw_exit(&mptsas_global_rwlock);
2018 }
2019
2020 static int
2021 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2022 {
2023 ddi_dma_attr_t task_dma_attrs;
2024
2025 mpt->m_hshk_dma_size = 0;
2026 task_dma_attrs = mpt->m_msg_dma_attr;
2027 task_dma_attrs.dma_attr_sgllen = 1;
2028 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2029
2030 /* allocate Task Management ddi_dma resources */
2031 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2032 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2033 alloc_size, NULL) == FALSE) {
2034 return (DDI_FAILURE);
2035 }
2036 mpt->m_hshk_dma_size = alloc_size;
2037
2038 return (DDI_SUCCESS);
2039 }
2040
2041 static void
2042 mptsas_free_handshake_msg(mptsas_t *mpt)
2043 {
2044 if (mpt->m_hshk_dma_size == 0)
2045 return;
2046 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2047 mpt->m_hshk_dma_size = 0;
2048 }
2049
/*
 * Allocate and populate the SCSA transport vector for this HBA and
 * attach it via scsi_hba_attach_setup().  Returns TRUE on success,
 * FALSE on failure (the tran is freed and mpt->m_tran reset to NULL).
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	/* With SCSI_HBA_CANSLEEP the allocation should not fail. */
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	/* Event callbacks are not supported by this driver. */
	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports, and each iport needs its
	 * own clone of the tran vector, so pass SCSI_HBA_TRAN_CLONE to
	 * SCSA; the cloned tran is inherited by each iport.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2111
2112 static void
2113 mptsas_hba_teardown(mptsas_t *mpt)
2114 {
2115 (void) scsi_hba_detach(mpt->m_dip);
2116 if (mpt->m_tran != NULL) {
2117 scsi_hba_tran_free(mpt->m_tran);
2118 mpt->m_tran = NULL;
2119 }
2120 }
2121
/*
 * Discover the distinct SAS ports on this adapter by grouping attached
 * PHYs that share a port number, and register one iport node per port.
 * The iport unit-address is the hex string of the port's PHY mask.  A
 * virtual iport "v0" is always registered for RAID volumes (phymask 0).
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip PHYs with no attached device. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Skip PHYs already covered by a registered port. */
		if ((mask & (1 << i)) != 0)
			continue;

		/* Collect every attached PHY that belongs to this port. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the computed mask on each member PHY. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * Drop m_mutex across the framework call, which may block.
		 * NOTE(review): PHY info could change while unlocked --
		 * assumed benign; confirm against the event/reset paths.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2176
/*
 * Allocate and attach the SMP (SAS expander) transport for this HBA and
 * create the refhash used to track known SMP targets.
 *
 * Returns TRUE on success; FALSE if smp_hba_attach_setup() fails (the
 * SMP transport is freed and m_smptran reset to NULL in that case).
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	/* smp_hba_tran_alloc() sleeps for memory and cannot fail. */
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table, keyed by target address
	 * (WWN + phymask) with mptsas_smp_free as the destructor.
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* 0xFFFF: no SMP device handle discovered yet -- TODO confirm */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2202
2203 static void
2204 mptsas_smp_teardown(mptsas_t *mpt)
2205 {
2206 (void) smp_hba_detach(mpt->m_dip);
2207 if (mpt->m_smptran != NULL) {
2208 smp_hba_tran_free(mpt->m_smptran);
2209 mpt->m_smptran = NULL;
2210 }
2211 mpt->m_smp_devhdl = 0;
2212 }
2213
2214 static int
2215 mptsas_cache_create(mptsas_t *mpt)
2216 {
2217 int instance = mpt->m_instance;
2218 char buf[64];
2219
2220 /*
2221 * create kmem cache for packets
2222 */
2223 (void) sprintf(buf, "mptsas%d_cache", instance);
2224 mpt->m_kmem_cache = kmem_cache_create(buf,
2225 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2226 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2227 NULL, (void *)mpt, NULL, 0);
2228
2229 if (mpt->m_kmem_cache == NULL) {
2230 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2231 return (FALSE);
2232 }
2233
2234 /*
2235 * create kmem cache for extra SGL frames if SGL cannot
2236 * be accomodated into main request frame.
2237 */
2238 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2239 mpt->m_cache_frames = kmem_cache_create(buf,
2240 sizeof (mptsas_cache_frames_t), 8,
2241 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2242 NULL, (void *)mpt, NULL, 0);
2243
2244 if (mpt->m_cache_frames == NULL) {
2245 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2246 return (FALSE);
2247 }
2248
2249 return (TRUE);
2250 }
2251
2252 static void
2253 mptsas_cache_destroy(mptsas_t *mpt)
2254 {
2255 /* deallocate in reverse order */
2256 if (mpt->m_cache_frames) {
2257 kmem_cache_destroy(mpt->m_cache_frames);
2258 mpt->m_cache_frames = NULL;
2259 }
2260 if (mpt->m_kmem_cache) {
2261 kmem_cache_destroy(mpt->m_kmem_cache);
2262 mpt->m_kmem_cache = NULL;
2263 }
2264 }
2265
/*
 * power(9E) entry point.
 *
 * Raise (PM_LEVEL_D0) or lower (PM_LEVEL_D3) the adapter power level.
 * Only the HBA node owns the hardware; requests on iport nodes succeed
 * trivially.  Lowering power is refused while the device is busy.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t *mpt;
	int rval = DDI_SUCCESS;
	int polls = 0;
	uint32_t ioc_status;

	/* Power management is handled on the HBA node, not iports. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds (3000 polls x 10ms) for the IOC
		 * to come out of reset.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			/* Clear the flag so a full hard reset is performed. */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2337
2338 /*
2339 * Initialize configuration space and figure out which
2340 * chip and revison of the chip the mpt driver is using.
2341 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Already mapped (e.g. on re-entry after suspend): nothing to do. */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.  Clear any latched parity error
	 * by writing the PERROR bit back to the status register.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	/* Enable bus mastering, memory space, SERR and parity detection. */
	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	/* Probe PCI capabilities (MSI/MSI-X etc.); failure is non-fatal. */
	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2396
2397 static void
2398 mptsas_config_space_fini(mptsas_t *mpt)
2399 {
2400 if (mpt->m_config_handle != NULL) {
2401 mptsas_disable_bus_master(mpt);
2402 pci_config_teardown(&mpt->m_config_handle);
2403 mpt->m_config_handle = NULL;
2404 }
2405 }
2406
2407 static void
2408 mptsas_setup_cmd_reg(mptsas_t *mpt)
2409 {
2410 ushort_t cmdreg;
2411
2412 /*
2413 * Set the command register to the needed values.
2414 */
2415 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2416 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2417 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2418 cmdreg &= ~PCI_COMM_IO;
2419 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2420 }
2421
2422 static void
2423 mptsas_disable_bus_master(mptsas_t *mpt)
2424 {
2425 ushort_t cmdreg;
2426
2427 /*
2428 * Clear the master enable bit in the PCI command register.
2429 * This prevents any bus mastering activity like DMA.
2430 */
2431 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2432 cmdreg &= ~PCI_COMM_ME;
2433 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2434 }
2435
2436 int
2437 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2438 {
2439 ddi_dma_attr_t attrs;
2440
2441 attrs = mpt->m_io_dma_attr;
2442 attrs.dma_attr_sgllen = 1;
2443
2444 ASSERT(dma_statep != NULL);
2445
2446 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2447 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2448 &dma_statep->cookie) == FALSE) {
2449 return (DDI_FAILURE);
2450 }
2451
2452 return (DDI_SUCCESS);
2453 }
2454
2455 void
2456 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2457 {
2458 ASSERT(dma_statep != NULL);
2459 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2460 dma_statep->size = 0;
2461 }
2462
/*
 * Allocate a temporary single-cookie DMA buffer of `size' bytes, invoke
 * `callback' with the mapped kernel address, then release the buffer.
 *
 * The callback receives (mpt, memp, var, accessp) and its return value
 * is propagated, unless a DMA or access handle fault is detected after
 * the call, in which case DDI_FAILURE is returned and the fault is
 * reported to the FMA framework.  Caller must hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t attrs;
	ddi_dma_handle_t dma_handle;
	caddr_t memp;
	ddi_acc_handle_t accessp;
	int rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/* Check both handles for faults before trusting the result. */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2495
/*
 * Allocate the DMA memory backing the System Request Message Frame pool.
 * Any previously allocated pool is destroyed first, so this is safe to
 * call again (e.g. after a reset changes the IOC limits).
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t frame_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;

	/*
	 * re-alloc when it has already been allocated
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *   Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}
2548
/*
 * Allocate the DMA memory backing the System Reply Message Frame pool.
 * Any previously allocated pool is destroyed first, so this is safe to
 * call again after a reset.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t frame_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;

	/*
	 * re-alloc when it has already been allocated
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	/*
	 * The size of the reply frame pool is:
	 *   Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.  System Reply Message Frames must be
	 * aligned on a 4-byte boundary.  This is the default.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}
2600
/*
 * Allocate the DMA memory backing the Reply Free Queue (the queue of
 * 32-bit reply-frame addresses the driver returns to the IOC).  Any
 * previously allocated queue is destroyed first.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_free_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t frame_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;

	/*
	 * re-alloc when it has already been allocated
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	/*
	 * The reply free queue size is:
	 *   Reply Free Queue Depth * 4
	 * The "4" is the size of one 32 bit address (low part of 64-bit
	 * address)
	 */
	mem_size = mpt->m_free_queue_depth * 4;

	/*
	 * set the DMA attributes.  The Reply Free Queue must be aligned on
	 * a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply free queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply free queue memory address.  This chip uses this
	 * address to read from the reply free queue.  The second address
	 * is the address mpt uses to manage the queue.
	 */
	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_free_queue = memp;

	/*
	 * Clear the reply free queue memory.
	 */
	bzero(mpt->m_free_queue, mem_size);

	return (DDI_SUCCESS);
}
2655
/*
 * Allocate the DMA memory backing the Reply Descriptor Post Queue (the
 * queue of 64-bit descriptors the IOC writes on command completion).
 * Any previously allocated queue is destroyed first.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_post_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t frame_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;

	/*
	 * re-alloc when it has already been allocated
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/*
	 * The reply descriptor post queue size is:
	 *   Reply Descriptor Post Queue Depth * 8
	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
	 */
	mem_size = mpt->m_post_queue_depth * 8;

	/*
	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply post queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply descriptor post queue memory address.  This chip
	 * uses this address to write to the reply descriptor post queue.  The
	 * second address is the address mpt uses to manage the queue.
	 */
	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_post_queue = memp;

	/*
	 * Clear the reply post queue memory.
	 */
	bzero(mpt->m_post_queue, mem_size);

	return (DDI_SUCCESS);
}
2709
2710 static void
2711 mptsas_alloc_reply_args(mptsas_t *mpt)
2712 {
2713 if (mpt->m_replyh_args == NULL) {
2714 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2715 mpt->m_max_replies, KM_SLEEP);
2716 }
2717 }
2718
2719 static int
2720 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2721 {
2722 mptsas_cache_frames_t *frames = NULL;
2723 if (cmd->cmd_extra_frames == NULL) {
2724 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2725 if (frames == NULL) {
2726 return (DDI_FAILURE);
2727 }
2728 cmd->cmd_extra_frames = frames;
2729 }
2730 return (DDI_SUCCESS);
2731 }
2732
2733 static void
2734 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2735 {
2736 if (cmd->cmd_extra_frames) {
2737 kmem_cache_free(mpt->m_cache_frames,
2738 (void *)cmd->cmd_extra_frames);
2739 cmd->cmd_extra_frames = NULL;
2740 }
2741 }
2742
/*
 * Unmap the device register space mapped during attach.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2749
/*
 * Release all DMA memory allocated for the IOC message interface:
 * request frames, reply frames, reply free queue, reply post queue,
 * and the reply-handler argument array.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2775
/*
 * Build the unit-address string for a child LUN node into `name' (at
 * most `len' bytes).  SATA devices addressed by PHY number are named
 * "pPHY,LUN"; devices with a target-port property are named "WWN,LUN".
 * Returns DDI_FAILURE if neither addressing property exists on the
 * node, DDI_SUCCESS otherwise.
 */
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int lun = 0;
	char *sas_wwn = NULL;
	int phynum = -1;
	int reallen = 0;

	/* Get the target num */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		return (DDI_FAILURE);
	}

	/*
	 * NOTE(review): on truncation only a warning is logged and
	 * DDI_SUCCESS is still returned with a clipped name -- confirm
	 * that all callers pass an adequately sized buffer.
	 */
	ASSERT(reallen < len);
	if (reallen >= len) {
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}
2813
2814 /*
2815 * tran_tgt_init(9E) - target device instance initialization
2816 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	mptsas_phymask_t phymask = 0;
	uint64_t sas_wwn = 0;
	mptsas_target_addr_t addr;
	mpt = SDEV2MPT(sd);

	/* Target nodes hang off iports, never directly off the HBA node. */
	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * A non-persistent (.conf enumerated) node is merged into the
	 * matching persistent node and then rejected here.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/* MPxIO client path: get LUN and WWN from the pathinfo node. */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs.  Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Non-MPxIO path: read the properties from the devinfo node. */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}

	/* A target must have a WWN, a phymask (direct-attach), or both. */
	ASSERT((sas_wwn != 0) || (phymask != 0));
	addr.mta_wwn = sas_wwn;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_lookup(mpt->m_targets, &addr);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	/*
	 * Attach per-target private data (freed in tran_tgt_free).  The
	 * tran vector is cloned per target (SCSI_HBA_TRAN_CLONE), so this
	 * tran_tgt_private belongs to this target only.
	 */
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		/* 0x238 = 4-byte VPD header + 56 + 512 bytes of IDENTIFY */
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/* Inquiry failure is non-fatal for target init. */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* IDENTIFY data starts at byte 60 of the VPD 89h page. */
		sid = (void *)(&inq89[60]);

		/* IDENTIFY strings are byte-swapped 16-bit words. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into into vid/pid
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
3007 /*
3008 * tran_tgt_free(9E) - target device instance deallocation
3009 */
3010 static void
3011 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3012 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3013 {
3014 #ifndef __lock_lint
3015 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3016 #endif
3017
3018 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3019
3020 if (tgt_private != NULL) {
3021 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3022 hba_tran->tran_tgt_private = NULL;
3023 }
3024 }
3025
3026 /*
3027 * scsi_pkt handling
3028 *
3029 * Visible to the external world via the transport structure.
3030 */
3031
3032 /*
3033 * Notes:
3034 * - transport the command to the addressed SCSI target/lun device
3035 * - normal operation is to schedule the command to be transported,
3036 * and return TRAN_ACCEPT if this is successful.
3037 * - if NO_INTR, tran_start must poll device for command completion
3038 */
/*
 * tran_start(9E) -- transport a command to the addressed target/LUN.
 * Returns TRAN_ACCEPT on success, TRAN_BUSY to request a retry, or
 * TRAN_FATAL_ERROR when the target is gone.
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t *mpt = PKT2MPT(pkt);
	mptsas_cmd_t *cmd = PKT2CMD(pkt);
	int rval;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it.  Instead, queue the cmd and next time when the instance
	 * lock is not held, accept all the queued cmd.  An extra tx_waitq
	 * is introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			/* Got the instance lock uncontended: submit now. */
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled commands may not be queued: block for lock. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/* Another thread is draining: just append. */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3181
3182 /*
3183 * Accept all the queued cmds(if any) before accept the current one.
3184 */
3185 static int
3186 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3187 {
3188 int rval;
3189 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3190
3191 ASSERT(mutex_owned(&mpt->m_mutex));
3192 /*
3193 * The call to mptsas_accept_tx_waitq() must always be performed
3194 * because that is where mpt->m_tx_draining is cleared.
3195 */
3196 mutex_enter(&mpt->m_tx_waitq_mutex);
3197 mptsas_accept_tx_waitq(mpt);
3198 mutex_exit(&mpt->m_tx_waitq_mutex);
3199 /*
3200 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3201 * in this case, m_mutex is acquired.
3202 */
3203 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3204 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3205 /*
3206 * The command should be allowed to retry by returning
3207 * TRAN_BUSY to stall the I/O's which come from
3208 * scsi_vhci since the device/path is in unstable state
3209 * now.
3210 */
3211 return (TRAN_BUSY);
3212 } else {
3213 /*
3214 * The device is offline, just fail the command by
3215 * return TRAN_FATAL_ERROR.
3216 */
3217 return (TRAN_FATAL_ERROR);
3218 }
3219 }
3220 rval = mptsas_accept_pkt(mpt, cmd);
3221
3222 return (rval);
3223 }
3224
/*
 * Accept a single command for execution.
 *
 * Caller holds m_mutex.  The packet is prepared if it has not been
 * already, the target's DevHandle is validated, and then the command
 * is either started on the hardware immediately (the normal case) or
 * placed on the wait queue.
 *
 * Returns TRAN_ACCEPT on success, TRAN_BUSY while the HBA is being
 * reset, or TRAN_FATAL_ERROR when the DevHandle is invalid and no
 * reset is in progress.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int rval = TRAN_ACCEPT;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/*
			 * Commands taken off the tx wait queue were already
			 * accepted once; complete them through the done
			 * queue instead of failing the accept.
			 */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If the device handle has already been invalidated, just
	 * fail the command.  In theory, a command from a scsi_vhci
	 * client cannot be sent down with an invalid devhdl, since the
	 * devhdl is invalidated after path offline and the target
	 * driver is not supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* Same as above: complete via the done queue. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free request slot; queue for later. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3328
/*
 * Install the command into a free request slot and account for it.
 *
 * Returns TRUE when a slot was reserved (cmd->cmd_slot is set, the
 * command is stored in m_active and the command counters are bumped),
 * or FALSE when every normal slot is occupied.  Caller holds m_mutex.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots = mpt->m_active;
	uint_t slot, start_rotor;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor.  If no slot is
	 * available, we'll return FALSE to indicate that.  This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1.  The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Rotor has come full circle: every slot was examined. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	/* The slot picked on the final (break) iteration may be busy. */
	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per-target ncmds if this is not a
	 * command that has no target associated with it (i.e. an
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		ptgt->m_t_ncmds++;
	}
	cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

	/*
	 * If the initial timeout is less than or equal to one tick, bump
	 * the timeout by a tick so that the command doesn't time out before
	 * its allotted time.
	 */
	if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
		cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
	}
	return (TRUE);
}
3390
3391 /*
3392 * prepare the pkt:
3393 * the pkt may have been resubmitted or just reused so
3394 * initialize some fields and do some checks.
3395 */
3396 static int
3397 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3398 {
3399 struct scsi_pkt *pkt = CMD2PKT(cmd);
3400
3401 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3402
3403 /*
3404 * Reinitialize some fields that need it; the packet may
3405 * have been resubmitted
3406 */
3407 pkt->pkt_reason = CMD_CMPLT;
3408 pkt->pkt_state = 0;
3409 pkt->pkt_statistics = 0;
3410 pkt->pkt_resid = 0;
3411 cmd->cmd_age = 0;
3412 cmd->cmd_pkt_flags = pkt->pkt_flags;
3413
3414 /*
3415 * zero status byte.
3416 */
3417 *(pkt->pkt_scbp) = 0;
3418
3419 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3420 pkt->pkt_resid = cmd->cmd_dmacount;
3421
3422 /*
3423 * consistent packets need to be sync'ed first
3424 * (only for data going out)
3425 */
3426 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3427 (cmd->cmd_flags & CFLAG_DMASEND)) {
3428 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3429 DDI_DMA_SYNC_FORDEV);
3430 }
3431 }
3432
3433 cmd->cmd_flags =
3434 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3435 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3436
3437 return (TRAN_ACCEPT);
3438 }
3439
3440 /*
3441 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3442 *
3443 * One of three possibilities:
3444 * - allocate scsi_pkt
3445 * - allocate scsi_pkt and DMA resources
3446 * - allocate DMA resources to an already-allocated pkt
3447 */
3448 static struct scsi_pkt *
3449 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3450 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3451 int (*callback)(), caddr_t arg)
3452 {
3453 mptsas_cmd_t *cmd, *new_cmd;
3454 mptsas_t *mpt = ADDR2MPT(ap);
3455 int failure = 1;
3456 uint_t oldcookiec;
3457 mptsas_target_t *ptgt = NULL;
3458 int rval;
3459 mptsas_tgt_private_t *tgt_private;
3460 int kf;
3461
3462 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3463
3464 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3465 tran_tgt_private;
3466 ASSERT(tgt_private != NULL);
3467 if (tgt_private == NULL) {
3468 return (NULL);
3469 }
3470 ptgt = tgt_private->t_private;
3471 ASSERT(ptgt != NULL);
3472 if (ptgt == NULL)
3473 return (NULL);
3474 ap->a_target = ptgt->m_devhdl;
3475 ap->a_lun = tgt_private->t_lun;
3476
3477 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3478 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3479 statuslen *= 100; tgtlen *= 4;
3480 #endif
3481 NDBG3(("mptsas_scsi_init_pkt:\n"
3482 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3483 ap->a_target, (void *)pkt, (void *)bp,
3484 cmdlen, statuslen, tgtlen, flags));
3485
3486 /*
3487 * Allocate the new packet.
3488 */
3489 if (pkt == NULL) {
3490 ddi_dma_handle_t save_dma_handle;
3491 ddi_dma_handle_t save_arq_dma_handle;
3492 struct buf *save_arq_bp;
3493 ddi_dma_cookie_t save_arqcookie;
3494
3495 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3496
3497 if (cmd) {
3498 save_dma_handle = cmd->cmd_dmahandle;
3499 save_arq_dma_handle = cmd->cmd_arqhandle;
3500 save_arq_bp = cmd->cmd_arq_buf;
3501 save_arqcookie = cmd->cmd_arqcookie;
3502 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3503 cmd->cmd_dmahandle = save_dma_handle;
3504 cmd->cmd_arqhandle = save_arq_dma_handle;
3505 cmd->cmd_arq_buf = save_arq_bp;
3506 cmd->cmd_arqcookie = save_arqcookie;
3507
3508 pkt = (void *)((uchar_t *)cmd +
3509 sizeof (struct mptsas_cmd));
3510 pkt->pkt_ha_private = (opaque_t)cmd;
3511 pkt->pkt_address = *ap;
3512 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3513 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3514 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3515 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3516 cmd->cmd_cdblen = (uchar_t)cmdlen;
3517 cmd->cmd_scblen = statuslen;
3518 cmd->cmd_rqslen = SENSE_LENGTH;
3519 cmd->cmd_tgt_addr = ptgt;
3520 failure = 0;
3521 }
3522
3523 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3524 (tgtlen > PKT_PRIV_LEN) ||
3525 (statuslen > EXTCMDS_STATUS_SIZE)) {
3526 if (failure == 0) {
3527 /*
3528 * if extern alloc fails, all will be
3529 * deallocated, including cmd
3530 */
3531 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3532 cmdlen, tgtlen, statuslen, kf);
3533 }
3534 if (failure) {
3535 /*
3536 * if extern allocation fails, it will
3537 * deallocate the new pkt as well
3538 */
3539 return (NULL);
3540 }
3541 }
3542 new_cmd = cmd;
3543
3544 } else {
3545 cmd = PKT2CMD(pkt);
3546 new_cmd = NULL;
3547 }
3548
3549
3550 /* grab cmd->cmd_cookiec here as oldcookiec */
3551
3552 oldcookiec = cmd->cmd_cookiec;
3553
3554 /*
3555 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3556 * greater than 0 and we'll need to grab the next dma window
3557 */
3558 /*
3559 * SLM-not doing extra command frame right now; may add later
3560 */
3561
3562 if (cmd->cmd_nwin > 0) {
3563
3564 /*
3565 * Make sure we havn't gone past the the total number
3566 * of windows
3567 */
3568 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3569 return (NULL);
3570 }
3571 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3572 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3573 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3574 return (NULL);
3575 }
3576 goto get_dma_cookies;
3577 }
3578
3579
3580 if (flags & PKT_XARQ) {
3581 cmd->cmd_flags |= CFLAG_XARQ;
3582 }
3583
3584 /*
3585 * DMA resource allocation. This version assumes your
3586 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3587 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3588 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3589 */
3590 if (bp && (bp->b_bcount != 0) &&
3591 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3592
3593 int cnt, dma_flags;
3594 mptti_t *dmap; /* ptr to the S/G list */
3595
3596 /*
3597 * Set up DMA memory and position to the next DMA segment.
3598 */
3599 ASSERT(cmd->cmd_dmahandle != NULL);
3600
3601 if (bp->b_flags & B_READ) {
3602 dma_flags = DDI_DMA_READ;
3603 cmd->cmd_flags &= ~CFLAG_DMASEND;
3604 } else {
3605 dma_flags = DDI_DMA_WRITE;
3606 cmd->cmd_flags |= CFLAG_DMASEND;
3607 }
3608 if (flags & PKT_CONSISTENT) {
3609 cmd->cmd_flags |= CFLAG_CMDIOPB;
3610 dma_flags |= DDI_DMA_CONSISTENT;
3611 }
3612
3613 if (flags & PKT_DMA_PARTIAL) {
3614 dma_flags |= DDI_DMA_PARTIAL;
3615 }
3616
3617 /*
3618 * workaround for byte hole issue on psycho and
3619 * schizo pre 2.1
3620 */
3621 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3622 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3623 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3624 dma_flags |= DDI_DMA_CONSISTENT;
3625 }
3626
3627 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3628 dma_flags, callback, arg,
3629 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3630 if (rval == DDI_DMA_PARTIAL_MAP) {
3631 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3632 &cmd->cmd_nwin);
3633 cmd->cmd_winindex = 0;
3634 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3635 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3636 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3637 &cmd->cmd_cookiec);
3638 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3639 switch (rval) {
3640 case DDI_DMA_NORESOURCES:
3641 bioerror(bp, 0);
3642 break;
3643 case DDI_DMA_BADATTR:
3644 case DDI_DMA_NOMAPPING:
3645 bioerror(bp, EFAULT);
3646 break;
3647 case DDI_DMA_TOOBIG:
3648 default:
3649 bioerror(bp, EINVAL);
3650 break;
3651 }
3652 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3653 if (new_cmd) {
3654 mptsas_scsi_destroy_pkt(ap, pkt);
3655 }
3656 return ((struct scsi_pkt *)NULL);
3657 }
3658
3659 get_dma_cookies:
3660 cmd->cmd_flags |= CFLAG_DMAVALID;
3661 ASSERT(cmd->cmd_cookiec > 0);
3662
3663 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3664 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3665 cmd->cmd_cookiec);
3666 bioerror(bp, EINVAL);
3667 if (new_cmd) {
3668 mptsas_scsi_destroy_pkt(ap, pkt);
3669 }
3670 return ((struct scsi_pkt *)NULL);
3671 }
3672
3673 /*
3674 * Allocate extra SGL buffer if needed.
3675 */
3676 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3677 (cmd->cmd_extra_frames == NULL)) {
3678 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3679 DDI_FAILURE) {
3680 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3681 "failed");
3682 bioerror(bp, ENOMEM);
3683 if (new_cmd) {
3684 mptsas_scsi_destroy_pkt(ap, pkt);
3685 }
3686 return ((struct scsi_pkt *)NULL);
3687 }
3688 }
3689
3690 /*
3691 * Always use scatter-gather transfer
3692 * Use the loop below to store physical addresses of
3693 * DMA segments, from the DMA cookies, into your HBA's
3694 * scatter-gather list.
3695 * We need to ensure we have enough kmem alloc'd
3696 * for the sg entries since we are no longer using an
3697 * array inside mptsas_cmd_t.
3698 *
3699 * We check cmd->cmd_cookiec against oldcookiec so
3700 * the scatter-gather list is correctly allocated
3701 */
3702
3703 if (oldcookiec != cmd->cmd_cookiec) {
3704 if (cmd->cmd_sg != (mptti_t *)NULL) {
3705 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3706 oldcookiec);
3707 cmd->cmd_sg = NULL;
3708 }
3709 }
3710
3711 if (cmd->cmd_sg == (mptti_t *)NULL) {
3712 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3713 cmd->cmd_cookiec), kf);
3714
3715 if (cmd->cmd_sg == (mptti_t *)NULL) {
3716 mptsas_log(mpt, CE_WARN,
3717 "unable to kmem_alloc enough memory "
3718 "for scatter/gather list");
3719 /*
3720 * if we have an ENOMEM condition we need to behave
3721 * the same way as the rest of this routine
3722 */
3723
3724 bioerror(bp, ENOMEM);
3725 if (new_cmd) {
3726 mptsas_scsi_destroy_pkt(ap, pkt);
3727 }
3728 return ((struct scsi_pkt *)NULL);
3729 }
3730 }
3731
3732 dmap = cmd->cmd_sg;
3733
3734 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3735
3736 /*
3737 * store the first segment into the S/G list
3738 */
3739 dmap->count = cmd->cmd_cookie.dmac_size;
3740 dmap->addr.address64.Low = (uint32_t)
3741 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3742 dmap->addr.address64.High = (uint32_t)
3743 (cmd->cmd_cookie.dmac_laddress >> 32);
3744
3745 /*
3746 * dmacount counts the size of the dma for this window
3747 * (if partial dma is being used). totaldmacount
3748 * keeps track of the total amount of dma we have
3749 * transferred for all the windows (needed to calculate
3750 * the resid value below).
3751 */
3752 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3753 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3754
3755 /*
3756 * We already stored the first DMA scatter gather segment,
3757 * start at 1 if we need to store more.
3758 */
3759 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3760 /*
3761 * Get next DMA cookie
3762 */
3763 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3764 &cmd->cmd_cookie);
3765 dmap++;
3766
3767 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3768 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3769
3770 /*
3771 * store the segment parms into the S/G list
3772 */
3773 dmap->count = cmd->cmd_cookie.dmac_size;
3774 dmap->addr.address64.Low = (uint32_t)
3775 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3776 dmap->addr.address64.High = (uint32_t)
3777 (cmd->cmd_cookie.dmac_laddress >> 32);
3778 }
3779
3780 /*
3781 * If this was partially allocated we set the resid
3782 * the amount of data NOT transferred in this window
3783 * If there is only one window, the resid will be 0
3784 */
3785 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3786 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3787 }
3788 return (pkt);
3789 }
3790
3791 /*
3792 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3793 *
3794 * Notes:
3795 * - also frees DMA resources if allocated
3796 * - implicit DMA synchonization
3797 */
3798 static void
3799 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3800 {
3801 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3802 mptsas_t *mpt = ADDR2MPT(ap);
3803
3804 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3805 ap->a_target, (void *)pkt));
3806
3807 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3808 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3809 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3810 }
3811
3812 if (cmd->cmd_sg) {
3813 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3814 cmd->cmd_sg = NULL;
3815 }
3816
3817 mptsas_free_extra_sgl_frame(mpt, cmd);
3818
3819 if ((cmd->cmd_flags &
3820 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3821 CFLAG_SCBEXTERN)) == 0) {
3822 cmd->cmd_flags = CFLAG_FREE;
3823 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3824 } else {
3825 mptsas_pkt_destroy_extern(mpt, cmd);
3826 }
3827 }
3828
3829 /*
3830 * kmem cache constructor and destructor:
3831 * When constructing, we bzero the cmd and allocate the dma handle
3832 * When destructing, just free the dma handle
3833 */
3834 static int
3835 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3836 {
3837 mptsas_cmd_t *cmd = buf;
3838 mptsas_t *mpt = cdrarg;
3839 struct scsi_address ap;
3840 uint_t cookiec;
3841 ddi_dma_attr_t arq_dma_attr;
3842 int (*callback)(caddr_t);
3843
3844 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3845
3846 NDBG4(("mptsas_kmem_cache_constructor"));
3847
3848 ap.a_hba_tran = mpt->m_tran;
3849 ap.a_target = 0;
3850 ap.a_lun = 0;
3851
3852 /*
3853 * allocate a dma handle
3854 */
3855 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3856 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3857 cmd->cmd_dmahandle = NULL;
3858 return (-1);
3859 }
3860
3861 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3862 SENSE_LENGTH, B_READ, callback, NULL);
3863 if (cmd->cmd_arq_buf == NULL) {
3864 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3865 cmd->cmd_dmahandle = NULL;
3866 return (-1);
3867 }
3868
3869 /*
3870 * allocate a arq handle
3871 */
3872 arq_dma_attr = mpt->m_msg_dma_attr;
3873 arq_dma_attr.dma_attr_sgllen = 1;
3874 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3875 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3876 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3877 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3878 cmd->cmd_dmahandle = NULL;
3879 cmd->cmd_arqhandle = NULL;
3880 return (-1);
3881 }
3882
3883 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3884 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3885 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3886 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3887 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3888 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3889 cmd->cmd_dmahandle = NULL;
3890 cmd->cmd_arqhandle = NULL;
3891 cmd->cmd_arq_buf = NULL;
3892 return (-1);
3893 }
3894
3895 return (0);
3896 }
3897
3898 static void
3899 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3900 {
3901 #ifndef __lock_lint
3902 _NOTE(ARGUNUSED(cdrarg))
3903 #endif
3904 mptsas_cmd_t *cmd = buf;
3905
3906 NDBG4(("mptsas_kmem_cache_destructor"));
3907
3908 if (cmd->cmd_arqhandle) {
3909 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3910 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3911 cmd->cmd_arqhandle = NULL;
3912 }
3913 if (cmd->cmd_arq_buf) {
3914 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3915 cmd->cmd_arq_buf = NULL;
3916 }
3917 if (cmd->cmd_dmahandle) {
3918 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3919 cmd->cmd_dmahandle = NULL;
3920 }
3921 }
3922
/*
 * kmem cache constructor for extra SGL chain frame buffers.
 *
 * Allocates a 16-byte-aligned, consistent DMA area big enough for
 * (m_max_request_frames - 1) request frames, bound through a single
 * cookie, and records both the kernel virtual address and the DMA
 * address in the cache object.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t *p = buf;
	mptsas_t *mpt = cdrarg;
	ddi_dma_attr_t frame_dma_attr;
	size_t mem_size, alloc_len;
	ddi_dma_cookie_t cookie;
	uint_t ncookie;
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/*
	 * Frames must be 16-byte aligned and reachable through one
	 * S/G element, hence the private copy of the attributes.
	 */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/* One frame is the main frame itself, so max - 1 extras. */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3978
3979 static void
3980 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3981 {
3982 #ifndef __lock_lint
3983 _NOTE(ARGUNUSED(cdrarg))
3984 #endif
3985 mptsas_cache_frames_t *p = buf;
3986 if (p->m_dma_hdl != NULL) {
3987 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3988 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3989 ddi_dma_free_handle(&p->m_dma_hdl);
3990 p->m_phys_addr = NULL;
3991 p->m_frames_addr = NULL;
3992 p->m_dma_hdl = NULL;
3993 p->m_acc_hdl = NULL;
3994 }
3995
3996 }
3997
3998 /*
3999 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4000 * for non-standard length cdb, pkt_private, status areas
4001 * if allocation fails, then deallocate all external space and the pkt
4002 */
4003 /* ARGSUSED */
4004 static int
4005 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4006 int cmdlen, int tgtlen, int statuslen, int kf)
4007 {
4008 caddr_t cdbp, scbp, tgt;
4009 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
4010 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
4011 struct scsi_address ap;
4012 size_t senselength;
4013 ddi_dma_attr_t ext_arq_dma_attr;
4014 uint_t cookiec;
4015
4016 NDBG3(("mptsas_pkt_alloc_extern: "
4017 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4018 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4019
4020 tgt = cdbp = scbp = NULL;
4021 cmd->cmd_scblen = statuslen;
4022 cmd->cmd_privlen = (uchar_t)tgtlen;
4023
4024 if (cmdlen > sizeof (cmd->cmd_cdb)) {
4025 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4026 goto fail;
4027 }
4028 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4029 cmd->cmd_flags |= CFLAG_CDBEXTERN;
4030 }
4031 if (tgtlen > PKT_PRIV_LEN) {
4032 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4033 goto fail;
4034 }
4035 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4036 cmd->cmd_pkt->pkt_private = tgt;
4037 }
4038 if (statuslen > EXTCMDS_STATUS_SIZE) {
4039 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4040 goto fail;
4041 }
4042 cmd->cmd_flags |= CFLAG_SCBEXTERN;
4043 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4044
4045 /* allocate sense data buf for DMA */
4046
4047 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
4048 struct scsi_arq_status, sts_sensedata);
4049 cmd->cmd_rqslen = (uchar_t)senselength;
4050
4051 ap.a_hba_tran = mpt->m_tran;
4052 ap.a_target = 0;
4053 ap.a_lun = 0;
4054
4055 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
4056 (struct buf *)NULL, senselength, B_READ,
4057 callback, NULL);
4058
4059 if (cmd->cmd_ext_arq_buf == NULL) {
4060 goto fail;
4061 }
4062 /*
4063 * allocate a extern arq handle and bind the buf
4064 */
4065 ext_arq_dma_attr = mpt->m_msg_dma_attr;
4066 ext_arq_dma_attr.dma_attr_sgllen = 1;
4067 if ((ddi_dma_alloc_handle(mpt->m_dip,
4068 &ext_arq_dma_attr, callback,
4069 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
4070 goto fail;
4071 }
4072
4073 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
4074 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
4075 callback, NULL, &cmd->cmd_ext_arqcookie,
4076 &cookiec)
4077 != DDI_SUCCESS) {
4078 goto fail;
4079 }
4080 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4081 }
4082 return (0);
4083 fail:
4084 mptsas_pkt_destroy_extern(mpt, cmd);
4085 return (1);
4086 }
4087
4088 /*
4089 * deallocate external pkt space and deallocate the pkt
4090 */
4091 static void
4092 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4093 {
4094 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4095
4096 if (cmd->cmd_flags & CFLAG_FREE) {
4097 mptsas_log(mpt, CE_PANIC,
4098 "mptsas_pkt_destroy_extern: freeing free packet");
4099 _NOTE(NOT_REACHED)
4100 /* NOTREACHED */
4101 }
4102 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4103 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4104 }
4105 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4106 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4107 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4108 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4109 }
4110 if (cmd->cmd_ext_arqhandle) {
4111 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4112 cmd->cmd_ext_arqhandle = NULL;
4113 }
4114 if (cmd->cmd_ext_arq_buf)
4115 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4116 }
4117 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4118 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4119 }
4120 cmd->cmd_flags = CFLAG_FREE;
4121 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4122 }
4123
4124 /*
4125 * tran_sync_pkt(9E) - explicit DMA synchronization
4126 */
4127 /*ARGSUSED*/
4128 static void
4129 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4130 {
4131 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4132
4133 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4134 ap->a_target, (void *)pkt));
4135
4136 if (cmd->cmd_dmahandle) {
4137 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4138 (cmd->cmd_flags & CFLAG_DMASEND) ?
4139 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4140 }
4141 }
4142
4143 /*
4144 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4145 */
4146 /*ARGSUSED*/
4147 static void
4148 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4149 {
4150 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4151 mptsas_t *mpt = ADDR2MPT(ap);
4152
4153 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4154 ap->a_target, (void *)pkt));
4155
4156 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4157 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4158 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4159 }
4160
4161 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4162 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4163 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4164 }
4165
4166 mptsas_free_extra_sgl_frame(mpt, cmd);
4167 }
4168
4169 static void
4170 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4171 {
4172 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4173 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4174 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4175 DDI_DMA_SYNC_FORCPU);
4176 }
4177 (*pkt->pkt_comp)(pkt);
4178 }
4179
4180 static void
mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	/*
	 * Build the MPI2 64-bit scatter/gather list for this command into
	 * the SCSI IO request frame.  Sets the data-direction bit in
	 * *control and the frame's DataLength.  When the DMA cookies do not
	 * all fit in the main frame, the list is continued through chain
	 * elements into the command's pre-allocated extra frame buffer
	 * (cmd->cmd_extra_frames).
	 */
	uint_t cookiec;
	mptti_t *dmap;
	uint32_t flags;
	pMpi2SGESimple64_t sge;
	pMpi2SGEChain64_t sgechain;
	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));

	/*
	 * Set read/write bit in control.
	 */
	if (cmd->cmd_flags & CFLAG_DMASEND) {
		*control |= MPI2_SCSIIO_CONTROL_WRITE;
	} else {
		*control |= MPI2_SCSIIO_CONTROL_READ;
	}

	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);

	/*
	 * We have 2 cases here.  First where we can fit all the
	 * SG elements into the main frame, and the case
	 * where we can't.
	 * If we have more cookies than we can attach to a frame
	 * we will need to use a chain element to point
	 * a location of memory where the rest of the S/G
	 * elements reside.
	 */
	if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);
		while (cookiec--) {
			ddi_put32(acc_hdl,
			    &sge->Address.Low, dmap->addr.address64.Low);
			ddi_put32(acc_hdl,
			    &sge->Address.High, dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength,
			    dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)
			    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last cookie, we set the flags
			 * to indicate so
			 */
			if (cookiec == 0) {
				flags |=
				    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
				    | MPI2_SGE_FLAGS_END_OF_BUFFER
				    | MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	} else {
		/*
		 * Hereby we start to deal with multiple frames.
		 * The process is as follows:
		 * 1. Determine how many frames are needed for SGL element
		 *    storage; Note that all frames are stored in contiguous
		 *    memory space and in 64-bit DMA mode each element is
		 *    3 double-words (12 bytes) long.
		 * 2. Fill up the main frame.  We need to do this separately
		 *    since it contains the SCSI IO request header and needs
		 *    dedicated processing.  Note that the last 4 double-words
		 *    of the SCSI IO header is for SGL element storage
		 *    (MPI2_SGE_IO_UNION).
		 * 3. Fill the chain element in the main frame, so the DMA
		 *    engine can use the following frames.
		 * 4. Enter a loop to fill the remaining frames.  Note that the
		 *    last frame contains no chain element.  The remaining
		 *    frames go into the mpt SGL buffer allocated on the fly,
		 *    not immediately following the main message frame, as in
		 *    Gen1.
		 * Some restrictions:
		 * 1. For 64-bit DMA, the simple element and chain element
		 *    are both of 3 double-words (12 bytes) in size, even
		 *    though all frames are stored in the first 4G of mem
		 *    range and the higher 32-bits of the address are always 0.
		 * 2. On some controllers (like the 1064/1068), a frame can
		 *    hold SGL elements with the last 1 or 2 double-words
		 *    (4 or 8 bytes) un-used.  On these controllers, we should
		 *    recognize that there's not enough room for another SGL
		 *    element and move the sge pointer to the next frame.
		 */
		int i, j, k, l, frames, sgemax;
		int temp;
		uint8_t chainflags;
		uint16_t chainlength;
		mptsas_cache_frames_t *p;

		/*
		 * Sgemax is the number of SGE's that will fit
		 * each extra frame and frames is total
		 * number of frames we'll need.  1 sge entry per
		 * frame is reserved for the chain element, thus the -1 below.
		 */
		sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
		    - 1);
		temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

		/*
		 * A little check to see if we need to round up the number
		 * of frames we need
		 */
		if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
		    sgemax) > 1) {
			frames = (temp + 1);
		} else {
			frames = temp;
		}
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);

		/*
		 * First fill in the main frame
		 */
		for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
			ddi_put32(acc_hdl, &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(acc_hdl, &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last SGE of this frame
			 * we set the end of list flag
			 */
			if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}

		/*
		 * Fill in the chain element in the main frame.
		 * About calculation on ChainOffset:
		 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
		 *    in the end reserved for SGL element storage
		 *    (MPI2_SGE_IO_UNION); we should count it in our
		 *    calculation.  See its definition in the header file.
		 * 2. Constant j is the counter of the current SGL element
		 *    that will be processed, and (j - 1) is the number of
		 *    SGL elements that have been processed (stored in the
		 *    main frame).
		 * 3. ChainOffset value should be in units of double-words (4
		 *    bytes) so the last value should be divided by 4.
		 */
		ddi_put8(acc_hdl, &frame->ChainOffset,
		    (sizeof (MPI2_SCSI_IO_REQUEST) -
		    sizeof (MPI2_SGE_IO_UNION) +
		    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		sgechain = (pMpi2SGEChain64_t)sge;
		chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
		ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

		/*
		 * The size of the next frame is the accurate size of space
		 * (in bytes) used to store the SGL elements.  j is the counter
		 * of SGL elements.  (j - 1) is the number of SGL elements that
		 * have been processed (stored in frames).
		 * With two or more extra frames, the first extra frame is
		 * always completely filled; otherwise only the remaining
		 * elements are stored there.
		 */
		if (frames >= 2) {
			chainlength = mpt->m_req_frame_size /
			    sizeof (MPI2_SGE_SIMPLE64) *
			    sizeof (MPI2_SGE_SIMPLE64);
		} else {
			chainlength = ((cookiec - (j - 1)) *
			    sizeof (MPI2_SGE_SIMPLE64));
		}

		p = cmd->cmd_extra_frames;

		ddi_put16(acc_hdl, &sgechain->Length, chainlength);
		ddi_put32(acc_hdl, &sgechain->Address.Low,
		    p->m_phys_addr);
		/* SGL is allocated in the first 4G mem range */
		ddi_put32(acc_hdl, &sgechain->Address.High, 0);

		/*
		 * If two or more extra frames are needed we have to
		 * fill in the next chain offset to the location of
		 * the chain element in the next frame.
		 * sgemax is the number of simple elements in an extra
		 * frame.  Note that the value NextChainOffset should be
		 * in double-words (4 bytes).
		 */
		if (frames >= 2) {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset,
			    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		} else {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
		}

		/*
		 * Jump to next frame;
		 * Starting here, chain buffers go into the per command SGL.
		 * This buffer is allocated when chain buffers are needed.
		 */
		sge = (pMpi2SGESimple64_t)p->m_frames_addr;
		i = cookiec;

		/*
		 * Start filling in frames with SGE's.  If we
		 * reach the end of frame and still have SGE's
		 * to fill we need to add a chain element and
		 * use another frame.  j will be our counter
		 * for what cookie we are at and i will be
		 * the total cookiec.  k is the current frame
		 */
		for (k = 1; k <= frames; k++) {
			for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

				/*
				 * If we have reached the end of frame
				 * and we have more SGE's to fill in
				 * we have to fill the final entry
				 * with a chain element and then
				 * continue to the next frame
				 */
				if ((l == (sgemax + 1)) && (k != frames)) {
					sgechain = (pMpi2SGEChain64_t)sge;
					/*
					 * Back j up: this slot holds a chain
					 * element, not a cookie, so the
					 * current cookie carries over into
					 * the next frame.
					 */
					j--;
					chainflags = (
					    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
					    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
					    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
					ddi_put8(p->m_acc_hdl,
					    &sgechain->Flags, chainflags);
					/*
					 * k is the frame counter and (k + 1)
					 * is the number of the next frame.
					 * Note that frames are in contiguous
					 * memory space.
					 */
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.Low,
					    (p->m_phys_addr +
					    (mpt->m_req_frame_size * k)));
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.High, 0);

					/*
					 * If two or more frames remain we
					 * have to set the next chain offset
					 * to the location of the chain
					 * element in the next frame and fill
					 * in the length of the next chain
					 */
					if ((frames - k) >= 2) {
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    (sgemax *
						    sizeof (MPI2_SGE_SIMPLE64))
						    >> 2);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    mpt->m_req_frame_size /
						    sizeof (MPI2_SGE_SIMPLE64) *
						    sizeof (MPI2_SGE_SIMPLE64));
					} else {
						/*
						 * This is the last frame.  Set
						 * the NextChainOffset to 0 and
						 * Length is the total size of
						 * all remaining simple elements
						 */
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    0);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    (cookiec - j) *
						    sizeof (MPI2_SGE_SIMPLE64));
					}

					/* Jump to the next frame */
					sge = (pMpi2SGESimple64_t)
					    ((char *)p->m_frames_addr +
					    (int)mpt->m_req_frame_size * k);

					continue;
				}

				ddi_put32(p->m_acc_hdl,
				    &sge->Address.Low,
				    dmap->addr.address64.Low);
				ddi_put32(p->m_acc_hdl,
				    &sge->Address.High,
				    dmap->addr.address64.High);
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, dmap->count);
				flags = ddi_get32(p->m_acc_hdl,
				    &sge->FlagsLength);
				flags |= ((uint32_t)(
				    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
				    MPI2_SGE_FLAGS_SHIFT);

				/*
				 * If we are at the end of the frame and
				 * there is another frame to fill in
				 * we set the last simple element as last
				 * element
				 */
				if ((l == sgemax) && (k != frames)) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}

				/*
				 * If this is the final cookie we
				 * indicate it by setting the flags
				 */
				if (j == i) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT |
					    MPI2_SGE_FLAGS_END_OF_BUFFER |
					    MPI2_SGE_FLAGS_END_OF_LIST) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				if (cmd->cmd_flags & CFLAG_DMASEND) {
					flags |=
					    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
					    MPI2_SGE_FLAGS_SHIFT);
				} else {
					flags |=
					    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, flags);
				dmap++;
				sge++;
			}
		}

		/*
		 * Sync DMA with the chain buffers that were just created
		 */
		(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	}
}
4569
4570 /*
4571 * Interrupt handling
4572 * Utility routine. Poll for status of a command sent to HBA
4573 * without interrupts (a FLAG_NOINTR command).
4574 */
4575 int
4576 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4577 {
4578 int rval = TRUE;
4579
4580 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4581
4582 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4583 mptsas_restart_hba(mpt);
4584 }
4585
4586 /*
4587 * Wait, using drv_usecwait(), long enough for the command to
4588 * reasonably return from the target if the target isn't
4589 * "dead". A polled command may well be sent from scsi_poll, and
4590 * there are retries built in to scsi_poll if the transport
4591 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4592 * and retries the transport up to scsi_poll_busycnt times
4593 * (currently 60) if
4594 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4595 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4596 *
4597 * limit the waiting to avoid a hang in the event that the
4598 * cmd never gets started but we are still receiving interrupts
4599 */
4600 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4601 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4602 NDBG5(("mptsas_poll: command incomplete"));
4603 rval = FALSE;
4604 break;
4605 }
4606 }
4607
4608 if (rval == FALSE) {
4609
4610 /*
4611 * this isn't supposed to happen, the hba must be wedged
4612 * Mark this cmd as a timeout.
4613 */
4614 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4615 (STAT_TIMEOUT|STAT_ABORTED));
4616
4617 if (poll_cmd->cmd_queued == FALSE) {
4618
4619 NDBG5(("mptsas_poll: not on waitq"));
4620
4621 poll_cmd->cmd_pkt->pkt_state |=
4622 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4623 } else {
4624
4625 /* find and remove it from the waitq */
4626 NDBG5(("mptsas_poll: delete from waitq"));
4627 mptsas_waitq_delete(mpt, poll_cmd);
4628 }
4629
4630 }
4631 mptsas_fma_check(mpt, poll_cmd);
4632 NDBG5(("mptsas_poll: done"));
4633 return (rval);
4634 }
4635
4636 /*
4637 * Used for polling cmds and TM function
4638 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int cnt;
	pMpi2ReplyDescriptorsUnion_t reply_desc_union;
	uint32_t int_mask;

	NDBG5(("mptsas_wait_intr"));

	/* Tell the interrupt path that we are polling the reply queue. */
	mpt->m_polled_intr = 1;

	/*
	 * Get the current interrupt mask and disable interrupts.  When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll the reply post queue for up to roughly polltime
	 * milliseconds: each pass that finds no valid reply descriptor
	 * waits 1000us before retrying.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/* An all-ones descriptor means the slot is still unused. */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, reply_desc_union);

		/* Advance (and wrap) our post queue index. */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mpt->m_polled_intr = 0;

		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Clear polling flag, re-enable interrupts and quit.
	 */
	mpt->m_polled_intr = 0;
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
4708
4709 static void
4710 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4711 pMpi2ReplyDescriptorsUnion_t reply_desc)
4712 {
4713 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4714 uint16_t SMID;
4715 mptsas_slots_t *slots = mpt->m_active;
4716 mptsas_cmd_t *cmd = NULL;
4717 struct scsi_pkt *pkt;
4718
4719 ASSERT(mutex_owned(&mpt->m_mutex));
4720
4721 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4722 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4723
4724 /*
4725 * This is a success reply so just complete the IO. First, do a sanity
4726 * check on the SMID. The final slot is used for TM requests, which
4727 * would not come into this reply handler.
4728 */
4729 if ((SMID == 0) || (SMID > slots->m_n_normal)) {
4730 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4731 SMID);
4732 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4733 return;
4734 }
4735
4736 cmd = slots->m_slot[SMID];
4737
4738 /*
4739 * print warning and return if the slot is empty
4740 */
4741 if (cmd == NULL) {
4742 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4743 "in slot %d", SMID);
4744 return;
4745 }
4746
4747 pkt = CMD2PKT(cmd);
4748 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4749 STATE_GOT_STATUS);
4750 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4751 pkt->pkt_state |= STATE_XFERRED_DATA;
4752 }
4753 pkt->pkt_resid = 0;
4754
4755 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4756 cmd->cmd_flags |= CFLAG_FINISHED;
4757 cv_broadcast(&mpt->m_passthru_cv);
4758 return;
4759 } else {
4760 mptsas_remove_cmd(mpt, cmd);
4761 }
4762
4763 if (cmd->cmd_flags & CFLAG_RETRY) {
4764 /*
4765 * The target returned QFULL or busy, do not add tihs
4766 * pkt to the doneq since the hba will retry
4767 * this cmd.
4768 *
4769 * The pkt has already been resubmitted in
4770 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4771 * Remove this cmd_flag here.
4772 */
4773 cmd->cmd_flags &= ~CFLAG_RETRY;
4774 } else {
4775 mptsas_doneq_add(mpt, cmd);
4776 }
4777 }
4778
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	/*
	 * Handle an "address reply" descriptor: the IOC has posted a full
	 * reply frame whose DMA address is carried in the descriptor.
	 * Validate the frame address, find the owning command (when the
	 * function type has one), dispatch on the reply's Function code,
	 * and finally return the reply frame to the free queue.
	 */
	pMpi2AddressReplyDescriptor_t address_reply;
	pMPI2DefaultReply_t reply;
	mptsas_fw_diagnostic_buffer_t *pBuffer;
	uint32_t reply_addr;
	uint16_t SMID, iocstatus;
	mptsas_slots_t *slots = mpt->m_active;
	mptsas_cmd_t *cmd = NULL;
	uint8_t function, buffer_type;
	m_replyh_arg_t *args;
	int reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range (within the reply
	 * frame pool and aligned to a frame boundary) we should ignore
	 * this message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		/*
		 * Passthru, config and FW-diag commands are completed by
		 * waking their waiting thread; the waiter is responsible
		 * for the reply frame (stashed in cmd_rfm).
		 */
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			/* Firmware commands stay in their slot. */
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		/*
		 * Events are handed off to the event taskq; the taskq
		 * thread (or the error path below) returns the reply frame.
		 */
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			NDBG20(("dropping event received during reset"));
			return;
		}

		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 * Note that cmd is non-NULL here: the EVENT_NOTIFICATION and
	 * DIAG_BUFFER_POST cases (the only ones that skip the slot
	 * lookup) returned above.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* Firmware commands are completed by their own waiters. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5006
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	/*
	 * Translate an error/status reply frame for a SCSI IO into the
	 * packet's pkt_reason/pkt_state/pkt_resid, building the auto-sense
	 * (ARQ) status in pkt_scbp for CHECK CONDITION, and triggering
	 * driver-level recovery (throttling, DR events, retries) where the
	 * ioc_status calls for it.
	 */
	uint8_t scsi_status, scsi_state;
	uint16_t ioc_status;
	uint32_t xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt *pkt;
	struct scsi_arq_status *arqstat;
	struct buf *bp;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
	uint8_t *sensedata = NULL;

	/*
	 * Sense data lands in the extended ARQ buffer only when both the
	 * external-SCB and extended-ARQ flags are set; otherwise the
	 * regular ARQ buffer is used.
	 */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 * Report STATUS_BUSY so the upper layers retry.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	/*
	 * Device is gone: fail with CMD_INCOMPLETE and start draining the
	 * target's queue (unless a reset delay is already pending).
	 */
	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte of the response info is the code. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/*
		 * Build the auto request sense (ARQ) status structure in
		 * pkt_scbp and copy in the sense data the IOC returned.
		 */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* Copy no more sense bytes than the ARQ buffer can hold. */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 * Either way, kick off dynamic reconfiguration of the
		 * target via the DR taskq.
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/*
		 * SCSI status is GOOD but the IOC reported a problem;
		 * dispatch on the ioc_status code.
		 */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_BUS_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5254
5255 static void
5256 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5257 mptsas_cmd_t *cmd)
5258 {
5259 uint8_t task_type;
5260 uint16_t ioc_status;
5261 uint32_t log_info;
5262 uint16_t dev_handle;
5263 struct scsi_pkt *pkt = CMD2PKT(cmd);
5264
5265 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5266 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5267 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5268 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5269
5270 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5271 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5272 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5273 task_type, ioc_status, log_info, dev_handle);
5274 pkt->pkt_reason = CMD_INCOMPLETE;
5275 return;
5276 }
5277
5278 switch (task_type) {
5279 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5280 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5281 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5282 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5283 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5284 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5285 break;
5286 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5287 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5288 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5289 /*
5290 * Check for invalid DevHandle of 0 in case application
5291 * sends bad command. DevHandle of 0 could cause problems.
5292 */
5293 if (dev_handle == 0) {
5294 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5295 " DevHandle of 0.");
5296 } else {
5297 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5298 task_type);
5299 }
5300 break;
5301 default:
5302 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5303 task_type);
5304 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5305 break;
5306 }
5307 }
5308
/*
 * Helper-thread worker loop: drains completed commands from this thread's
 * per-thread done queue and runs their completion callbacks with the list
 * mutex dropped.  `arg->t' selects this thread's slot in
 * mpt->m_doneq_thread_id[].  The loop exits when the
 * MPTSAS_DONEQ_THREAD_ACTIVE flag is cleared, after which the thread
 * decrements m_doneq_thread_n and broadcasts m_doneq_thread_cv so waiters
 * on that condition variable can observe the exit.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t		*mpt = arg->mpt;
	uint64_t		t = arg->t;
	mptsas_cmd_t		*cmd;
	struct scsi_pkt		*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		if (!item->doneq) {
			/* Queue is empty; sleep until signalled. */
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list lock while calling the completion routine;
		 * it may take arbitrarily long and re-enter the driver.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Account for this thread's exit and wake anyone waiting on it. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5340
5341
5342 /*
5343 * mpt interrupt handler.
5344 */
/*
 * Interrupt service routine.  Scans the reply post queue for new reply
 * descriptors, dispatches each one via mptsas_process_intr(), then updates
 * the chip's ReplyPostHostIndex register and drains/hands off the done
 * queue.  Returns DDI_INTR_CLAIMED if any work was found, otherwise
 * DDI_INTR_UNCLAIMED (shared-interrupt, low-power, or polling cases).
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
	mptsas_t			*mpt = (void *)arg1;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uchar_t				did_reply = FALSE;

	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

	mutex_enter(&mpt->m_mutex);

	/*
	 * If interrupts are shared by two channels then check whether this
	 * interrupt is genuinely for this channel by making sure first the
	 * chip is in high power state.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * If polling, interrupt was triggered by some shared interrupt because
	 * IOC interrupts are disabled during polling, so polling routine will
	 * handle any replies. Considering this, if polling is happening,
	 * return with interrupt unclaimed.
	 */
	if (mpt->m_polled_intr) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Read the istat register.
	 */
	if ((INTPENDING(mpt)) != 0) {
		/*
		 * read fifo until empty.  A descriptor whose low or high
		 * dword reads as all 1s is an unused slot (slots are reset
		 * to 0xFF..FF after processing), which marks the end of the
		 * new replies.
		 */
#ifndef __lock_lint
		_NOTE(CONSTCOND)
#endif
		while (TRUE) {
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

			if (ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
				break;
			}

			/*
			 * The reply is valid, process it according to its
			 * type. Also, set a flag for updating the reply index
			 * after they've all been processed.
			 */
			did_reply = TRUE;

			mptsas_process_intr(mpt, reply_desc_union);

			/*
			 * Increment post index and roll over if needed.
			 */
			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
				mpt->m_post_index = 0;
			}
		}

		/*
		 * Update the global reply index if at least one reply was
		 * processed.
		 */
		if (did_reply) {
			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	NDBG1(("mptsas_intr complete"));

	/*
	 * If no helper threads are created, process the doneq in ISR. If
	 * helpers are created, use the doneq length as a metric to measure the
	 * load on the interrupt CPU. If it is long enough, which indicates the
	 * load is heavy, then we deliver the IO completions to the helpers.
	 * This measurement has some limitations, although it is simple and
	 * straightforward and works well for most of the cases at present.
	 */
	if (!mpt->m_doneq_thread_n ||
	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
		mptsas_doneq_empty(mpt);
	} else {
		mptsas_deliver_doneq_thread(mpt);
	}

	/*
	 * If there are queued cmd, start them now.
	 */
	if (mpt->m_waitq != NULL) {
		mptsas_restart_waitq(mpt);
	}

	mutex_exit(&mpt->m_mutex);
	return (DDI_INTR_CLAIMED);
}
5458
5459 static void
5460 mptsas_process_intr(mptsas_t *mpt,
5461 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5462 {
5463 uint8_t reply_type;
5464
5465 ASSERT(mutex_owned(&mpt->m_mutex));
5466
5467 /*
5468 * The reply is valid, process it according to its
5469 * type. Also, set a flag for updated the reply index
5470 * after they've all been processed.
5471 */
5472 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5473 &reply_desc_union->Default.ReplyFlags);
5474 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5475 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5476 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5477 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5478 mptsas_handle_address_reply(mpt, reply_desc_union);
5479 } else {
5480 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5481 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5482 }
5483
5484 /*
5485 * Clear the reply descriptor for re-use and increment
5486 * index.
5487 */
5488 ddi_put64(mpt->m_acc_post_queue_hdl,
5489 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5490 0xFFFFFFFFFFFFFFFF);
5491 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5492 DDI_DMA_SYNC_FORDEV);
5493 }
5494
5495 /*
5496 * handle qfull condition
5497 */
5498 static void
5499 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5500 {
5501 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5502
5503 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5504 (ptgt->m_qfull_retries == 0)) {
5505 /*
5506 * We have exhausted the retries on QFULL, or,
5507 * the target driver has indicated that it
5508 * wants to handle QFULL itself by setting
5509 * qfull-retries capability to 0. In either case
5510 * we want the target driver's QFULL handling
5511 * to kick in. We do this by having pkt_reason
5512 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5513 */
5514 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5515 } else {
5516 if (ptgt->m_reset_delay == 0) {
5517 ptgt->m_t_throttle =
5518 max((ptgt->m_t_ncmds - 2), 0);
5519 }
5520
5521 cmd->cmd_pkt_flags |= FLAG_HEAD;
5522 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5523 cmd->cmd_flags |= CFLAG_RETRY;
5524
5525 (void) mptsas_accept_pkt(mpt, cmd);
5526
5527 /*
5528 * when target gives queue full status with no commands
5529 * outstanding (m_t_ncmds == 0), throttle is set to 0
5530 * (HOLD_THROTTLE), and the queue full handling start
5531 * (see psarc/1994/313); if there are commands outstanding,
5532 * throttle is set to (m_t_ncmds - 2)
5533 */
5534 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5535 /*
5536 * By setting throttle to QFULL_THROTTLE, we
5537 * avoid submitting new commands and in
5538 * mptsas_restart_cmd find out slots which need
5539 * their throttles to be cleared.
5540 */
5541 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5542 if (mpt->m_restart_cmd_timeid == 0) {
5543 mpt->m_restart_cmd_timeid =
5544 timeout(mptsas_restart_cmd, mpt,
5545 ptgt->m_qfull_retry_interval);
5546 }
5547 }
5548 }
5549 }
5550
5551 mptsas_phymask_t
5552 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5553 {
5554 mptsas_phymask_t phy_mask = 0;
5555 uint8_t i = 0;
5556
5557 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5558
5559 ASSERT(mutex_owned(&mpt->m_mutex));
5560
5561 /*
5562 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5563 */
5564 if (physport == 0xFF) {
5565 return (0);
5566 }
5567
5568 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5569 if (mpt->m_phy_info[i].attached_devhdl &&
5570 (mpt->m_phy_info[i].phy_mask != 0) &&
5571 (mpt->m_phy_info[i].port_num == physport)) {
5572 phy_mask = mpt->m_phy_info[i].phy_mask;
5573 break;
5574 }
5575 }
5576 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5577 mpt->m_instance, physport, phy_mask));
5578 return (phy_mask);
5579 }
5580
5581 /*
5582 * mpt free device handle after device gone, by use of passthrough
5583 */
5584 static int
5585 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5586 {
5587 Mpi2SasIoUnitControlRequest_t req;
5588 Mpi2SasIoUnitControlReply_t rep;
5589 int ret;
5590
5591 ASSERT(mutex_owned(&mpt->m_mutex));
5592
5593 /*
5594 * Need to compose a SAS IO Unit Control request message
5595 * and call mptsas_do_passthru() function
5596 */
5597 bzero(&req, sizeof (req));
5598 bzero(&rep, sizeof (rep));
5599
5600 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5601 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5602 req.DevHandle = LE_16(devhdl);
5603
5604 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5605 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5606 if (ret != 0) {
5607 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5608 "Control error %d", ret);
5609 return (DDI_FAILURE);
5610 }
5611
5612 /* do passthrough success, check the ioc status */
5613 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5614 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5615 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5616 return (DDI_FAILURE);
5617 }
5618
5619 return (DDI_SUCCESS);
5620 }
5621
5622 static void
5623 mptsas_update_phymask(mptsas_t *mpt)
5624 {
5625 mptsas_phymask_t mask = 0, phy_mask;
5626 char *phy_mask_name;
5627 uint8_t current_port;
5628 int i, j;
5629
5630 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5631
5632 ASSERT(mutex_owned(&mpt->m_mutex));
5633
5634 (void) mptsas_get_sas_io_unit_page(mpt);
5635
5636 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5637
5638 for (i = 0; i < mpt->m_num_phys; i++) {
5639 phy_mask = 0x00;
5640
5641 if (mpt->m_phy_info[i].attached_devhdl == 0)
5642 continue;
5643
5644 bzero(phy_mask_name, sizeof (phy_mask_name));
5645
5646 current_port = mpt->m_phy_info[i].port_num;
5647
5648 if ((mask & (1 << i)) != 0)
5649 continue;
5650
5651 for (j = 0; j < mpt->m_num_phys; j++) {
5652 if (mpt->m_phy_info[j].attached_devhdl &&
5653 (mpt->m_phy_info[j].port_num == current_port)) {
5654 phy_mask |= (1 << j);
5655 }
5656 }
5657 mask = mask | phy_mask;
5658
5659 for (j = 0; j < mpt->m_num_phys; j++) {
5660 if ((phy_mask >> j) & 0x01) {
5661 mpt->m_phy_info[j].phy_mask = phy_mask;
5662 }
5663 }
5664
5665 (void) sprintf(phy_mask_name, "%x", phy_mask);
5666
5667 mutex_exit(&mpt->m_mutex);
5668 /*
5669 * register a iport, if the port has already been existed
5670 * SCSA will do nothing and just return.
5671 */
5672 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5673 mutex_enter(&mpt->m_mutex);
5674 }
5675 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5676 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5677 }
5678
5679 /*
5680 * mptsas_handle_dr is a task handler for DR, the DR action includes:
5681 * 1. Directly attched Device Added/Removed.
5682 * 2. Expander Device Added/Removed.
5683 * 3. Indirectly Attached Device Added/Expander.
5684 * 4. LUNs of a existing device status change.
5685 * 5. RAID volume created/deleted.
5686 * 6. Member of RAID volume is released because of RAID deletion.
5687 * 7. Physical disks are removed because of RAID creation.
5688 */
5689 static void
5690 mptsas_handle_dr(void *args) {
5691 mptsas_topo_change_list_t *topo_node = NULL;
5692 mptsas_topo_change_list_t *save_node = NULL;
5693 mptsas_t *mpt;
5694 dev_info_t *parent = NULL;
5695 mptsas_phymask_t phymask = 0;
5696 char *phy_mask_name;
5697 uint8_t flags = 0, physport = 0xff;
5698 uint8_t port_update = 0;
5699 uint_t event;
5700
5701 topo_node = (mptsas_topo_change_list_t *)args;
5702
5703 mpt = topo_node->mpt;
5704 event = topo_node->event;
5705 flags = topo_node->flags;
5706
5707 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5708
5709 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
5710
5711 switch (event) {
5712 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5713 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5714 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
5715 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
5716 /*
5717 * Direct attached or expander attached device added
5718 * into system or a Phys Disk that is being unhidden.
5719 */
5720 port_update = 1;
5721 }
5722 break;
5723 case MPTSAS_DR_EVENT_RECONFIG_SMP:
5724 /*
5725 * New expander added into system, it must be the head
5726 * of topo_change_list_t
5727 */
5728 port_update = 1;
5729 break;
5730 default:
5731 port_update = 0;
5732 break;
5733 }
5734 /*
5735 * All cases port_update == 1 may cause initiator port form change
5736 */
5737 mutex_enter(&mpt->m_mutex);
5738 if (mpt->m_port_chng && port_update) {
5739 /*
5740 * mpt->m_port_chng flag indicates some PHYs of initiator
5741 * port have changed to online. So when expander added or
5742 * directly attached device online event come, we force to
5743 * update port information by issueing SAS IO Unit Page and
5744 * update PHYMASKs.
5745 */
5746 (void) mptsas_update_phymask(mpt);
5747 mpt->m_port_chng = 0;
5748
5749 }
5750 mutex_exit(&mpt->m_mutex);
5751 while (topo_node) {
5752 phymask = 0;
5753 if (parent == NULL) {
5754 physport = topo_node->un.physport;
5755 event = topo_node->event;
5756 flags = topo_node->flags;
5757 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
5758 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
5759 /*
5760 * For all offline events, phymask is known
5761 */
5762 phymask = topo_node->un.phymask;
5763 goto find_parent;
5764 }
5765 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
5766 goto handle_topo_change;
5767 }
5768 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
5769 phymask = topo_node->un.phymask;
5770 goto find_parent;
5771 }
5772
5773 if ((flags ==
5774 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
5775 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
5776 /*
5777 * There is no any field in IR_CONFIG_CHANGE
5778 * event indicate physport/phynum, let's get
5779 * parent after SAS Device Page0 request.
5780 */
5781 goto handle_topo_change;
5782 }
5783
5784 mutex_enter(&mpt->m_mutex);
5785 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
5786 /*
5787 * If the direct attached device added or a
5788 * phys disk is being unhidden, argument
5789 * physport actually is PHY#, so we have to get
5790 * phymask according PHY#.
5791 */
5792 physport = mpt->m_phy_info[physport].port_num;
5793 }
5794
5795 /*
5796 * Translate physport to phymask so that we can search
5797 * parent dip.
5798 */
5799 phymask = mptsas_physport_to_phymask(mpt,
5800 physport);
5801 mutex_exit(&mpt->m_mutex);
5802
5803 find_parent:
5804 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
5805 /*
5806 * For RAID topology change node, write the iport name
5807 * as v0.
5808 */
5809 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5810 (void) sprintf(phy_mask_name, "v0");
5811 } else {
5812 /*
5813 * phymask can bo 0 if the drive has been
5814 * pulled by the time an add event is
5815 * processed. If phymask is 0, just skip this
5816 * event and continue.
5817 */
5818 if (phymask == 0) {
5819 mutex_enter(&mpt->m_mutex);
5820 save_node = topo_node;
5821 topo_node = topo_node->next;
5822 ASSERT(save_node);
5823 kmem_free(save_node,
5824 sizeof (mptsas_topo_change_list_t));
5825 mutex_exit(&mpt->m_mutex);
5826
5827 parent = NULL;
5828 continue;
5829 }
5830 (void) sprintf(phy_mask_name, "%x", phymask);
5831 }
5832 parent = scsi_hba_iport_find(mpt->m_dip,
5833 phy_mask_name);
5834 if (parent == NULL) {
5835 mptsas_log(mpt, CE_WARN, "Failed to find an "
5836 "iport, should not happen!");
5837 goto out;
5838 }
5839
5840 }
5841 ASSERT(parent);
5842 handle_topo_change:
5843
5844 mutex_enter(&mpt->m_mutex);
5845 /*
5846 * If HBA is being reset, don't perform operations depending
5847 * on the IOC. We must free the topo list, however.
5848 */
5849 if (!mpt->m_in_reset)
5850 mptsas_handle_topo_change(topo_node, parent);
5851 else
5852 NDBG20(("skipping topo change received during reset"));
5853 save_node = topo_node;
5854 topo_node = topo_node->next;
5855 ASSERT(save_node);
5856 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
5857 mutex_exit(&mpt->m_mutex);
5858
5859 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5860 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
5861 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
5862 /*
5863 * If direct attached device associated, make sure
5864 * reset the parent before start the next one. But
5865 * all devices associated with expander shares the
5866 * parent. Also, reset parent if this is for RAID.
5867 */
5868 parent = NULL;
5869 }
5870 }
5871 out:
5872 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5873 }
5874
/*
 * Apply one topology-change node: online/offline a SAS/SATA target or
 * RAID volume, online/offline an expander (SMP) device, or free an
 * orphaned device handle.  Called from mptsas_handle_dr() with m_mutex
 * held; the lock is dropped around NDI/devinfo operations and re-taken
 * before returning (except on early-return error paths, which return
 * with m_mutex still held as acquired by the caller's loop).
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = refhash_linear_search(mpt->m_targets,
			    mptsas_target_eval_devhdl, &topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK than there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * A Phys Disk being unhidden: locate the iport by
			 * the target's phymask.
			 */
			phymask = ptgt->m_addr.mta_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_addr.mta_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);

				/*
				 * topo_node->un.physport is really the PHY#
				 * for direct attached devices
				 */
				mptsas_smhba_set_one_phy_props(mpt, parent,
				    topo_node->un.physport, &attached_devhdl);

				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_addr.mta_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		devhdl = topo_node->devhdl;
		ptgt = refhash_linear_search(mpt->m_targets,
		    mptsas_target_eval_devhdl, &devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;

		/* Build the unit address: WWN-based or PHY-based. */
		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl,
			    ptgt->m_addr.mta_phymask, rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		ptgt->m_led_status = 0;
		(void) mptsas_flush_led_status(mpt, ptgt);
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded: drop the target structure. */
			refhash_remove(mpt->m_targets, ptgt);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 to validate and describe the SMP. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(mpt, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = refhash_linear_search(mpt->m_smp_targets,
		    mptsas_smp_eval_devhdl, &devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			refhash_remove(mpt->m_smp_targets, psmp);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6307
6308 /*
6309 * Record the event if its type is enabled in mpt instance by ioctl.
6310 */
6311 static void
6312 mptsas_record_event(void *args)
6313 {
6314 m_replyh_arg_t *replyh_arg;
6315 pMpi2EventNotificationReply_t eventreply;
6316 uint32_t event, rfm;
6317 mptsas_t *mpt;
6318 int i, j;
6319 uint16_t event_data_len;
6320 boolean_t sendAEN = FALSE;
6321
6322 replyh_arg = (m_replyh_arg_t *)args;
6323 rfm = replyh_arg->rfm;
6324 mpt = replyh_arg->mpt;
6325
6326 eventreply = (pMpi2EventNotificationReply_t)
6327 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6328 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6329
6330
6331 /*
6332 * Generate a system event to let anyone who cares know that a
6333 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6334 * event mask is set to.
6335 */
6336 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6337 sendAEN = TRUE;
6338 }
6339
6340 /*
6341 * Record the event only if it is not masked. Determine which dword
6342 * and bit of event mask to test.
6343 */
6344 i = (uint8_t)(event / 32);
6345 j = (uint8_t)(event % 32);
6346 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6347 i = mpt->m_event_index;
6348 mpt->m_events[i].Type = event;
6349 mpt->m_events[i].Number = ++mpt->m_event_number;
6350 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6351 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6352 &eventreply->EventDataLength);
6353
6354 if (event_data_len > 0) {
6355 /*
6356 * Limit data to size in m_event entry
6357 */
6358 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6359 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6360 }
6361 for (j = 0; j < event_data_len; j++) {
6362 mpt->m_events[i].Data[j] =
6363 ddi_get32(mpt->m_acc_reply_frame_hdl,
6364 &(eventreply->EventData[j]));
6365 }
6366
6367 /*
6368 * check for index wrap-around
6369 */
6370 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6371 i = 0;
6372 }
6373 mpt->m_event_index = (uint8_t)i;
6374
6375 /*
6376 * Set flag to send the event.
6377 */
6378 sendAEN = TRUE;
6379 }
6380 }
6381
6382 /*
6383 * Generate a system event if flag is set to let anyone who cares know
6384 * that an event has occurred.
6385 */
6386 if (sendAEN) {
6387 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6388 "SAS", NULL, NULL, DDI_NOSLEEP);
6389 }
6390 }
6391
6392 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
/*
 * handle sync events from ioc in interrupt
 *
 * Called with mpt->m_mutex held.  Topology-affecting events (SAS topology
 * change list, IR configuration change list) are converted here into
 * mptsas_topo_change_list_t nodes and dispatched to the DR taskq; all
 * other events are left for mptsas_handle_event().
 *
 * return value:
 * DDI_SUCCESS: The event is handled by this func
 * DDI_FAILURE: Event is not handled
 */
static int
mptsas_handle_event_sync(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	uint_t				iocstatus;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Locate the reply frame for this event within the reply pool. */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);

	/*
	 * Assignment in the condition is intentional: any nonzero
	 * IOCStatus is logged (with '!' suppressing console output when
	 * it merely indicates log info is available).
	 */
	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
	    &eventreply->IOCStatus)) {
		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			mptsas_log(mpt, CE_WARN,
			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		} else {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		}
	}

	/*
	 * figure out what kind of event we got and handle accordingly
	 */
	switch (event) {
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
		uint8_t				num_entries, expstatus, phy;
		uint8_t				phystatus, physport, state, i;
		uint8_t				start_phy_num, link_rate;
		uint16_t			dev_handle, reason_code;
		uint16_t			enc_handle, expd_handle;
		char				string[80], curr[80], prev[80];
		mptsas_topo_change_list_t	*topo_head = NULL;
		mptsas_topo_change_list_t	*topo_tail = NULL;
		mptsas_topo_change_list_t	*topo_node = NULL;
		mptsas_target_t			*ptgt;
		mptsas_smp_t			*psmp;
		uint8_t				flags = 0, exp_flag;
		smhba_info_t			*pSmhba = NULL;

		NDBG20(("mptsas_handle_event_sync: SAS topology change"));

		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
		    eventreply->EventData;

		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->EnclosureHandle);
		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpanderDevHandle);
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->NumEntries);
		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->StartPhyNum);
		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpStatus);
		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->PhysicalPort);

		/*
		 * A nonzero expander device handle means the change
		 * happened behind an expander; handle expander-level
		 * arrival/removal first.
		 */
		string[0] = 0;
		if (expd_handle) {
			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
			switch (expstatus) {
			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
				(void) sprintf(string, " added");
				/*
				 * New expander device added
				 */
				mpt->m_port_chng = 1;
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
				topo_node->un.physport = physport;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "removed");
				psmp = refhash_linear_search(mpt->m_smp_targets,
				    mptsas_smp_eval_devhdl, &expd_handle);
				if (psmp == NULL)
					break;

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    psmp->m_addr.mta_phymask;
				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
				break;
			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "delaying removal");
				break;
			default:
				break;
			}
		} else {
			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
		}

		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
		    enc_handle, expd_handle, string));
		/*
		 * Walk the per-PHY entries carried by this event; each
		 * entry describes one attached device's change.
		 */
		for (i = 0; i < num_entries; i++) {
			phy = i + start_phy_num;
			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].PhyStatus);
			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
			/*
			 * Filter out processing of Phy Vacant Status unless
			 * the reason code is "Not Responding". Process all
			 * other combinations of Phy Status and Reason Codes.
			 */
			if ((phystatus &
			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
			    (reason_code !=
			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
				continue;
			}
			curr[0] = 0;
			prev[0] = 0;
			string[0] = 0;
			switch (reason_code) {
			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d added", mpt->m_instance, phy,
				    physport, dev_handle));
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at 6.0 "
					    "Gbps");
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}
				/*
				 * New target device added into the system.
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				if (expd_handle == 0) {
					/*
					 * Per MPI 2, if expander dev handle
					 * is 0, it's a directly attached
					 * device. So driver use PHY to decide
					 * which iport is associated
					 */
					physport = phy;
					mpt->m_port_chng = 1;
				}
				topo_node->un.physport = physport;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d removed", mpt->m_instance,
				    phy, physport, dev_handle));
				/*
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				/*
				 * Target device is removed from the system
				 * Before the device is really offline from
				 * from system.
				 */
				ptgt = refhash_linear_search(mpt->m_targets,
				    mptsas_target_eval_devhdl, &dev_handle);
				/*
				 * If ptgt is NULL here, it means that the
				 * DevHandle is not in the hash table. This is
				 * reasonable sometimes. For example, if a
				 * disk was pulled, then added, then pulled
				 * again, the disk will not have been put into
				 * the hash table because the add event will
				 * have an invalid phymask. BUT, this does not
				 * mean that the DevHandle is invalid. The
				 * controller will still have a valid DevHandle
				 * that must be removed. To do this, use the
				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
				 */
				if (ptgt == NULL) {
					topo_node = kmem_zalloc(
					    sizeof (mptsas_topo_change_list_t),
					    KM_SLEEP);
					topo_node->mpt = mpt;
					topo_node->un.phymask = 0;
					topo_node->event =
					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
					topo_node->devhdl = dev_handle;
					topo_node->flags = flags;
					topo_node->object = NULL;
					if (topo_head == NULL) {
						topo_head = topo_tail =
						    topo_node;
					} else {
						topo_tail->next = topo_node;
						topo_tail = topo_node;
					}
					break;
				}

				/*
				 * Update DR flag immediately avoid I/O failure
				 * before failover finish. Pay attention to the
				 * mutex protect, we need grab m_tx_waitq_mutex
				 * during set m_dr_flag because we won't add
				 * the following command into waitq, instead,
				 * we need return TRAN_BUSY in the tran_start
				 * context.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    ptgt->m_addr.mta_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				/*
				 * NOTE(review): smhba_info is indexed by the
				 * event entry index i, not the absolute phy
				 * number (i + start_phy_num) — confirm this
				 * is correct when StartPhyNum is nonzero.
				 */
				pSmhba = &mpt->m_phy_info[i].smhba_info;
				pSmhba->negotiated_link_rate = state;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_REMOVE,
					    &mpt->m_phy_info[i].smhba_info);
					mpt->m_phy_info[i].smhba_info.
					    negotiated_link_rate
					    = 0x1;
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_OFFLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at "
					    "6.0 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}

				/*
				 * Decode the previous link rate for the
				 * debug message below.
				 */
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(prev, ", was disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(prev, ", was offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(prev, ", was SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(prev, ", was SMP reset "
					    "in progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(prev, ", was online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(prev, ", was online at "
					    "3.0 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(prev, ", was online at "
					    "6.0 Gbps");
					break;
				default:
					break;
				}
				(void) sprintf(&string[strlen(string)], "link "
				    "changed, ");
				break;
			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
				continue;
			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
				(void) sprintf(&string[strlen(string)],
				    "target not responding, delaying "
				    "removal");
				break;
			}
			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
			    mpt->m_instance, phy, dev_handle, string, curr,
			    prev));
		}
		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
	{
		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
		mptsas_topo_change_list_t		*topo_head = NULL;
		mptsas_topo_change_list_t		*topo_tail = NULL;
		mptsas_topo_change_list_t		*topo_node = NULL;
		mptsas_target_t				*ptgt;
		uint8_t					num_entries, i, reason;
		uint16_t				volhandle, diskhandle;

		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
		    eventreply->EventData;
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irChangeList->NumElements);

		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
		    mpt->m_instance));

		/*
		 * Walk each config element; depending on the reason code
		 * it describes a volume or physical-disk change.
		 */
		for (i = 0; i < num_entries; i++) {
			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].ReasonCode);
			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].VolDevHandle);
			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);

			switch (reason) {
			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			{
				NDBG20(("mptsas %d volume added\n",
				    mpt->m_instance));

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);

				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->un.physport = 0xff;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			{
				NDBG20(("mptsas %d volume deleted\n",
				    mpt->m_instance));
				ptgt = refhash_linear_search(mpt->m_targets,
				    mptsas_target_eval_devhdl, &volhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Clear any flags related to volume
				 */
				(void) mptsas_delete_volume(mpt, volhandle);

				/*
				 * Update DR flag immediately avoid I/O failure
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    ptgt->m_addr.mta_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			{
				ptgt = refhash_linear_search(mpt->m_targets,
				    mptsas_target_eval_devhdl, &diskhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Update DR flag immediately avoid I/O failure
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    ptgt->m_addr.mta_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			{
				/*
				 * The physical drive is released by a IR
				 * volume. But we cannot get the the physport
				 * or phynum from the event data, so we only
				 * can get the physport/phynum after SAS
				 * Device Page0 request for the devhdl.
				 */
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = 0;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = NULL;
				mpt->m_port_chng = 1;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			default:
				break;
			}
		}

		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
7055
7056 /*
7057 * handle events from ioc
7058 */
7059 static void
7060 mptsas_handle_event(void *args)
7061 {
7062 m_replyh_arg_t *replyh_arg;
7063 pMpi2EventNotificationReply_t eventreply;
7064 uint32_t event, iocloginfo, rfm;
7065 uint32_t status;
7066 uint8_t port;
7067 mptsas_t *mpt;
7068 uint_t iocstatus;
7069
7070 replyh_arg = (m_replyh_arg_t *)args;
7071 rfm = replyh_arg->rfm;
7072 mpt = replyh_arg->mpt;
7073
7074 mutex_enter(&mpt->m_mutex);
7075 /*
7076 * If HBA is being reset, drop incoming event.
7077 */
7078 if (mpt->m_in_reset) {
7079 NDBG20(("dropping event received prior to reset"));
7080 mutex_exit(&mpt->m_mutex);
7081 return;
7082 }
7083
7084 eventreply = (pMpi2EventNotificationReply_t)
7085 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7086 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7087
7088 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7089 &eventreply->IOCStatus)) {
7090 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7091 mptsas_log(mpt, CE_WARN,
7092 "!mptsas_handle_event: IOCStatus=0x%x, "
7093 "IOCLogInfo=0x%x", iocstatus,
7094 ddi_get32(mpt->m_acc_reply_frame_hdl,
7095 &eventreply->IOCLogInfo));
7096 } else {
7097 mptsas_log(mpt, CE_WARN,
7098 "mptsas_handle_event: IOCStatus=0x%x, "
7099 "IOCLogInfo=0x%x", iocstatus,
7100 ddi_get32(mpt->m_acc_reply_frame_hdl,
7101 &eventreply->IOCLogInfo));
7102 }
7103 }
7104
7105 /*
7106 * figure out what kind of event we got and handle accordingly
7107 */
7108 switch (event) {
7109 case MPI2_EVENT_LOG_ENTRY_ADDED:
7110 break;
7111 case MPI2_EVENT_LOG_DATA:
7112 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7113 &eventreply->IOCLogInfo);
7114 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7115 iocloginfo));
7116 break;
7117 case MPI2_EVENT_STATE_CHANGE:
7118 NDBG20(("mptsas%d state change.", mpt->m_instance));
7119 break;
7120 case MPI2_EVENT_HARD_RESET_RECEIVED:
7121 NDBG20(("mptsas%d event change.", mpt->m_instance));
7122 break;
7123 case MPI2_EVENT_SAS_DISCOVERY:
7124 {
7125 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7126 char string[80];
7127 uint8_t rc;
7128
7129 sasdiscovery =
7130 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7131
7132 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7133 &sasdiscovery->ReasonCode);
7134 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7135 &sasdiscovery->PhysicalPort);
7136 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7137 &sasdiscovery->DiscoveryStatus);
7138
7139 string[0] = 0;
7140 switch (rc) {
7141 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7142 (void) sprintf(string, "STARTING");
7143 break;
7144 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7145 (void) sprintf(string, "COMPLETED");
7146 break;
7147 default:
7148 (void) sprintf(string, "UNKNOWN");
7149 break;
7150 }
7151
7152 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7153 port, status));
7154
7155 break;
7156 }
7157 case MPI2_EVENT_EVENT_CHANGE:
7158 NDBG20(("mptsas%d event change.", mpt->m_instance));
7159 break;
7160 case MPI2_EVENT_TASK_SET_FULL:
7161 {
7162 pMpi2EventDataTaskSetFull_t taskfull;
7163
7164 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7165
7166 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7167 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7168 &taskfull->CurrentDepth)));
7169 break;
7170 }
7171 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7172 {
7173 /*
7174 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7175 * in mptsas_handle_event_sync() of interrupt context
7176 */
7177 break;
7178 }
7179 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7180 {
7181 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7182 uint8_t rc;
7183 char string[80];
7184
7185 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7186 eventreply->EventData;
7187
7188 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7189 &encstatus->ReasonCode);
7190 switch (rc) {
7191 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7192 (void) sprintf(string, "added");
7193 break;
7194 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7195 (void) sprintf(string, ", not responding");
7196 break;
7197 default:
7198 break;
7199 }
7200 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7201 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7202 &encstatus->EnclosureHandle), string));
7203 break;
7204 }
7205
7206 /*
7207 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7208 * mptsas_handle_event_sync,in here just send ack message.
7209 */
7210 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7211 {
7212 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7213 uint8_t rc;
7214 uint16_t devhdl;
7215 uint64_t wwn = 0;
7216 uint32_t wwn_lo, wwn_hi;
7217
7218 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7219 eventreply->EventData;
7220 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7221 &statuschange->ReasonCode);
7222 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7223 (uint32_t *)(void *)&statuschange->SASAddress);
7224 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7225 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7226 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7227 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7228 &statuschange->DevHandle);
7229
7230 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7231 wwn));
7232
7233 switch (rc) {
7234 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7235 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7236 ddi_get8(mpt->m_acc_reply_frame_hdl,
7237 &statuschange->ASC),
7238 ddi_get8(mpt->m_acc_reply_frame_hdl,
7239 &statuschange->ASCQ)));
7240 break;
7241
7242 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7243 NDBG20(("Device not supported"));
7244 break;
7245
7246 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7247 NDBG20(("IOC internally generated the Target Reset "
7248 "for devhdl:%x", devhdl));
7249 break;
7250
7251 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7252 NDBG20(("IOC's internally generated Target Reset "
7253 "completed for devhdl:%x", devhdl));
7254 break;
7255
7256 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7257 NDBG20(("IOC internally generated Abort Task"));
7258 break;
7259
7260 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7261 NDBG20(("IOC's internally generated Abort Task "
7262 "completed"));
7263 break;
7264
7265 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7266 NDBG20(("IOC internally generated Abort Task Set"));
7267 break;
7268
7269 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7270 NDBG20(("IOC internally generated Clear Task Set"));
7271 break;
7272
7273 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7274 NDBG20(("IOC internally generated Query Task"));
7275 break;
7276
7277 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7278 NDBG20(("Device sent an Asynchronous Notification"));
7279 break;
7280
7281 default:
7282 break;
7283 }
7284 break;
7285 }
7286 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7287 {
7288 /*
7289 * IR TOPOLOGY CHANGE LIST Event has already been handled
7290 * in mpt_handle_event_sync() of interrupt context
7291 */
7292 break;
7293 }
7294 case MPI2_EVENT_IR_OPERATION_STATUS:
7295 {
7296 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7297 char reason_str[80];
7298 uint8_t rc, percent;
7299 uint16_t handle;
7300
7301 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7302 eventreply->EventData;
7303 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7304 &irOpStatus->RAIDOperation);
7305 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7306 &irOpStatus->PercentComplete);
7307 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7308 &irOpStatus->VolDevHandle);
7309
7310 switch (rc) {
7311 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7312 (void) sprintf(reason_str, "resync");
7313 break;
7314 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7315 (void) sprintf(reason_str, "online capacity "
7316 "expansion");
7317 break;
7318 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7319 (void) sprintf(reason_str, "consistency check");
7320 break;
7321 default:
7322 (void) sprintf(reason_str, "unknown reason %x",
7323 rc);
7324 }
7325
7326 NDBG20(("mptsas%d raid operational status: (%s)"
7327 "\thandle(0x%04x), percent complete(%d)\n",
7328 mpt->m_instance, reason_str, handle, percent));
7329 break;
7330 }
7331 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7332 {
7333 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7334 uint8_t phy_num;
7335 uint8_t primitive;
7336
7337 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7338 eventreply->EventData;
7339
7340 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7341 &sas_broadcast->PhyNum);
7342 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7343 &sas_broadcast->Primitive);
7344
7345 switch (primitive) {
7346 case MPI2_EVENT_PRIMITIVE_CHANGE:
7347 mptsas_smhba_log_sysevent(mpt,
7348 ESC_SAS_HBA_PORT_BROADCAST,
7349 SAS_PORT_BROADCAST_CHANGE,
7350 &mpt->m_phy_info[phy_num].smhba_info);
7351 break;
7352 case MPI2_EVENT_PRIMITIVE_SES:
7353 mptsas_smhba_log_sysevent(mpt,
7354 ESC_SAS_HBA_PORT_BROADCAST,
7355 SAS_PORT_BROADCAST_SES,
7356 &mpt->m_phy_info[phy_num].smhba_info);
7357 break;
7358 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7359 mptsas_smhba_log_sysevent(mpt,
7360 ESC_SAS_HBA_PORT_BROADCAST,
7361 SAS_PORT_BROADCAST_D01_4,
7362 &mpt->m_phy_info[phy_num].smhba_info);
7363 break;
7364 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7365 mptsas_smhba_log_sysevent(mpt,
7366 ESC_SAS_HBA_PORT_BROADCAST,
7367 SAS_PORT_BROADCAST_D04_7,
7368 &mpt->m_phy_info[phy_num].smhba_info);
7369 break;
7370 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7371 mptsas_smhba_log_sysevent(mpt,
7372 ESC_SAS_HBA_PORT_BROADCAST,
7373 SAS_PORT_BROADCAST_D16_7,
7374 &mpt->m_phy_info[phy_num].smhba_info);
7375 break;
7376 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7377 mptsas_smhba_log_sysevent(mpt,
7378 ESC_SAS_HBA_PORT_BROADCAST,
7379 SAS_PORT_BROADCAST_D29_7,
7380 &mpt->m_phy_info[phy_num].smhba_info);
7381 break;
7382 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7383 mptsas_smhba_log_sysevent(mpt,
7384 ESC_SAS_HBA_PORT_BROADCAST,
7385 SAS_PORT_BROADCAST_D24_0,
7386 &mpt->m_phy_info[phy_num].smhba_info);
7387 break;
7388 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7389 mptsas_smhba_log_sysevent(mpt,
7390 ESC_SAS_HBA_PORT_BROADCAST,
7391 SAS_PORT_BROADCAST_D27_4,
7392 &mpt->m_phy_info[phy_num].smhba_info);
7393 break;
7394 default:
7395 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7396 " %x received",
7397 mpt->m_instance, primitive));
7398 break;
7399 }
7400 NDBG20(("mptsas%d sas broadcast primitive: "
7401 "\tprimitive(0x%04x), phy(%d) complete\n",
7402 mpt->m_instance, primitive, phy_num));
7403 break;
7404 }
7405 case MPI2_EVENT_IR_VOLUME:
7406 {
7407 Mpi2EventDataIrVolume_t *irVolume;
7408 uint16_t devhandle;
7409 uint32_t state;
7410 int config, vol;
7411 uint8_t found = FALSE;
7412
7413 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7414 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7415 &irVolume->NewValue);
7416 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7417 &irVolume->VolDevHandle);
7418
7419 NDBG20(("EVENT_IR_VOLUME event is received"));
7420
7421 /*
7422 * Get latest RAID info and then find the DevHandle for this
7423 * event in the configuration. If the DevHandle is not found
7424 * just exit the event.
7425 */
7426 (void) mptsas_get_raid_info(mpt);
7427 for (config = 0; (config < mpt->m_num_raid_configs) &&
7428 (!found); config++) {
7429 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7430 if (mpt->m_raidconfig[config].m_raidvol[vol].
7431 m_raidhandle == devhandle) {
7432 found = TRUE;
7433 break;
7434 }
7435 }
7436 }
7437 if (!found) {
7438 break;
7439 }
7440
7441 switch (irVolume->ReasonCode) {
7442 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7443 {
7444 uint32_t i;
7445 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7446 state;
7447
7448 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7449 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7450 ", auto-config of hot-swap drives is %s"
7451 ", write caching is %s"
7452 ", hot-spare pool mask is %02x\n",
7453 vol, state &
7454 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7455 ? "disabled" : "enabled",
7456 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7457 ? "controlled by member disks" :
7458 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7459 ? "disabled" :
7460 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7461 ? "enabled" :
7462 "incorrectly set",
7463 (state >> 16) & 0xff);
7464 break;
7465 }
7466 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7467 {
7468 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7469 (uint8_t)state;
7470
7471 mptsas_log(mpt, CE_NOTE,
7472 "Volume %d is now %s\n", vol,
7473 state == MPI2_RAID_VOL_STATE_OPTIMAL
7474 ? "optimal" :
7475 state == MPI2_RAID_VOL_STATE_DEGRADED
7476 ? "degraded" :
7477 state == MPI2_RAID_VOL_STATE_ONLINE
7478 ? "online" :
7479 state == MPI2_RAID_VOL_STATE_INITIALIZING
7480 ? "initializing" :
7481 state == MPI2_RAID_VOL_STATE_FAILED
7482 ? "failed" :
7483 state == MPI2_RAID_VOL_STATE_MISSING
7484 ? "missing" :
7485 "state unknown");
7486 break;
7487 }
7488 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7489 {
7490 mpt->m_raidconfig[config].m_raidvol[vol].
7491 m_statusflags = state;
7492
7493 mptsas_log(mpt, CE_NOTE,
7494 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7495 vol,
7496 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7497 ? ", enabled" : ", disabled",
7498 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7499 ? ", quiesced" : "",
7500 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7501 ? ", inactive" : ", active",
7502 state &
7503 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7504 ? ", bad block table is full" : "",
7505 state &
7506 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7507 ? ", resync in progress" : "",
7508 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7509 ? ", background initialization in progress" : "",
7510 state &
7511 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7512 ? ", capacity expansion in progress" : "",
7513 state &
7514 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7515 ? ", consistency check in progress" : "",
7516 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7517 ? ", data scrub in progress" : "");
7518 break;
7519 }
7520 default:
7521 break;
7522 }
7523 break;
7524 }
7525 case MPI2_EVENT_IR_PHYSICAL_DISK:
7526 {
7527 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7528 uint16_t devhandle, enchandle, slot;
7529 uint32_t status, state;
7530 uint8_t physdisknum, reason;
7531
7532 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7533 eventreply->EventData;
7534 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7535 &irPhysDisk->PhysDiskNum);
7536 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7537 &irPhysDisk->PhysDiskDevHandle);
7538 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7539 &irPhysDisk->EnclosureHandle);
7540 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7541 &irPhysDisk->Slot);
7542 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7543 &irPhysDisk->NewValue);
7544 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7545 &irPhysDisk->ReasonCode);
7546
7547 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7548
7549 switch (reason) {
7550 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7551 mptsas_log(mpt, CE_NOTE,
7552 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7553 "for enclosure with handle 0x%x is now in hot "
7554 "spare pool %d",
7555 physdisknum, devhandle, slot, enchandle,
7556 (state >> 16) & 0xff);
7557 break;
7558
7559 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7560 status = state;
7561 mptsas_log(mpt, CE_NOTE,
7562 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7563 "for enclosure with handle 0x%x is now "
7564 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7565 enchandle,
7566 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7567 ? ", inactive" : ", active",
7568 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7569 ? ", out of sync" : "",
7570 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7571 ? ", quiesced" : "",
7572 status &
7573 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7574 ? ", write cache enabled" : "",
7575 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7576 ? ", capacity expansion target" : "");
7577 break;
7578
7579 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7580 mptsas_log(mpt, CE_NOTE,
7581 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7582 "for enclosure with handle 0x%x is now %s\n",
7583 physdisknum, devhandle, slot, enchandle,
7584 state == MPI2_RAID_PD_STATE_OPTIMAL
7585 ? "optimal" :
7586 state == MPI2_RAID_PD_STATE_REBUILDING
7587 ? "rebuilding" :
7588 state == MPI2_RAID_PD_STATE_DEGRADED
7589 ? "degraded" :
7590 state == MPI2_RAID_PD_STATE_HOT_SPARE
7591 ? "a hot spare" :
7592 state == MPI2_RAID_PD_STATE_ONLINE
7593 ? "online" :
7594 state == MPI2_RAID_PD_STATE_OFFLINE
7595 ? "offline" :
7596 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7597 ? "not compatible" :
7598 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7599 ? "not configured" :
7600 "state unknown");
7601 break;
7602 }
7603 break;
7604 }
7605 default:
7606 NDBG20(("mptsas%d: unknown event %x received",
7607 mpt->m_instance, event));
7608 break;
7609 }
7610
7611 /*
7612 * Return the reply frame to the free queue.
7613 */
7614 ddi_put32(mpt->m_acc_free_queue_hdl,
7615 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7616 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7617 DDI_DMA_SYNC_FORDEV);
7618 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7619 mpt->m_free_index = 0;
7620 }
7621 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7622 mpt->m_free_index);
7623 mutex_exit(&mpt->m_mutex);
7624 }
7625
7626 /*
7627 * invoked from timeout() to restart qfull cmds with throttle == 0
7628 */
7629 static void
7630 mptsas_restart_cmd(void *arg)
7631 {
7632 mptsas_t *mpt = arg;
7633 mptsas_target_t *ptgt = NULL;
7634
7635 mutex_enter(&mpt->m_mutex);
7636
7637 mpt->m_restart_cmd_timeid = 0;
7638
7639 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
7640 ptgt = refhash_next(mpt->m_targets, ptgt)) {
7641 if (ptgt->m_reset_delay == 0) {
7642 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7643 mptsas_set_throttle(mpt, ptgt,
7644 MAX_THROTTLE);
7645 }
7646 }
7647 }
7648 mptsas_restart_hba(mpt);
7649 mutex_exit(&mpt->m_mutex);
7650 }
7651
7652 void
7653 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7654 {
7655 int slot;
7656 mptsas_slots_t *slots = mpt->m_active;
7657 int t;
7658 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7659
7660 ASSERT(cmd != NULL);
7661 ASSERT(cmd->cmd_queued == FALSE);
7662
7663 /*
7664 * Task Management cmds are removed in their own routines. Also,
7665 * we don't want to modify timeout based on TM cmds.
7666 */
7667 if (cmd->cmd_flags & CFLAG_TM_CMD) {
7668 return;
7669 }
7670
7671 t = Tgt(cmd);
7672 slot = cmd->cmd_slot;
7673
7674 /*
7675 * remove the cmd.
7676 */
7677 if (cmd == slots->m_slot[slot]) {
7678 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7679 slots->m_slot[slot] = NULL;
7680 mpt->m_ncmds--;
7681
7682 /*
7683 * only decrement per target ncmds if command
7684 * has a target associated with it.
7685 */
7686 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7687 ptgt->m_t_ncmds--;
7688 /*
7689 * reset throttle if we just ran an untagged command
7690 * to a tagged target
7691 */
7692 if ((ptgt->m_t_ncmds == 0) &&
7693 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7694 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7695 }
7696 }
7697
7698 }
7699
7700 /*
7701 * This is all we need to do for ioc commands.
7702 */
7703 if (cmd->cmd_flags & CFLAG_CMDIOC) {
7704 mptsas_return_to_pool(mpt, cmd);
7705 return;
7706 }
7707
7708 /*
7709 * Figure out what to set tag Q timeout for...
7710 *
7711 * Optimize: If we have duplicate's of same timeout
7712 * we're using, then we'll use it again until we run
7713 * out of duplicates. This should be the normal case
7714 * for block and raw I/O.
7715 * If no duplicates, we have to scan through tag que and
7716 * find the longest timeout value and use it. This is
7717 * going to take a while...
7718 * Add 1 to m_n_normal to account for TM request.
7719 */
7720 if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7721 if (--(ptgt->m_dups) == 0) {
7722 if (ptgt->m_t_ncmds) {
7723 mptsas_cmd_t *ssp;
7724 uint_t n = 0;
7725 ushort_t nslots = (slots->m_n_normal + 1);
7726 ushort_t i;
7727 /*
7728 * This crude check assumes we don't do
7729 * this too often which seems reasonable
7730 * for block and raw I/O.
7731 */
7732 for (i = 0; i < nslots; i++) {
7733 ssp = slots->m_slot[i];
7734 if (ssp && (Tgt(ssp) == t) &&
7735 (ssp->cmd_pkt->pkt_time > n)) {
7736 n = ssp->cmd_pkt->pkt_time;
7737 ptgt->m_dups = 1;
7738 } else if (ssp && (Tgt(ssp) == t) &&
7739 (ssp->cmd_pkt->pkt_time == n)) {
7740 ptgt->m_dups++;
7741 }
7742 }
7743 ptgt->m_timebase = n;
7744 } else {
7745 ptgt->m_dups = 0;
7746 ptgt->m_timebase = 0;
7747 }
7748 }
7749 }
7750 ptgt->m_timeout = ptgt->m_timebase;
7751
7752 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7753 }
7754
7755 /*
7756 * accept all cmds on the tx_waitq if any and then
7757 * start a fresh request from the top of the device queue.
7758 *
7759 * since there are always cmds queued on the tx_waitq, and rare cmds on
7760 * the instance waitq, so this function should not be invoked in the ISR,
7761 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7762 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7763 */
7764 static void
7765 mptsas_restart_hba(mptsas_t *mpt)
7766 {
7767 ASSERT(mutex_owned(&mpt->m_mutex));
7768
7769 mutex_enter(&mpt->m_tx_waitq_mutex);
7770 if (mpt->m_tx_waitq) {
7771 mptsas_accept_tx_waitq(mpt);
7772 }
7773 mutex_exit(&mpt->m_tx_waitq_mutex);
7774 mptsas_restart_waitq(mpt);
7775 }
7776
/*
 * start a fresh request from the top of the device queue
 */
static void
mptsas_restart_waitq(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd, *next_cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * If there is a reset delay, don't start any cmds.  Otherwise, start
	 * as many cmds as possible.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 *
	 * Walk the whole waitq; next_cmd is captured before any dispatch
	 * because a started command is unlinked from the queue.
	 */
	cmd = mpt->m_waitq;

	while (cmd != NULL) {
		next_cmd = cmd->cmd_linkp;
		/* internal passthru request: needs a slot but no target */
		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * passthru command get slot need
				 * set CFLAG_PREPARED.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_passthru(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		/* internal config-page request */
		if (cmd->cmd_flags & CFLAG_CONFIG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the config page request and delete it
				 * from the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_config_page_access(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		/* firmware diagnostic request */
		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the FW Diag request and delete if from
				 * the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_diag(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}

		/*
		 * Normal SCSI command: a drained target with no outstanding
		 * commands gets its throttle restored before the dispatch
		 * checks below.
		 */
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
		    (ptgt->m_t_ncmds == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		/*
		 * Only start the command if the HBA has a free slot, the
		 * target is not in reset delay, and the target is under
		 * its throttle limit.
		 */
		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
		    (ptgt && (ptgt->m_reset_delay == 0)) &&
		    (ptgt && (ptgt->m_t_ncmds <
		    ptgt->m_t_throttle))) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				mptsas_waitq_delete(mpt, cmd);
				(void) mptsas_start_cmd(mpt, cmd);
			}
		}
		cmd = next_cmd;
	}
}
7857 /*
7858 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7859 * Accept all those queued cmds before new cmd is accept so that the
7860 * cmds are sent in order.
7861 */
7862 static void
7863 mptsas_accept_tx_waitq(mptsas_t *mpt)
7864 {
7865 mptsas_cmd_t *cmd;
7866
7867 ASSERT(mutex_owned(&mpt->m_mutex));
7868 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7869
7870 /*
7871 * A Bus Reset could occur at any time and flush the tx_waitq,
7872 * so we cannot count on the tx_waitq to contain even one cmd.
7873 * And when the m_tx_waitq_mutex is released and run
7874 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7875 */
7876 cmd = mpt->m_tx_waitq;
7877 for (;;) {
7878 if ((cmd = mpt->m_tx_waitq) == NULL) {
7879 mpt->m_tx_draining = 0;
7880 break;
7881 }
7882 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7883 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7884 }
7885 cmd->cmd_linkp = NULL;
7886 mutex_exit(&mpt->m_tx_waitq_mutex);
7887 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7888 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7889 "to accept cmd on queue\n");
7890 mutex_enter(&mpt->m_tx_waitq_mutex);
7891 }
7892 }
7893
7894
/*
 * mpt tag type lookup
 *
 * Indexed by the packet's tag flag bits ((cmd_pkt_flags & FLAG_TAGMASK)
 * >> 12, see mptsas_start_cmd); yields the SCSI queue-tag message type.
 * Indices 0 and 3 correspond to tag codes that are never generated.
 */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7900
/*
 * Build an MPI2 SCSI IO request frame for the command (which already owns
 * slot cmd->cmd_slot) and post it to the IOC via the request descriptor
 * register, then update the per-target tagged-queue timeout bookkeeping.
 *
 * Returns DDI_SUCCESS when the request was posted, DDI_FAILURE when the
 * command was deferred (untagged-drain case) or an FM handle check failed.
 * Caller must hold m_mutex.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * The command's slot number doubles as the SMID; slot 0 is an
	 * invalid SMID so slots start at 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* requeue at the head until the target drains */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/*
		 * Untagged (non-RQSENSE) command: throttle the target down
		 * to one outstanding command while it is in flight.
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* locate this SMID's request frame and fill it in */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	/* SGLOffset0 is expressed in 4-byte words */
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* no data: emit a zero-length end-of-list SGE */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* flush the frame to device-visible memory before posting */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	/*
	 * n > 0: this command's timeout is the new per-target maximum;
	 * n == 0: one more duplicate of the current maximum;
	 * n < 0: shorter than the current maximum, timebase unchanged.
	 */
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8082
8083 /*
8084 * Select a helper thread to handle current doneq
8085 */
8086 static void
8087 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8088 {
8089 uint64_t t, i;
8090 uint32_t min = 0xffffffff;
8091 mptsas_doneq_thread_list_t *item;
8092
8093 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8094 item = &mpt->m_doneq_thread_id[i];
8095 /*
8096 * If the completed command on help thread[i] less than
8097 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8098 * pick a thread which has least completed command.
8099 */
8100
8101 mutex_enter(&item->mutex);
8102 if (item->len < mpt->m_doneq_thread_threshold) {
8103 t = i;
8104 mutex_exit(&item->mutex);
8105 break;
8106 }
8107 if (item->len < min) {
8108 min = item->len;
8109 t = i;
8110 }
8111 mutex_exit(&item->mutex);
8112 }
8113 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8114 mptsas_doneq_mv(mpt, t);
8115 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8116 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8117 }
8118
8119 /*
8120 * move the current global doneq to the doneq of thead[t]
8121 */
8122 static void
8123 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8124 {
8125 mptsas_cmd_t *cmd;
8126 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8127
8128 ASSERT(mutex_owned(&item->mutex));
8129 while ((cmd = mpt->m_doneq) != NULL) {
8130 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8131 mpt->m_donetail = &mpt->m_doneq;
8132 }
8133 cmd->cmd_linkp = NULL;
8134 *item->donetail = cmd;
8135 item->donetail = &cmd->cmd_linkp;
8136 mpt->m_doneq_len--;
8137 item->len++;
8138 }
8139 }
8140
8141 void
8142 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8143 {
8144 struct scsi_pkt *pkt = CMD2PKT(cmd);
8145
8146 /* Check all acc and dma handles */
8147 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8148 DDI_SUCCESS) ||
8149 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8150 DDI_SUCCESS) ||
8151 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8152 DDI_SUCCESS) ||
8153 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8154 DDI_SUCCESS) ||
8155 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8156 DDI_SUCCESS) ||
8157 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8158 DDI_SUCCESS) ||
8159 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8160 DDI_SUCCESS)) {
8161 ddi_fm_service_impact(mpt->m_dip,
8162 DDI_SERVICE_UNAFFECTED);
8163 ddi_fm_acc_err_clear(mpt->m_config_handle,
8164 DDI_FME_VER0);
8165 pkt->pkt_reason = CMD_TRAN_ERR;
8166 pkt->pkt_statistics = 0;
8167 }
8168 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8169 DDI_SUCCESS) ||
8170 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8171 DDI_SUCCESS) ||
8172 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8173 DDI_SUCCESS) ||
8174 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8175 DDI_SUCCESS) ||
8176 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8177 DDI_SUCCESS)) {
8178 ddi_fm_service_impact(mpt->m_dip,
8179 DDI_SERVICE_UNAFFECTED);
8180 pkt->pkt_reason = CMD_TRAN_ERR;
8181 pkt->pkt_statistics = 0;
8182 }
8183 if (cmd->cmd_dmahandle &&
8184 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8185 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8186 pkt->pkt_reason = CMD_TRAN_ERR;
8187 pkt->pkt_statistics = 0;
8188 }
8189 if ((cmd->cmd_extra_frames &&
8190 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8191 DDI_SUCCESS) ||
8192 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8193 DDI_SUCCESS)))) {
8194 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8195 pkt->pkt_reason = CMD_TRAN_ERR;
8196 pkt->pkt_statistics = 0;
8197 }
8198 if (cmd->cmd_arqhandle &&
8199 (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8200 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8201 pkt->pkt_reason = CMD_TRAN_ERR;
8202 pkt->pkt_statistics = 0;
8203 }
8204 if (cmd->cmd_ext_arqhandle &&
8205 (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8206 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8207 pkt->pkt_reason = CMD_TRAN_ERR;
8208 pkt->pkt_statistics = 0;
8209 }
8210 }
8211
8212 /*
8213 * These routines manipulate the queue of commands that
8214 * are waiting for their completion routines to be called.
8215 * The queue is usually in FIFO order but on an MP system
8216 * it's possible for the completion routines to get out
8217 * of order. If that's a problem you need to add a global
8218 * mutex around the code that calls the completion routine
8219 * in the interrupt handler.
8220 */
8221 static void
8222 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8223 {
8224 struct scsi_pkt *pkt = CMD2PKT(cmd);
8225
8226 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8227
8228 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8229 cmd->cmd_linkp = NULL;
8230 cmd->cmd_flags |= CFLAG_FINISHED;
8231 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8232
8233 mptsas_fma_check(mpt, cmd);
8234
8235 /*
8236 * only add scsi pkts that have completion routines to
8237 * the doneq. no intr cmds do not have callbacks.
8238 */
8239 if (pkt && (pkt->pkt_comp)) {
8240 *mpt->m_donetail = cmd;
8241 mpt->m_donetail = &cmd->cmd_linkp;
8242 mpt->m_doneq_len++;
8243 }
8244 }
8245
8246 static mptsas_cmd_t *
8247 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8248 {
8249 mptsas_cmd_t *cmd;
8250 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8251
8252 /* pop one off the done queue */
8253 if ((cmd = item->doneq) != NULL) {
8254 /* if the queue is now empty fix the tail pointer */
8255 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8256 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8257 item->donetail = &item->doneq;
8258 }
8259 cmd->cmd_linkp = NULL;
8260 item->len--;
8261 }
8262 return (cmd);
8263 }
8264
8265 static void
8266 mptsas_doneq_empty(mptsas_t *mpt)
8267 {
8268 if (mpt->m_doneq && !mpt->m_in_callback) {
8269 mptsas_cmd_t *cmd, *next;
8270 struct scsi_pkt *pkt;
8271
8272 mpt->m_in_callback = 1;
8273 cmd = mpt->m_doneq;
8274 mpt->m_doneq = NULL;
8275 mpt->m_donetail = &mpt->m_doneq;
8276 mpt->m_doneq_len = 0;
8277
8278 mutex_exit(&mpt->m_mutex);
8279 /*
8280 * run the completion routines of all the
8281 * completed commands
8282 */
8283 while (cmd != NULL) {
8284 next = cmd->cmd_linkp;
8285 cmd->cmd_linkp = NULL;
8286 /* run this command's completion routine */
8287 cmd->cmd_flags |= CFLAG_COMPLETED;
8288 pkt = CMD2PKT(cmd);
8289 mptsas_pkt_comp(pkt, cmd);
8290 cmd = next;
8291 }
8292 mutex_enter(&mpt->m_mutex);
8293 mpt->m_in_callback = 0;
8294 }
8295 }
8296
8297 /*
8298 * These routines manipulate the target's queue of pending requests
8299 */
8300 void
8301 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8302 {
8303 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8304 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8305 cmd->cmd_queued = TRUE;
8306 if (ptgt)
8307 ptgt->m_t_nwait++;
8308 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8309 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8310 mpt->m_waitqtail = &cmd->cmd_linkp;
8311 }
8312 mpt->m_waitq = cmd;
8313 } else {
8314 cmd->cmd_linkp = NULL;
8315 *(mpt->m_waitqtail) = cmd;
8316 mpt->m_waitqtail = &cmd->cmd_linkp;
8317 }
8318 }
8319
8320 static mptsas_cmd_t *
8321 mptsas_waitq_rm(mptsas_t *mpt)
8322 {
8323 mptsas_cmd_t *cmd;
8324 mptsas_target_t *ptgt;
8325 NDBG7(("mptsas_waitq_rm"));
8326
8327 MPTSAS_WAITQ_RM(mpt, cmd);
8328
8329 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8330 if (cmd) {
8331 ptgt = cmd->cmd_tgt_addr;
8332 if (ptgt) {
8333 ptgt->m_t_nwait--;
8334 ASSERT(ptgt->m_t_nwait >= 0);
8335 }
8336 }
8337 return (cmd);
8338 }
8339
8340 /*
8341 * remove specified cmd from the middle of the wait queue.
8342 */
8343 static void
8344 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8345 {
8346 mptsas_cmd_t *prevp = mpt->m_waitq;
8347 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8348
8349 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8350 (void *)mpt, (void *)cmd));
8351 if (ptgt) {
8352 ptgt->m_t_nwait--;
8353 ASSERT(ptgt->m_t_nwait >= 0);
8354 }
8355
8356 if (prevp == cmd) {
8357 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8358 mpt->m_waitqtail = &mpt->m_waitq;
8359
8360 cmd->cmd_linkp = NULL;
8361 cmd->cmd_queued = FALSE;
8362 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8363 (void *)mpt, (void *)cmd));
8364 return;
8365 }
8366
8367 while (prevp != NULL) {
8368 if (prevp->cmd_linkp == cmd) {
8369 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8370 mpt->m_waitqtail = &prevp->cmd_linkp;
8371
8372 cmd->cmd_linkp = NULL;
8373 cmd->cmd_queued = FALSE;
8374 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8375 (void *)mpt, (void *)cmd));
8376 return;
8377 }
8378 prevp = prevp->cmd_linkp;
8379 }
8380 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8381 }
8382
/*
 * Remove and return the head of the tx_waitq via the MPTSAS_TX_WAITQ_RM
 * macro; returns NULL when the queue is empty.  Unlike mptsas_waitq_rm()
 * there is no per-target wait count to adjust here.
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
8395
8396 /*
8397 * remove specified cmd from the middle of the tx_waitq.
8398 */
8399 static void
8400 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8401 {
8402 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8403
8404 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8405 (void *)mpt, (void *)cmd));
8406
8407 if (prevp == cmd) {
8408 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8409 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8410
8411 cmd->cmd_linkp = NULL;
8412 cmd->cmd_queued = FALSE;
8413 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8414 (void *)mpt, (void *)cmd));
8415 return;
8416 }
8417
8418 while (prevp != NULL) {
8419 if (prevp->cmd_linkp == cmd) {
8420 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8421 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8422
8423 cmd->cmd_linkp = NULL;
8424 cmd->cmd_queued = FALSE;
8425 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8426 (void *)mpt, (void *)cmd));
8427 return;
8428 }
8429 prevp = prevp->cmd_linkp;
8430 }
8431 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8432 }
8433
8434 /*
8435 * device and bus reset handling
8436 *
8437 * Notes:
8438 * - RESET_ALL: reset the controller
8439 * - RESET_TARGET: reset the target specified in scsi_address
8440 */
8441 static int
8442 mptsas_scsi_reset(struct scsi_address *ap, int level)
8443 {
8444 mptsas_t *mpt = ADDR2MPT(ap);
8445 int rval;
8446 mptsas_tgt_private_t *tgt_private;
8447 mptsas_target_t *ptgt = NULL;
8448
8449 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8450 ptgt = tgt_private->t_private;
8451 if (ptgt == NULL) {
8452 return (FALSE);
8453 }
8454 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8455 level));
8456
8457 mutex_enter(&mpt->m_mutex);
8458 /*
8459 * if we are not in panic set up a reset delay for this target
8460 */
8461 if (!ddi_in_panic()) {
8462 mptsas_setup_bus_reset_delay(mpt);
8463 } else {
8464 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8465 }
8466 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8467 mutex_exit(&mpt->m_mutex);
8468
8469 /*
8470 * The transport layer expect to only see TRUE and
8471 * FALSE. Therefore, we will adjust the return value
8472 * if mptsas_do_scsi_reset returns FAILED.
8473 */
8474 if (rval == FAILED)
8475 rval = FALSE;
8476 return (rval);
8477 }
8478
8479 static int
8480 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8481 {
8482 int rval = FALSE;
8483 uint8_t config, disk;
8484
8485 ASSERT(mutex_owned(&mpt->m_mutex));
8486
8487 if (mptsas_debug_resets) {
8488 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8489 devhdl);
8490 }
8491
8492 /*
8493 * Issue a Target Reset message to the target specified but not to a
8494 * disk making up a raid volume. Just look through the RAID config
8495 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8496 * list, then don't reset this target.
8497 */
8498 for (config = 0; config < mpt->m_num_raid_configs; config++) {
8499 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8500 if (devhdl == mpt->m_raidconfig[config].
8501 m_physdisk_devhdl[disk]) {
8502 return (TRUE);
8503 }
8504 }
8505 }
8506
8507 rval = mptsas_ioc_task_management(mpt,
8508 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8509
8510 mptsas_doneq_empty(mpt);
8511 return (rval);
8512 }
8513
8514 static int
8515 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8516 void (*callback)(caddr_t), caddr_t arg)
8517 {
8518 mptsas_t *mpt = ADDR2MPT(ap);
8519
8520 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8521
8522 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8523 &mpt->m_mutex, &mpt->m_reset_notify_listf));
8524 }
8525
8526 static int
8527 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8528 {
8529 dev_info_t *lun_dip = NULL;
8530
8531 ASSERT(sd != NULL);
8532 ASSERT(name != NULL);
8533 lun_dip = sd->sd_dev;
8534 ASSERT(lun_dip != NULL);
8535
8536 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8537 return (1);
8538 } else {
8539 return (0);
8540 }
8541 }
8542
/*
 * tran_get_bus_addr(9E) entry point.  A child's bus address is identical
 * to its unit-address name, so simply delegate to mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8548
8549 void
8550 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8551 {
8552
8553 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8554
8555 /*
8556 * if the bus is draining/quiesced, no changes to the throttles
8557 * are allowed. Not allowing change of throttles during draining
8558 * limits error recovery but will reduce draining time
8559 *
8560 * all throttles should have been set to HOLD_THROTTLE
8561 */
8562 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8563 return;
8564 }
8565
8566 if (what == HOLD_THROTTLE) {
8567 ptgt->m_t_throttle = HOLD_THROTTLE;
8568 } else if (ptgt->m_reset_delay == 0) {
8569 ptgt->m_t_throttle = what;
8570 }
8571 }
8572
8573 /*
8574 * Clean up from a device reset.
8575 * For the case of target reset, this function clears the waitq of all
8576 * commands for a particular target. For the case of abort task set, this
8577 * function clears the waitq of all commonds for a particular target/lun.
8578 */
8579 static void
8580 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
8581 {
8582 mptsas_slots_t *slots = mpt->m_active;
8583 mptsas_cmd_t *cmd, *next_cmd;
8584 int slot;
8585 uchar_t reason;
8586 uint_t stat;
8587
8588 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
8589
8590 /*
8591 * Make sure the I/O Controller has flushed all cmds
8592 * that are associated with this target for a target reset
8593 * and target/lun for abort task set.
8594 * Account for TM requests, which use the last SMID.
8595 */
8596 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
8597 if ((cmd = slots->m_slot[slot]) == NULL)
8598 continue;
8599 reason = CMD_RESET;
8600 stat = STAT_DEV_RESET;
8601 switch (tasktype) {
8602 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8603 if (Tgt(cmd) == target) {
8604 NDBG25(("mptsas_flush_target discovered non-"
8605 "NULL cmd in slot %d, tasktype 0x%x", slot,
8606 tasktype));
8607 mptsas_dump_cmd(mpt, cmd);
8608 mptsas_remove_cmd(mpt, cmd);
8609 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
8610 mptsas_doneq_add(mpt, cmd);
8611 }
8612 break;
8613 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8614 reason = CMD_ABORTED;
8615 stat = STAT_ABORTED;
8616 /*FALLTHROUGH*/
8617 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8618 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8619
8620 NDBG25(("mptsas_flush_target discovered non-"
8621 "NULL cmd in slot %d, tasktype 0x%x", slot,
8622 tasktype));
8623 mptsas_dump_cmd(mpt, cmd);
8624 mptsas_remove_cmd(mpt, cmd);
8625 mptsas_set_pkt_reason(mpt, cmd, reason,
8626 stat);
8627 mptsas_doneq_add(mpt, cmd);
8628 }
8629 break;
8630 default:
8631 break;
8632 }
8633 }
8634
8635 /*
8636 * Flush the waitq and tx_waitq of this target's cmds
8637 */
8638 cmd = mpt->m_waitq;
8639
8640 reason = CMD_RESET;
8641 stat = STAT_DEV_RESET;
8642
8643 switch (tasktype) {
8644 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8645 while (cmd != NULL) {
8646 next_cmd = cmd->cmd_linkp;
8647 if (Tgt(cmd) == target) {
8648 mptsas_waitq_delete(mpt, cmd);
8649 mptsas_set_pkt_reason(mpt, cmd,
8650 reason, stat);
8651 mptsas_doneq_add(mpt, cmd);
8652 }
8653 cmd = next_cmd;
8654 }
8655 mutex_enter(&mpt->m_tx_waitq_mutex);
8656 cmd = mpt->m_tx_waitq;
8657 while (cmd != NULL) {
8658 next_cmd = cmd->cmd_linkp;
8659 if (Tgt(cmd) == target) {
8660 mptsas_tx_waitq_delete(mpt, cmd);
8661 mutex_exit(&mpt->m_tx_waitq_mutex);
8662 mptsas_set_pkt_reason(mpt, cmd,
8663 reason, stat);
8664 mptsas_doneq_add(mpt, cmd);
8665 mutex_enter(&mpt->m_tx_waitq_mutex);
8666 }
8667 cmd = next_cmd;
8668 }
8669 mutex_exit(&mpt->m_tx_waitq_mutex);
8670 break;
8671 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8672 reason = CMD_ABORTED;
8673 stat = STAT_ABORTED;
8674 /*FALLTHROUGH*/
8675 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8676 while (cmd != NULL) {
8677 next_cmd = cmd->cmd_linkp;
8678 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8679 mptsas_waitq_delete(mpt, cmd);
8680 mptsas_set_pkt_reason(mpt, cmd,
8681 reason, stat);
8682 mptsas_doneq_add(mpt, cmd);
8683 }
8684 cmd = next_cmd;
8685 }
8686 mutex_enter(&mpt->m_tx_waitq_mutex);
8687 cmd = mpt->m_tx_waitq;
8688 while (cmd != NULL) {
8689 next_cmd = cmd->cmd_linkp;
8690 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8691 mptsas_tx_waitq_delete(mpt, cmd);
8692 mutex_exit(&mpt->m_tx_waitq_mutex);
8693 mptsas_set_pkt_reason(mpt, cmd,
8694 reason, stat);
8695 mptsas_doneq_add(mpt, cmd);
8696 mutex_enter(&mpt->m_tx_waitq_mutex);
8697 }
8698 cmd = next_cmd;
8699 }
8700 mutex_exit(&mpt->m_tx_waitq_mutex);
8701 break;
8702 default:
8703 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
8704 tasktype);
8705 break;
8706 }
8707 }
8708
8709 /*
8710 * Clean up hba state, abort all outstanding command and commands in waitq
8711 * reset timeout of all targets.
8712 */
8713 static void
8714 mptsas_flush_hba(mptsas_t *mpt)
8715 {
8716 mptsas_slots_t *slots = mpt->m_active;
8717 mptsas_cmd_t *cmd;
8718 int slot;
8719
8720 NDBG25(("mptsas_flush_hba"));
8721
8722 /*
8723 * The I/O Controller should have already sent back
8724 * all commands via the scsi I/O reply frame. Make
8725 * sure all commands have been flushed.
8726 * Account for TM request, which use the last SMID.
8727 */
8728 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
8729 if ((cmd = slots->m_slot[slot]) == NULL)
8730 continue;
8731
8732 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8733 /*
8734 * Need to make sure to tell everyone that might be
8735 * waiting on this command that it's going to fail. If
8736 * we get here, this command will never timeout because
8737 * the active command table is going to be re-allocated,
8738 * so there will be nothing to check against a time out.
8739 * Instead, mark the command as failed due to reset.
8740 */
8741 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
8742 STAT_BUS_RESET);
8743 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8744 (cmd->cmd_flags & CFLAG_CONFIG) ||
8745 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8746 cmd->cmd_flags |= CFLAG_FINISHED;
8747 cv_broadcast(&mpt->m_passthru_cv);
8748 cv_broadcast(&mpt->m_config_cv);
8749 cv_broadcast(&mpt->m_fw_diag_cv);
8750 }
8751 continue;
8752 }
8753
8754 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
8755 slot));
8756 mptsas_dump_cmd(mpt, cmd);
8757
8758 mptsas_remove_cmd(mpt, cmd);
8759 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8760 mptsas_doneq_add(mpt, cmd);
8761 }
8762
8763 /*
8764 * Flush the waitq.
8765 */
8766 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
8767 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8768 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8769 (cmd->cmd_flags & CFLAG_CONFIG) ||
8770 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8771 cmd->cmd_flags |= CFLAG_FINISHED;
8772 cv_broadcast(&mpt->m_passthru_cv);
8773 cv_broadcast(&mpt->m_config_cv);
8774 cv_broadcast(&mpt->m_fw_diag_cv);
8775 } else {
8776 mptsas_doneq_add(mpt, cmd);
8777 }
8778 }
8779
8780 /*
8781 * Flush the tx_waitq
8782 */
8783 mutex_enter(&mpt->m_tx_waitq_mutex);
8784 while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
8785 mutex_exit(&mpt->m_tx_waitq_mutex);
8786 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8787 mptsas_doneq_add(mpt, cmd);
8788 mutex_enter(&mpt->m_tx_waitq_mutex);
8789 }
8790 mutex_exit(&mpt->m_tx_waitq_mutex);
8791
8792 /*
8793 * Drain the taskqs prior to reallocating resources.
8794 */
8795 mutex_exit(&mpt->m_mutex);
8796 ddi_taskq_wait(mpt->m_event_taskq);
8797 ddi_taskq_wait(mpt->m_dr_taskq);
8798 mutex_enter(&mpt->m_mutex);
8799 }
8800
8801 /*
8802 * set pkt_reason and OR in pkt_statistics flag
8803 */
8804 static void
8805 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8806 uint_t stat)
8807 {
8808 #ifndef __lock_lint
8809 _NOTE(ARGUNUSED(mpt))
8810 #endif
8811
8812 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8813 (void *)cmd, reason, stat));
8814
8815 if (cmd) {
8816 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8817 cmd->cmd_pkt->pkt_reason = reason;
8818 }
8819 cmd->cmd_pkt->pkt_statistics |= stat;
8820 }
8821 }
8822
8823 static void
8824 mptsas_start_watch_reset_delay()
8825 {
8826 NDBG22(("mptsas_start_watch_reset_delay"));
8827
8828 mutex_enter(&mptsas_global_mutex);
8829 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8830 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8831 drv_usectohz((clock_t)
8832 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8833 ASSERT(mptsas_reset_watch != NULL);
8834 }
8835 mutex_exit(&mptsas_global_mutex);
8836 }
8837
8838 static void
8839 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8840 {
8841 mptsas_target_t *ptgt = NULL;
8842
8843 ASSERT(MUTEX_HELD(&mpt->m_mutex));
8844
8845 NDBG22(("mptsas_setup_bus_reset_delay"));
8846 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8847 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8848 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8849 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8850 }
8851
8852 mptsas_start_watch_reset_delay();
8853 }
8854
8855 /*
8856 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8857 * mpt instance for active reset delays
8858 */
8859 static void
8860 mptsas_watch_reset_delay(void *arg)
8861 {
8862 #ifndef __lock_lint
8863 _NOTE(ARGUNUSED(arg))
8864 #endif
8865
8866 mptsas_t *mpt;
8867 int not_done = 0;
8868
8869 NDBG22(("mptsas_watch_reset_delay"));
8870
8871 mutex_enter(&mptsas_global_mutex);
8872 mptsas_reset_watch = 0;
8873 mutex_exit(&mptsas_global_mutex);
8874 rw_enter(&mptsas_global_rwlock, RW_READER);
8875 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
8876 if (mpt->m_tran == 0) {
8877 continue;
8878 }
8879 mutex_enter(&mpt->m_mutex);
8880 not_done += mptsas_watch_reset_delay_subr(mpt);
8881 mutex_exit(&mpt->m_mutex);
8882 }
8883 rw_exit(&mptsas_global_rwlock);
8884
8885 if (not_done) {
8886 mptsas_start_watch_reset_delay();
8887 }
8888 }
8889
8890 static int
8891 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
8892 {
8893 int done = 0;
8894 int restart = 0;
8895 mptsas_target_t *ptgt = NULL;
8896
8897 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
8898
8899 ASSERT(mutex_owned(&mpt->m_mutex));
8900
8901 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8902 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8903 if (ptgt->m_reset_delay != 0) {
8904 ptgt->m_reset_delay -=
8905 MPTSAS_WATCH_RESET_DELAY_TICK;
8906 if (ptgt->m_reset_delay <= 0) {
8907 ptgt->m_reset_delay = 0;
8908 mptsas_set_throttle(mpt, ptgt,
8909 MAX_THROTTLE);
8910 restart++;
8911 } else {
8912 done = -1;
8913 }
8914 }
8915 }
8916
8917 if (restart > 0) {
8918 mptsas_restart_hba(mpt);
8919 }
8920 return (done);
8921 }
8922
8923 #ifdef MPTSAS_TEST
8924 static void
8925 mptsas_test_reset(mptsas_t *mpt, int target)
8926 {
8927 mptsas_target_t *ptgt = NULL;
8928
8929 if (mptsas_rtest == target) {
8930 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8931 mptsas_rtest = -1;
8932 }
8933 if (mptsas_rtest == -1) {
8934 NDBG22(("mptsas_test_reset success"));
8935 }
8936 }
8937 }
8938 #endif
8939
8940 /*
8941 * abort handling:
8942 *
8943 * Notes:
8944 * - if pkt is not NULL, abort just that command
8945 * - if pkt is NULL, abort all outstanding commands for target
8946 */
8947 static int
8948 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
8949 {
8950 mptsas_t *mpt = ADDR2MPT(ap);
8951 int rval;
8952 mptsas_tgt_private_t *tgt_private;
8953 int target, lun;
8954
8955 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
8956 tran_tgt_private;
8957 ASSERT(tgt_private != NULL);
8958 target = tgt_private->t_private->m_devhdl;
8959 lun = tgt_private->t_lun;
8960
8961 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
8962
8963 mutex_enter(&mpt->m_mutex);
8964 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
8965 mutex_exit(&mpt->m_mutex);
8966 return (rval);
8967 }
8968
/*
 * Abort one packet (pkt != NULL) or the whole task set for target/lun
 * (pkt == NULL).  Must be called with m_mutex held.  Returns TRUE on
 * success, FALSE on failure; aborted packets are completed through the
 * done queue with CMD_ABORTED/STAT_ABORTED.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		/*
		 * Still queued (never handed to the IOC): complete it
		 * locally, no firmware interaction needed.
		 */
		if (sp->cmd_queued) {
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Complete whatever the abort pushed onto the done queue. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9048
9049 /*
9050 * capability handling:
9051 * (*tran_getcap). Get the capability named, and return its value.
9052 */
9053 static int
9054 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9055 {
9056 mptsas_t *mpt = ADDR2MPT(ap);
9057 int ckey;
9058 int rval = FALSE;
9059
9060 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9061 ap->a_target, cap, tgtonly));
9062
9063 mutex_enter(&mpt->m_mutex);
9064
9065 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9066 mutex_exit(&mpt->m_mutex);
9067 return (UNDEFINED);
9068 }
9069
9070 switch (ckey) {
9071 case SCSI_CAP_DMA_MAX:
9072 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9073 break;
9074 case SCSI_CAP_ARQ:
9075 rval = TRUE;
9076 break;
9077 case SCSI_CAP_MSG_OUT:
9078 case SCSI_CAP_PARITY:
9079 case SCSI_CAP_UNTAGGED_QING:
9080 rval = TRUE;
9081 break;
9082 case SCSI_CAP_TAGGED_QING:
9083 rval = TRUE;
9084 break;
9085 case SCSI_CAP_RESET_NOTIFICATION:
9086 rval = TRUE;
9087 break;
9088 case SCSI_CAP_LINKED_CMDS:
9089 rval = FALSE;
9090 break;
9091 case SCSI_CAP_QFULL_RETRIES:
9092 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9093 tran_tgt_private))->t_private->m_qfull_retries;
9094 break;
9095 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9096 rval = drv_hztousec(((mptsas_tgt_private_t *)
9097 (ap->a_hba_tran->tran_tgt_private))->
9098 t_private->m_qfull_retry_interval) / 1000;
9099 break;
9100 case SCSI_CAP_CDB_LEN:
9101 rval = CDB_GROUP4;
9102 break;
9103 case SCSI_CAP_INTERCONNECT_TYPE:
9104 rval = INTERCONNECT_SAS;
9105 break;
9106 case SCSI_CAP_TRAN_LAYER_RETRIES:
9107 if (mpt->m_ioc_capabilities &
9108 MPI2_IOCFACTS_CAPABILITY_TLR)
9109 rval = TRUE;
9110 else
9111 rval = FALSE;
9112 break;
9113 default:
9114 rval = UNDEFINED;
9115 break;
9116 }
9117
9118 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9119
9120 mutex_exit(&mpt->m_mutex);
9121 return (rval);
9122 }
9123
9124 /*
9125 * (*tran_setcap). Set the capability named to the value given.
9126 */
9127 static int
9128 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9129 {
9130 mptsas_t *mpt = ADDR2MPT(ap);
9131 int ckey;
9132 int rval = FALSE;
9133
9134 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9135 ap->a_target, cap, value, tgtonly));
9136
9137 if (!tgtonly) {
9138 return (rval);
9139 }
9140
9141 mutex_enter(&mpt->m_mutex);
9142
9143 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9144 mutex_exit(&mpt->m_mutex);
9145 return (UNDEFINED);
9146 }
9147
9148 switch (ckey) {
9149 case SCSI_CAP_DMA_MAX:
9150 case SCSI_CAP_MSG_OUT:
9151 case SCSI_CAP_PARITY:
9152 case SCSI_CAP_INITIATOR_ID:
9153 case SCSI_CAP_LINKED_CMDS:
9154 case SCSI_CAP_UNTAGGED_QING:
9155 case SCSI_CAP_RESET_NOTIFICATION:
9156 /*
9157 * None of these are settable via
9158 * the capability interface.
9159 */
9160 break;
9161 case SCSI_CAP_ARQ:
9162 /*
9163 * We cannot turn off arq so return false if asked to
9164 */
9165 if (value) {
9166 rval = TRUE;
9167 } else {
9168 rval = FALSE;
9169 }
9170 break;
9171 case SCSI_CAP_TAGGED_QING:
9172 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9173 (ap->a_hba_tran->tran_tgt_private))->t_private,
9174 MAX_THROTTLE);
9175 rval = TRUE;
9176 break;
9177 case SCSI_CAP_QFULL_RETRIES:
9178 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9179 t_private->m_qfull_retries = (uchar_t)value;
9180 rval = TRUE;
9181 break;
9182 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9183 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9184 t_private->m_qfull_retry_interval =
9185 drv_usectohz(value * 1000);
9186 rval = TRUE;
9187 break;
9188 default:
9189 rval = UNDEFINED;
9190 break;
9191 }
9192 mutex_exit(&mpt->m_mutex);
9193 return (rval);
9194 }
9195
9196 /*
9197 * Utility routine for mptsas_ifsetcap/ifgetcap
9198 */
9199 /*ARGSUSED*/
9200 static int
9201 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9202 {
9203 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9204
9205 if (!cap)
9206 return (FALSE);
9207
9208 *cidxp = scsi_hba_lookup_capstr(cap);
9209 return (TRUE);
9210 }
9211
9212 static int
9213 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9214 {
9215 mptsas_slots_t *old_active = mpt->m_active;
9216 mptsas_slots_t *new_active;
9217 size_t size;
9218
9219 /*
9220 * if there are active commands, then we cannot
9221 * change size of active slots array.
9222 */
9223 ASSERT(mpt->m_ncmds == 0);
9224
9225 size = MPTSAS_SLOTS_SIZE(mpt);
9226 new_active = kmem_zalloc(size, flag);
9227 if (new_active == NULL) {
9228 NDBG1(("new active alloc failed"));
9229 return (-1);
9230 }
9231 /*
9232 * Since SMID 0 is reserved and the TM slot is reserved, the
9233 * number of slots that can be used at any one time is
9234 * m_max_requests - 2.
9235 */
9236 new_active->m_n_normal = (mpt->m_max_requests - 2);
9237 new_active->m_size = size;
9238 new_active->m_rotor = 1;
9239 if (old_active)
9240 mptsas_free_active_slots(mpt);
9241 mpt->m_active = new_active;
9242
9243 return (0);
9244 }
9245
9246 static void
9247 mptsas_free_active_slots(mptsas_t *mpt)
9248 {
9249 mptsas_slots_t *active = mpt->m_active;
9250 size_t size;
9251
9252 if (active == NULL)
9253 return;
9254 size = active->m_size;
9255 kmem_free(active, size);
9256 mpt->m_active = NULL;
9257 }
9258
9259 /*
9260 * Error logging, printing, and debug print routines.
9261 */
9262 static char *mptsas_label = "mpt_sas";
9263
9264 /*PRINTFLIKE3*/
9265 void
9266 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9267 {
9268 dev_info_t *dev;
9269 va_list ap;
9270
9271 if (mpt) {
9272 dev = mpt->m_dip;
9273 } else {
9274 dev = 0;
9275 }
9276
9277 mutex_enter(&mptsas_log_mutex);
9278
9279 va_start(ap, fmt);
9280 (void) vsprintf(mptsas_log_buf, fmt, ap);
9281 va_end(ap);
9282
9283 if (level == CE_CONT) {
9284 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9285 } else {
9286 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9287 }
9288
9289 mutex_exit(&mptsas_log_mutex);
9290 }
9291
9292 #ifdef MPTSAS_DEBUG
9293 /*PRINTFLIKE1*/
9294 void
9295 mptsas_printf(char *fmt, ...)
9296 {
9297 dev_info_t *dev = 0;
9298 va_list ap;
9299
9300 mutex_enter(&mptsas_log_mutex);
9301
9302 va_start(ap, fmt);
9303 (void) vsprintf(mptsas_log_buf, fmt, ap);
9304 va_end(ap);
9305
9306 #ifdef PROM_PRINTF
9307 prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
9308 #else
9309 scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
9310 #endif
9311 mutex_exit(&mptsas_log_mutex);
9312 }
9313 #endif
9314
9315 /*
9316 * timeout handling
9317 */
9318 static void
9319 mptsas_watch(void *arg)
9320 {
9321 #ifndef __lock_lint
9322 _NOTE(ARGUNUSED(arg))
9323 #endif
9324
9325 mptsas_t *mpt;
9326 uint32_t doorbell;
9327
9328 NDBG30(("mptsas_watch"));
9329
9330 rw_enter(&mptsas_global_rwlock, RW_READER);
9331 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9332
9333 mutex_enter(&mpt->m_mutex);
9334
9335 /* Skip device if not powered on */
9336 if (mpt->m_options & MPTSAS_OPT_PM) {
9337 if (mpt->m_power_level == PM_LEVEL_D0) {
9338 (void) pm_busy_component(mpt->m_dip, 0);
9339 mpt->m_busy = 1;
9340 } else {
9341 mutex_exit(&mpt->m_mutex);
9342 continue;
9343 }
9344 }
9345
9346 /*
9347 * Check if controller is in a FAULT state. If so, reset it.
9348 */
9349 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9350 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9351 doorbell &= MPI2_DOORBELL_DATA_MASK;
9352 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9353 "code: %04x", doorbell);
9354 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9355 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9356 mptsas_log(mpt, CE_WARN, "Reset failed"
9357 "after fault was detected");
9358 }
9359 }
9360
9361 /*
9362 * For now, always call mptsas_watchsubr.
9363 */
9364 mptsas_watchsubr(mpt);
9365
9366 if (mpt->m_options & MPTSAS_OPT_PM) {
9367 mpt->m_busy = 0;
9368 (void) pm_idle_component(mpt->m_dip, 0);
9369 }
9370
9371 mutex_exit(&mpt->m_mutex);
9372 }
9373 rw_exit(&mptsas_global_rwlock);
9374
9375 mutex_enter(&mptsas_global_mutex);
9376 if (mptsas_timeouts_enabled)
9377 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9378 mutex_exit(&mptsas_global_mutex);
9379 }
9380
/*
 * Per-instance watchdog work, called from mptsas_watch() with m_mutex
 * held: age active commands (draining a target's throttle on a stuck
 * command, timing out internal passthrough/config/diag requests) and age
 * per-target timeouts, resetting a target whose timeout expires.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			/* Ordinary (non-internal) commands only. */
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		/* Only age targets that have outstanding commands. */
		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			/*
			 * NOTE(review): while m_timebase is still within one
			 * watchdog tick it is advanced instead of aging
			 * m_timeout — presumably a warm-up grace period;
			 * confirm against where m_timebase is set.
			 */
			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			if (ptgt->m_timeout < 0) {
				/* Target timed out: attempt recovery. */
				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}
	}
}
9475
9476 /*
9477 * timeout recovery
9478 */
9479 static void
9480 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9481 {
9482
9483 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9484 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9485 "Target %d", devhdl);
9486
9487 /*
9488 * If the current target is not the target passed in,
9489 * try to reset that target.
9490 */
9491 NDBG29(("mptsas_cmd_timeout: device reset"));
9492 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9493 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9494 "recovery failed!", devhdl);
9495 }
9496 }
9497
9498 /*
9499 * Device / Hotplug control
9500 */
9501 static int
9502 mptsas_scsi_quiesce(dev_info_t *dip)
9503 {
9504 mptsas_t *mpt;
9505 scsi_hba_tran_t *tran;
9506
9507 tran = ddi_get_driver_private(dip);
9508 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9509 return (-1);
9510
9511 return (mptsas_quiesce_bus(mpt));
9512 }
9513
9514 static int
9515 mptsas_scsi_unquiesce(dev_info_t *dip)
9516 {
9517 mptsas_t *mpt;
9518 scsi_hba_tran_t *tran;
9519
9520 tran = ddi_get_driver_private(dip);
9521 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9522 return (-1);
9523
9524 return (mptsas_unquiesce_bus(mpt));
9525 }
9526
/*
 * Quiesce the bus: hold every target's throttle, then wait (interruptibly)
 * for outstanding commands to drain, polling via mptsas_ncmds_checkdrain().
 * Returns 0 once quiesced, -1 if the wait is interrupted by a signal (in
 * which case throttles are restored and I/O restarted).
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/* The checkdrain timeout signals m_cv when drained. */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Undo the hold and let queued I/O flow again. */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel the pending checkdrain timeout; untimeout
			 * must be called without holding m_mutex.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9579
9580 static int
9581 mptsas_unquiesce_bus(mptsas_t *mpt)
9582 {
9583 mptsas_target_t *ptgt = NULL;
9584
9585 NDBG28(("mptsas_unquiesce_bus"));
9586 mutex_enter(&mpt->m_mutex);
9587 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9588 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9589 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9590 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9591 }
9592 mptsas_restart_hba(mpt);
9593 mutex_exit(&mpt->m_mutex);
9594 return (0);
9595 }
9596
/*
 * timeout(9F) callback armed by mptsas_quiesce_bus(): periodically check
 * whether the command count has drained to zero.  Signals m_cv when the
 * drain completes; otherwise re-asserts the throttle holds (a bus reset
 * may have released them) and re-arms itself.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Only act while a quiesce drain is still in progress. */
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
			}

			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
9626
9627 /*ARGSUSED*/
9628 static void
9629 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9630 {
9631 int i;
9632 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9633 char buf[128];
9634
9635 buf[0] = '\0';
9636 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9637 Tgt(cmd), Lun(cmd)));
9638 (void) sprintf(&buf[0], "\tcdb=[");
9639 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9640 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9641 }
9642 (void) sprintf(&buf[strlen(buf)], " ]");
9643 NDBG25(("?%s\n", buf));
9644 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9645 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9646 cmd->cmd_pkt->pkt_state));
9647 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
9648 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
9649 }
9650
9651 static void
9652 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9653 {
9654 caddr_t memp;
9655 pMPI2RequestHeader_t request_hdrp;
9656 struct scsi_pkt *pkt = cmd->cmd_pkt;
9657 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
9658 uint32_t request_size, data_size, dataout_size;
9659 uint32_t direction;
9660 ddi_dma_cookie_t data_cookie;
9661 ddi_dma_cookie_t dataout_cookie;
9662 uint32_t request_desc_low, request_desc_high = 0;
9663 uint32_t i, sense_bufp;
9664 uint8_t desc_type;
9665 uint8_t *request, function;
9666 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
9667 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
9668
9669 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9670
9671 request = pt->request;
9672 direction = pt->direction;
9673 request_size = pt->request_size;
9674 data_size = pt->data_size;
9675 dataout_size = pt->dataout_size;
9676 data_cookie = pt->data_cookie;
9677 dataout_cookie = pt->dataout_cookie;
9678
9679 /*
9680 * Store the passthrough message in memory location
9681 * corresponding to our slot number
9682 */
9683 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9684 request_hdrp = (pMPI2RequestHeader_t)memp;
9685 bzero(memp, mpt->m_req_frame_size);
9686
9687 for (i = 0; i < request_size; i++) {
9688 bcopy(request + i, memp + i, 1);
9689 }
9690
9691 if (data_size || dataout_size) {
9692 pMpi2SGESimple64_t sgep;
9693 uint32_t sge_flags;
9694
9695 sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9696 request_size);
9697 if (dataout_size) {
9698
9699 sge_flags = dataout_size |
9700 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9701 MPI2_SGE_FLAGS_END_OF_BUFFER |
9702 MPI2_SGE_FLAGS_HOST_TO_IOC |
9703 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9704 MPI2_SGE_FLAGS_SHIFT);
9705 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9706 ddi_put32(acc_hdl, &sgep->Address.Low,
9707 (uint32_t)(dataout_cookie.dmac_laddress &
9708 0xffffffffull));
9709 ddi_put32(acc_hdl, &sgep->Address.High,
9710 (uint32_t)(dataout_cookie.dmac_laddress
9711 >> 32));
9712 sgep++;
9713 }
9714 sge_flags = data_size;
9715 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9716 MPI2_SGE_FLAGS_LAST_ELEMENT |
9717 MPI2_SGE_FLAGS_END_OF_BUFFER |
9718 MPI2_SGE_FLAGS_END_OF_LIST |
9719 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9720 MPI2_SGE_FLAGS_SHIFT);
9721 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9722 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9723 MPI2_SGE_FLAGS_SHIFT);
9724 } else {
9725 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9726 MPI2_SGE_FLAGS_SHIFT);
9727 }
9728 ddi_put32(acc_hdl, &sgep->FlagsLength,
9729 sge_flags);
9730 ddi_put32(acc_hdl, &sgep->Address.Low,
9731 (uint32_t)(data_cookie.dmac_laddress &
9732 0xffffffffull));
9733 ddi_put32(acc_hdl, &sgep->Address.High,
9734 (uint32_t)(data_cookie.dmac_laddress >> 32));
9735 }
9736
9737 function = request_hdrp->Function;
9738 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9739 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9740 pMpi2SCSIIORequest_t scsi_io_req;
9741
9742 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9743 /*
9744 * Put SGE for data and data_out buffer at the end of
9745 * scsi_io_request message header.(64 bytes in total)
9746 * Following above SGEs, the residual space will be
9747 * used by sense data.
9748 */
9749 ddi_put8(acc_hdl,
9750 &scsi_io_req->SenseBufferLength,
9751 (uint8_t)(request_size - 64));
9752
9753 sense_bufp = mpt->m_req_frame_dma_addr +
9754 (mpt->m_req_frame_size * cmd->cmd_slot);
9755 sense_bufp += 64;
9756 ddi_put32(acc_hdl,
9757 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9758
9759 /*
9760 * Set SGLOffset0 value
9761 */
9762 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9763 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9764
9765 /*
9766 * Setup descriptor info. RAID passthrough must use the
9767 * default request descriptor which is already set, so if this
9768 * is a SCSI IO request, change the descriptor to SCSI IO.
9769 */
9770 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9771 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9772 request_desc_high = (ddi_get16(acc_hdl,
9773 &scsi_io_req->DevHandle) << 16);
9774 }
9775 }
9776
9777 /*
9778 * We must wait till the message has been completed before
9779 * beginning the next message so we wait for this one to
9780 * finish.
9781 */
9782 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9783 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9784 cmd->cmd_rfm = NULL;
9785 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9786 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9787 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9788 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9789 }
9790 }
9791
9792
9793
/*
 * Execute a raw MPI2 passthrough request on behalf of an ioctl caller and
 * copy the reply (plus any sense or read data) back out to user space.
 *
 * request/reply/data/dataout are user-space addresses; the *_size
 * arguments are the corresponding byte counts, direction is one of the
 * MPTSAS_PASS_THRU_DIRECTION_* values and mode is the ioctl copy mode
 * passed to ddi_copyin/ddi_copyout.
 *
 * Task management requests are diverted to mptsas_ioc_task_management();
 * all other functions are wrapped in a blank cmd/pkt, started (or queued
 * on the waitq) and waited for on m_passthru_cv.
 *
 * Must be entered with m_mutex held; the mutex is dropped and re-acquired
 * around every user-space copy.  Returns 0 on success or an errno value
 * (EFAULT, EIO, ENOMEM, EAGAIN, ETIMEDOUT).
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t pt;
	mptsas_dma_alloc_state_t data_dma_state;
	mptsas_dma_alloc_state_t dataout_dma_state;
	caddr_t memp;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	uint32_t reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t request_hdrp;
	pMPI2RequestHeader_t request_msg;
	pMPI2DefaultReply_t reply_msg;
	Mpi2SCSIIOReply_t rep_msg;
	int i, status = 0, pt_flags = 0, rv = 0;
	int rvalue;
	uint8_t function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Default the reply to the zeroed stack copy; it is redirected to
	 * the reply frame below if the IOC returns an ADDRESS reply.
	 */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task management requests do not go through the normal cmd/pkt
	 * machinery; hand them to the dedicated TM path and return.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/*
	 * Allocate the data-in DMA buffer and, for a WRITE, prefill it
	 * from user space (mutex dropped for the copyin).
	 */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Allocate and fill the dataout buffer (always host-to-IOC). */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * NOTE(review): when data_size or dataout_size is 0 the
	 * corresponding *_dma_state struct was never initialized, so the
	 * cookie copied below is stack garbage.  mptsas_start_passthru()
	 * only dereferences a cookie when the matching size is non-zero,
	 * so this appears harmless — but confirm before relying on it.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Completion/timeout handling sets CFLAG_FINISHED and signals. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	/*
	 * Only a command that actually got a slot has a request frame to
	 * point back at (needed later for the sense data copyout).
	 */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		/* Timed-out passthru forces an IOC restart in cleanup. */
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	/* Map transport-level failures to errno values for the caller. */
	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		/*
		 * For SCSI IO replies the caller's reply buffer holds the
		 * fixed reply followed by sense bytes taken from the
		 * request frame at offset 64 (see mptsas_start_passthru).
		 */
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* Copy IOC-to-host data back to the caller for non-WRITE requests. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd returns the cmd to the pool itself. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out passthru leaves the IOC state unknown; restart it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
10099
10100 static int
10101 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10102 {
10103 /*
10104 * If timeout is 0, set timeout to default of 60 seconds.
10105 */
10106 if (data->Timeout == 0) {
10107 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10108 }
10109
10110 if (((data->DataSize == 0) &&
10111 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10112 ((data->DataSize != 0) &&
10113 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10114 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10115 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10116 (data->DataOutSize != 0))))) {
10117 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10118 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10119 } else {
10120 data->DataOutSize = 0;
10121 }
10122 /*
10123 * Send passthru request messages
10124 */
10125 return (mptsas_do_passthru(mpt,
10126 (uint8_t *)((uintptr_t)data->PtrRequest),
10127 (uint8_t *)((uintptr_t)data->PtrReply),
10128 (uint8_t *)((uintptr_t)data->PtrData),
10129 data->RequestSize, data->ReplySize,
10130 data->DataSize, data->DataDirection,
10131 (uint8_t *)((uintptr_t)data->PtrDataOut),
10132 data->DataOutSize, data->Timeout, mode));
10133 } else {
10134 return (EINVAL);
10135 }
10136 }
10137
10138 static uint8_t
10139 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10140 {
10141 uint8_t index;
10142
10143 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10144 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10145 return (index);
10146 }
10147 }
10148
10149 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10150 }
10151
/*
 * Build and send a FW diagnostic request (buffer POST or RELEASE) using
 * the request frame that corresponds to the command's slot.
 *
 * The mptsas_diag_request_t hung off pkt_ha_private selects the function
 * and points at the diagnostic buffer being posted or released.  Both
 * request types are posted with the default request descriptor.
 * Must be called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t pDiag_post_msg;
	pMpi2DiagReleaseRequest_t pDiag_release_msg;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	mptsas_diag_request_t *diag = pkt->pkt_ha_private;
	uint32_t request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Pass through the caller-supplied product-specific words. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE request only needs the function and buffer type. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10223
/*
 * Post a firmware diagnostic buffer to the IOC and wait for the reply.
 *
 * On success the buffer is marked valid and firmware-owned, *return_code
 * is set to MPTSAS_FW_DIAG_ERROR_SUCCESS and DDI_SUCCESS is returned.
 * On any failure *return_code remains MPTSAS_FW_DIAG_ERROR_POST_FAILED
 * and DDI_FAILURE is returned.  Called with m_mutex held (the command is
 * waited for on m_fw_diag_cv).
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t diag;
	int status, slot_num, post_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagBufferPostReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* diag lives on this stack frame; consumed by mptsas_start_diag. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Completion/timeout handling sets CFLAG_FINISHED and signals. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd returns the cmd to the pool itself. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10373
/*
 * Ask the IOC to release a firmware diagnostic buffer and wait for the
 * reply.
 *
 * diag_type distinguishes the ioctl paths that call here (e.g.
 * MPTSAS_FW_DIAG_TYPE_UNREGISTER clears the buffer's unique ID on a
 * successful release).  On success *return_code is set to
 * MPTSAS_FW_DIAG_ERROR_SUCCESS and DDI_SUCCESS is returned; otherwise
 * *return_code stays MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED and DDI_FAILURE
 * is returned.  Called with m_mutex held (waits on m_fw_diag_cv).
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t diag;
	int status, slot_num, rel_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagReleaseReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* diag lives on this stack frame; consumed by mptsas_start_diag. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Completion/timeout handling sets CFLAG_FINISHED and signals. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 *
		 * NOTE(review): owned_by_firmware was unconditionally
		 * cleared near the top of this function and nothing visible
		 * here sets it again, so the second clause appears to be
		 * always false at this point — confirm whether the
		 * completion path can set it before removing.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd returns the cmd to the pool itself. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10532
10533 static int
10534 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
10535 uint32_t *return_code)
10536 {
10537 mptsas_fw_diagnostic_buffer_t *pBuffer;
10538 uint8_t extended_type, buffer_type, i;
10539 uint32_t buffer_size;
10540 uint32_t unique_id;
10541 int status;
10542
10543 ASSERT(mutex_owned(&mpt->m_mutex));
10544
10545 extended_type = diag_register->ExtendedType;
10546 buffer_type = diag_register->BufferType;
10547 buffer_size = diag_register->RequestedBufferSize;
10548 unique_id = diag_register->UniqueId;
10549
10550 /*
10551 * Check for valid buffer type
10552 */
10553 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
10554 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10555 return (DDI_FAILURE);
10556 }
10557
10558 /*
10559 * Get the current buffer and look up the unique ID. The unique ID
10560 * should not be found. If it is, the ID is already in use.
10561 */
10562 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10563 pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
10564 if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10565 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10566 return (DDI_FAILURE);
10567 }
10568
10569 /*
10570 * The buffer's unique ID should not be registered yet, and the given
10571 * unique ID cannot be 0.
10572 */
10573 if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
10574 (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10575 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10576 return (DDI_FAILURE);
10577 }
10578
10579 /*
10580 * If this buffer is already posted as immediate, just change owner.
10581 */
10582 if (pBuffer->immediate && pBuffer->owned_by_firmware &&
10583 (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10584 pBuffer->immediate = FALSE;
10585 pBuffer->unique_id = unique_id;
10586 return (DDI_SUCCESS);
10587 }
10588
10589 /*
10590 * Post a new buffer after checking if it's enabled. The DMA buffer
10591 * that is allocated will be contiguous (sgl_len = 1).
10592 */
10593 if (!pBuffer->enabled) {
10594 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10595 return (DDI_FAILURE);
10596 }
10597 bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
10598 pBuffer->buffer_data.size = buffer_size;
10599 if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
10600 mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
10601 "diag buffer: size = %d bytes", buffer_size);
10602 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10603 return (DDI_FAILURE);
10604 }
10605
10606 /*
10607 * Copy the given info to the diag buffer and post the buffer.
10608 */
10609 pBuffer->buffer_type = buffer_type;
10610 pBuffer->immediate = FALSE;
10611 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
10612 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
10613 i++) {
10614 pBuffer->product_specific[i] =
10615 diag_register->ProductSpecific[i];
10616 }
10617 }
10618 pBuffer->extended_type = extended_type;
10619 pBuffer->unique_id = unique_id;
10620 status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
10621
10622 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10623 DDI_SUCCESS) {
10624 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
10625 "mptsas_diag_register.");
10626 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10627 status = DDI_FAILURE;
10628 }
10629
10630 /*
10631 * In case there was a failure, free the DMA buffer.
10632 */
10633 if (status == DDI_FAILURE) {
10634 mptsas_dma_free(&pBuffer->buffer_data);
10635 }
10636
10637 return (status);
10638 }
10639
10640 static int
10641 mptsas_diag_unregister(mptsas_t *mpt,
10642 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10643 {
10644 mptsas_fw_diagnostic_buffer_t *pBuffer;
10645 uint8_t i;
10646 uint32_t unique_id;
10647 int status;
10648
10649 ASSERT(mutex_owned(&mpt->m_mutex));
10650
10651 unique_id = diag_unregister->UniqueId;
10652
10653 /*
10654 * Get the current buffer and look up the unique ID. The unique ID
10655 * should be there.
10656 */
10657 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10658 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10659 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10660 return (DDI_FAILURE);
10661 }
10662
10663 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10664
10665 /*
10666 * Try to release the buffer from FW before freeing it. If release
10667 * fails, don't free the DMA buffer in case FW tries to access it
10668 * later. If buffer is not owned by firmware, can't release it.
10669 */
10670 if (!pBuffer->owned_by_firmware) {
10671 status = DDI_SUCCESS;
10672 } else {
10673 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10674 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10675 }
10676
10677 /*
10678 * At this point, return the current status no matter what happens with
10679 * the DMA buffer.
10680 */
10681 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10682 if (status == DDI_SUCCESS) {
10683 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10684 DDI_SUCCESS) {
10685 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10686 "in mptsas_diag_unregister.");
10687 ddi_fm_service_impact(mpt->m_dip,
10688 DDI_SERVICE_UNAFFECTED);
10689 }
10690 mptsas_dma_free(&pBuffer->buffer_data);
10691 }
10692
10693 return (status);
10694 }
10695
10696 static int
10697 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10698 uint32_t *return_code)
10699 {
10700 mptsas_fw_diagnostic_buffer_t *pBuffer;
10701 uint8_t i;
10702 uint32_t unique_id;
10703
10704 ASSERT(mutex_owned(&mpt->m_mutex));
10705
10706 unique_id = diag_query->UniqueId;
10707
10708 /*
10709 * If ID is valid, query on ID.
10710 * If ID is invalid, query on buffer type.
10711 */
10712 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10713 i = diag_query->BufferType;
10714 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10715 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10716 return (DDI_FAILURE);
10717 }
10718 } else {
10719 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10720 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10721 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10722 return (DDI_FAILURE);
10723 }
10724 }
10725
10726 /*
10727 * Fill query structure with the diag buffer info.
10728 */
10729 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10730 diag_query->BufferType = pBuffer->buffer_type;
10731 diag_query->ExtendedType = pBuffer->extended_type;
10732 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10733 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10734 i++) {
10735 diag_query->ProductSpecific[i] =
10736 pBuffer->product_specific[i];
10737 }
10738 }
10739 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10740 diag_query->DriverAddedBufferSize = 0;
10741 diag_query->UniqueId = pBuffer->unique_id;
10742 diag_query->ApplicationFlags = 0;
10743 diag_query->DiagnosticFlags = 0;
10744
10745 /*
10746 * Set/Clear application flags
10747 */
10748 if (pBuffer->immediate) {
10749 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10750 } else {
10751 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10752 }
10753 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10754 diag_query->ApplicationFlags |=
10755 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10756 } else {
10757 diag_query->ApplicationFlags &=
10758 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10759 }
10760 if (pBuffer->owned_by_firmware) {
10761 diag_query->ApplicationFlags |=
10762 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10763 } else {
10764 diag_query->ApplicationFlags &=
10765 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10766 }
10767
10768 return (DDI_SUCCESS);
10769 }
10770
10771 static int
10772 mptsas_diag_read_buffer(mptsas_t *mpt,
10773 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10774 uint32_t *return_code, int ioctl_mode)
10775 {
10776 mptsas_fw_diagnostic_buffer_t *pBuffer;
10777 uint8_t i, *pData;
10778 uint32_t unique_id, byte;
10779 int status;
10780
10781 ASSERT(mutex_owned(&mpt->m_mutex));
10782
10783 unique_id = diag_read_buffer->UniqueId;
10784
10785 /*
10786 * Get the current buffer and look up the unique ID. The unique ID
10787 * should be there.
10788 */
10789 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10790 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10791 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10792 return (DDI_FAILURE);
10793 }
10794
10795 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10796
10797 /*
10798 * Make sure requested read is within limits
10799 */
10800 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10801 pBuffer->buffer_data.size) {
10802 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10803 return (DDI_FAILURE);
10804 }
10805
10806 /*
10807 * Copy the requested data from DMA to the diag_read_buffer. The DMA
10808 * buffer that was allocated is one contiguous buffer.
10809 */
10810 pData = (uint8_t *)(pBuffer->buffer_data.memp +
10811 diag_read_buffer->StartingOffset);
10812 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10813 DDI_DMA_SYNC_FORCPU);
10814 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10815 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10816 != 0) {
10817 return (DDI_FAILURE);
10818 }
10819 }
10820 diag_read_buffer->Status = 0;
10821
10822 /*
10823 * Set or clear the Force Release flag.
10824 */
10825 if (pBuffer->force_release) {
10826 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10827 } else {
10828 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10829 }
10830
10831 /*
10832 * If buffer is to be reregistered, make sure it's not already owned by
10833 * firmware first.
10834 */
10835 status = DDI_SUCCESS;
10836 if (!pBuffer->owned_by_firmware) {
10837 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10838 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10839 return_code);
10840 }
10841 }
10842
10843 return (status);
10844 }
10845
10846 static int
10847 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10848 uint32_t *return_code)
10849 {
10850 mptsas_fw_diagnostic_buffer_t *pBuffer;
10851 uint8_t i;
10852 uint32_t unique_id;
10853 int status;
10854
10855 ASSERT(mutex_owned(&mpt->m_mutex));
10856
10857 unique_id = diag_release->UniqueId;
10858
10859 /*
10860 * Get the current buffer and look up the unique ID. The unique ID
10861 * should be there.
10862 */
10863 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10864 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10865 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10866 return (DDI_FAILURE);
10867 }
10868
10869 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10870
10871 /*
10872 * If buffer is not owned by firmware, it's already been released.
10873 */
10874 if (!pBuffer->owned_by_firmware) {
10875 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10876 return (DDI_FAILURE);
10877 }
10878
10879 /*
10880 * Release the buffer.
10881 */
10882 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10883 MPTSAS_FW_DIAG_TYPE_RELEASE);
10884 return (status);
10885 }
10886
10887 static int
10888 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10889 uint32_t length, uint32_t *return_code, int ioctl_mode)
10890 {
10891 mptsas_fw_diag_register_t diag_register;
10892 mptsas_fw_diag_unregister_t diag_unregister;
10893 mptsas_fw_diag_query_t diag_query;
10894 mptsas_diag_read_buffer_t diag_read_buffer;
10895 mptsas_fw_diag_release_t diag_release;
10896 int status = DDI_SUCCESS;
10897 uint32_t original_return_code, read_buf_len;
10898
10899 ASSERT(mutex_owned(&mpt->m_mutex));
10900
10901 original_return_code = *return_code;
10902 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10903
10904 switch (action) {
10905 case MPTSAS_FW_DIAG_TYPE_REGISTER:
10906 if (!length) {
10907 *return_code =
10908 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10909 status = DDI_FAILURE;
10910 break;
10911 }
10912 if (ddi_copyin(diag_action, &diag_register,
10913 sizeof (diag_register), ioctl_mode) != 0) {
10914 return (DDI_FAILURE);
10915 }
10916 status = mptsas_diag_register(mpt, &diag_register,
10917 return_code);
10918 break;
10919
10920 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10921 if (length < sizeof (diag_unregister)) {
10922 *return_code =
10923 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10924 status = DDI_FAILURE;
10925 break;
10926 }
10927 if (ddi_copyin(diag_action, &diag_unregister,
10928 sizeof (diag_unregister), ioctl_mode) != 0) {
10929 return (DDI_FAILURE);
10930 }
10931 status = mptsas_diag_unregister(mpt, &diag_unregister,
10932 return_code);
10933 break;
10934
10935 case MPTSAS_FW_DIAG_TYPE_QUERY:
10936 if (length < sizeof (diag_query)) {
10937 *return_code =
10938 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10939 status = DDI_FAILURE;
10940 break;
10941 }
10942 if (ddi_copyin(diag_action, &diag_query,
10943 sizeof (diag_query), ioctl_mode) != 0) {
10944 return (DDI_FAILURE);
10945 }
10946 status = mptsas_diag_query(mpt, &diag_query,
10947 return_code);
10948 if (status == DDI_SUCCESS) {
10949 if (ddi_copyout(&diag_query, diag_action,
10950 sizeof (diag_query), ioctl_mode) != 0) {
10951 return (DDI_FAILURE);
10952 }
10953 }
10954 break;
10955
10956 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10957 if (ddi_copyin(diag_action, &diag_read_buffer,
10958 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10959 return (DDI_FAILURE);
10960 }
10961 read_buf_len = sizeof (diag_read_buffer) -
10962 sizeof (diag_read_buffer.DataBuffer) +
10963 diag_read_buffer.BytesToRead;
10964 if (length < read_buf_len) {
10965 *return_code =
10966 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10967 status = DDI_FAILURE;
10968 break;
10969 }
10970 status = mptsas_diag_read_buffer(mpt,
10971 &diag_read_buffer, diag_action +
10972 sizeof (diag_read_buffer) - 4, return_code,
10973 ioctl_mode);
10974 if (status == DDI_SUCCESS) {
10975 if (ddi_copyout(&diag_read_buffer, diag_action,
10976 sizeof (diag_read_buffer) - 4, ioctl_mode)
10977 != 0) {
10978 return (DDI_FAILURE);
10979 }
10980 }
10981 break;
10982
10983 case MPTSAS_FW_DIAG_TYPE_RELEASE:
10984 if (length < sizeof (diag_release)) {
10985 *return_code =
10986 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10987 status = DDI_FAILURE;
10988 break;
10989 }
10990 if (ddi_copyin(diag_action, &diag_release,
10991 sizeof (diag_release), ioctl_mode) != 0) {
10992 return (DDI_FAILURE);
10993 }
10994 status = mptsas_diag_release(mpt, &diag_release,
10995 return_code);
10996 break;
10997
10998 default:
10999 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11000 status = DDI_FAILURE;
11001 break;
11002 }
11003
11004 if ((status == DDI_FAILURE) &&
11005 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11006 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11007 status = DDI_SUCCESS;
11008 }
11009
11010 return (status);
11011 }
11012
11013 static int
11014 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11015 {
11016 int status;
11017 mptsas_diag_action_t driver_data;
11018
11019 ASSERT(mutex_owned(&mpt->m_mutex));
11020
11021 /*
11022 * Copy the user data to a driver data buffer.
11023 */
11024 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11025 mode) == 0) {
11026 /*
11027 * Send diag action request if Action is valid
11028 */
11029 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11030 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11031 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11032 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11033 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11034 status = mptsas_do_diag_action(mpt, driver_data.Action,
11035 (void *)(uintptr_t)driver_data.PtrDiagAction,
11036 driver_data.Length, &driver_data.ReturnCode,
11037 mode);
11038 if (status == DDI_SUCCESS) {
11039 if (ddi_copyout(&driver_data.ReturnCode,
11040 &user_data->ReturnCode,
11041 sizeof (user_data->ReturnCode), mode)
11042 != 0) {
11043 status = EFAULT;
11044 } else {
11045 status = 0;
11046 }
11047 } else {
11048 status = EIO;
11049 }
11050 } else {
11051 status = EINVAL;
11052 }
11053 } else {
11054 status = EFAULT;
11055 }
11056
11057 return (status);
11058 }
11059
11060 /*
11061 * This routine handles the "event query" ioctl.
11062 */
11063 static int
11064 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11065 int *rval)
11066 {
11067 int status;
11068 mptsas_event_query_t driverdata;
11069 uint8_t i;
11070
11071 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11072
11073 mutex_enter(&mpt->m_mutex);
11074 for (i = 0; i < 4; i++) {
11075 driverdata.Types[i] = mpt->m_event_mask[i];
11076 }
11077 mutex_exit(&mpt->m_mutex);
11078
11079 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11080 status = EFAULT;
11081 } else {
11082 *rval = MPTIOCTL_STATUS_GOOD;
11083 status = 0;
11084 }
11085
11086 return (status);
11087 }
11088
11089 /*
11090 * This routine handles the "event enable" ioctl.
11091 */
11092 static int
11093 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11094 int *rval)
11095 {
11096 int status;
11097 mptsas_event_enable_t driverdata;
11098 uint8_t i;
11099
11100 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11101 mutex_enter(&mpt->m_mutex);
11102 for (i = 0; i < 4; i++) {
11103 mpt->m_event_mask[i] = driverdata.Types[i];
11104 }
11105 mutex_exit(&mpt->m_mutex);
11106
11107 *rval = MPTIOCTL_STATUS_GOOD;
11108 status = 0;
11109 } else {
11110 status = EFAULT;
11111 }
11112 return (status);
11113 }
11114
11115 /*
11116 * This routine handles the "event report" ioctl.
11117 */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int status;
	mptsas_event_report_t driverdata;

	mutex_enter(&mpt->m_mutex);

	/*
	 * Read only the Size field from the caller; it declares how large
	 * the user's event buffer is.
	 * NOTE(review): the copyin/copyout below run while m_mutex is
	 * held — presumably acceptable for this driver's locking rules;
	 * verify before reworking this path.
	 */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			/* Buffer is big enough: copy the whole event log. */
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * If the caller's buffer was strictly larger
				 * than the log, write back the size actually
				 * used so the caller knows how much is valid.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			/*
			 * Caller's buffer cannot hold the log: report the
			 * shortfall via *rval but return success.
			 */
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11162
11163 static void
11164 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11165 {
11166 int *reg_data;
11167 uint_t reglen;
11168
11169 /*
11170 * Lookup the 'reg' property and extract the other data
11171 */
11172 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11173 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11174 DDI_PROP_SUCCESS) {
11175 /*
11176 * Extract the PCI data from the 'reg' property first DWORD.
11177 * The entry looks like the following:
11178 * First DWORD:
11179 * Bits 0 - 7 8-bit Register number
11180 * Bits 8 - 10 3-bit Function number
11181 * Bits 11 - 15 5-bit Device number
11182 * Bits 16 - 23 8-bit Bus number
11183 * Bits 24 - 25 2-bit Address Space type identifier
11184 *
11185 */
11186 adapter_data->PciInformation.u.bits.BusNumber =
11187 (reg_data[0] & 0x00FF0000) >> 16;
11188 adapter_data->PciInformation.u.bits.DeviceNumber =
11189 (reg_data[0] & 0x0000F800) >> 11;
11190 adapter_data->PciInformation.u.bits.FunctionNumber =
11191 (reg_data[0] & 0x00000700) >> 8;
11192 ddi_prop_free((void *)reg_data);
11193 } else {
11194 /*
11195 * If we can't determine the PCI data then we fill in FF's for
11196 * the data to indicate this.
11197 */
11198 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11199 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11200 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11201 }
11202
11203 /*
11204 * Saved in the mpt->m_fwversion
11205 */
11206 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11207 }
11208
11209 static void
11210 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11211 {
11212 char *driver_verstr = MPTSAS_MOD_STRING;
11213
11214 mptsas_lookup_pci_data(mpt, adapter_data);
11215 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11216 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11217 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11218 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11219 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11220 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11221 adapter_data->BiosVersion = 0;
11222 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11223 }
11224
11225 static void
11226 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11227 {
11228 int *reg_data, i;
11229 uint_t reglen;
11230
11231 /*
11232 * Lookup the 'reg' property and extract the other data
11233 */
11234 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11235 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11236 DDI_PROP_SUCCESS) {
11237 /*
11238 * Extract the PCI data from the 'reg' property first DWORD.
11239 * The entry looks like the following:
11240 * First DWORD:
11241 * Bits 8 - 10 3-bit Function number
11242 * Bits 11 - 15 5-bit Device number
11243 * Bits 16 - 23 8-bit Bus number
11244 */
11245 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11246 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11247 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11248 ddi_prop_free((void *)reg_data);
11249 } else {
11250 /*
11251 * If we can't determine the PCI info then we fill in FF's for
11252 * the data to indicate this.
11253 */
11254 pci_info->BusNumber = 0xFFFFFFFF;
11255 pci_info->DeviceNumber = 0xFF;
11256 pci_info->FunctionNumber = 0xFF;
11257 }
11258
11259 /*
11260 * Now get the interrupt vector and the pci header. The vector can
11261 * only be 0 right now. The header is the first 256 bytes of config
11262 * space.
11263 */
11264 pci_info->InterruptVector = 0;
11265 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11266 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11267 i);
11268 }
11269 }
11270
/*
 * Handle MPTIOCTL_REG_ACCESS: read or write one 32-bit MPT register on
 * behalf of userland.  Only memory-mapped access is supported; IO-space
 * commands are rejected with EINVAL.  Returns 0, EINVAL, or EFAULT.
 * NOTE(review): driverdata.RegOffset comes from userland and is used to
 * index the register mapping without a range check — presumably acceptable
 * because the ioctl path is privileged; verify.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int status = 0;
	mptsas_reg_access_t driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
		/*
		 * IO access is not supported.
		 */
		case REG_IO_READ:
		case REG_IO_WRITE:
			mptsas_log(mpt, CE_WARN, "IO access is not "
			    "supported. Use memory access.");
			status = EINVAL;
			break;

		case REG_MEM_READ:
			/* RegOffset indexes 32-bit words from the mapping. */
			driverdata.RegData = ddi_get32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset);
			if (ddi_copyout(&driverdata.RegData,
			    &data->RegData,
			    sizeof (driverdata.RegData), mode) != 0) {
				mptsas_log(mpt, CE_WARN, "Register "
				    "Read Failed");
				status = EFAULT;
			}
			break;

		case REG_MEM_WRITE:
			ddi_put32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset,
			    driverdata.RegData);
			break;

		default:
			status = EINVAL;
			break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11321
11322 static int
11323 led_control(mptsas_t *mpt, intptr_t data, int mode)
11324 {
11325 int ret = 0;
11326 mptsas_led_control_t lc;
11327 mptsas_target_t *ptgt;
11328
11329 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
11330 return (EFAULT);
11331 }
11332
11333 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
11334 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
11335 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
11336 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
11337 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
11338 lc.LedStatus != 1)) {
11339 return (EINVAL);
11340 }
11341
11342 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
11343 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
11344 return (EACCES);
11345
11346 /* Locate the target we're interrogating... */
11347 mutex_enter(&mpt->m_mutex);
11348 ptgt = refhash_linear_search(mpt->m_targets,
11349 mptsas_target_eval_slot, &lc);
11350 if (ptgt == NULL) {
11351 /* We could not find a target for that enclosure/slot. */
11352 mutex_exit(&mpt->m_mutex);
11353 return (ENOENT);
11354 }
11355
11356 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
11357 /* Update our internal LED state. */
11358 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
11359 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
11360
11361 /* Flush it to the controller. */
11362 ret = mptsas_flush_led_status(mpt, ptgt);
11363 mutex_exit(&mpt->m_mutex);
11364 return (ret);
11365 }
11366
11367 /* Return our internal LED state. */
11368 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
11369 mutex_exit(&mpt->m_mutex);
11370
11371 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
11372 return (EFAULT);
11373 }
11374
11375 return (0);
11376 }
11377
/*
 * Handle MPTIOCTL_GET_DISK_INFO: report the number of known targets and,
 * when the caller supplied an array pointer, one mptsas_disk_info_t per
 * target (instance, enclosure, slot, SAS WWN).  The STRUCT_* macros cope
 * with 32-bit callers on a 64-bit kernel.  Returns 0, EACCES, EFAULT,
 * ENOSPC, or EAGAIN.
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t i = 0;
	uint16_t count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	/*
	 * Walk the targets again (the mutex was dropped while allocating,
	 * so the set may have changed) and record each one.
	 */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.  Release the iteration hold
			 * on the current target before bailing out.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* Report the number actually recorded (may be less than count). */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
11464
/*
 * Character-device ioctl entry point, shared by the HBA node and its
 * iport nodes.  Requires sys_config privilege.  The device is raised to
 * full power (D0) before any command that may touch hardware.  Iport
 * nodes get the generic SCSA HBA ioctls plus OK2RM LED handling for
 * online/offline; the HBA node handles the driver-private MPTIOCTL_*
 * commands and falls through to scsi_hba_ioctl() for anything else.
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;
	struct devctl_iocdata *dcp = NULL;
	char *addr = NULL;
	mptsas_target_t *ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	/* These are all administrative operations: require sys_config. */
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	/*
	 * NOTE(review): pm_busy_component() is called below, but no
	 * matching pm_idle_component() appears on this function's normal
	 * return paths — presumably balanced elsewhere; verify.
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	if (iport_flag) {
		/* Iport nodes: delegate to the generic SCSA HBA ioctls. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		if (status != 0) {
			goto out;
		}
		/*
		 * The following code control the OK2RM LED, it doesn't affect
		 * the ioctl return status.
		 */
		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
			if (ndi_dc_allochdl((void *)data, &dcp) !=
			    NDI_SUCCESS) {
				goto out;
			}
			addr = ndi_dc_getaddr(dcp);
			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
			if (ptgt == NULL) {
				NDBG14(("mptsas_ioctl led control: tgt %s not "
				    "found", addr));
				ndi_dc_freehdl(dcp);
				goto out;
			}
			mutex_enter(&mpt->m_mutex);
			if (cmd == DEVCTL_DEVICE_ONLINE) {
				ptgt->m_tgt_unconfigured = 0;
			} else if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_tgt_unconfigured = 1;
			}
			/* OK2RM LED on when offlined, off when onlined. */
			if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_led_status |=
				    (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			} else {
				ptgt->m_led_status &=
				    ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			}
			(void) mptsas_flush_led_status(mpt, ptgt);
			mutex_exit(&mpt->m_mutex);
			ndi_dc_freehdl(dcp);
		}
		goto out;
	}
	switch (cmd) {
		case MPTIOCTL_GET_DISK_INFO:
			status = get_disk_info(mpt, data, mode);
			break;
		case MPTIOCTL_LED_CONTROL:
			status = led_control(mpt, data, mode);
			break;
		case MPTIOCTL_UPDATE_FLASH:
			if (ddi_copyin((void *)data, &flashdata,
				sizeof (struct mptsas_update_flash), mode)) {
				status = EFAULT;
				break;
			}

			mutex_enter(&mpt->m_mutex);
			if (mptsas_update_flash(mpt,
			    (caddr_t)(long)flashdata.PtrBuffer,
			    flashdata.ImageSize, flashdata.ImageType, mode)) {
				status = EFAULT;
			}

			/*
			 * Reset the chip to start using the new
			 * firmware.  Reset if failed also.
			 */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_PASS_THRU:
			/*
			 * The user has requested to pass through a command to
			 * be executed by the MPT firmware.  Call our routine
			 * which does this.  Only allow one passthru IOCTL at
			 * one time. Other threads will block on
			 * m_passthru_mutex, which is of adaptive variant.
			 */
			if (ddi_copyin((void *)data, &passthru_data,
			    sizeof (mptsas_pass_thru_t), mode)) {
				status = EFAULT;
				break;
			}
			mutex_enter(&mpt->m_passthru_mutex);
			mutex_enter(&mpt->m_mutex);
			status = mptsas_pass_thru(mpt, &passthru_data, mode);
			mutex_exit(&mpt->m_mutex);
			mutex_exit(&mpt->m_passthru_mutex);

			break;
		case MPTIOCTL_GET_ADAPTER_DATA:
			/*
			 * The user has requested to read adapter data.  Call
			 * our routine which does this.
			 */
			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
			if (ddi_copyin((void *)data, (void *)&adapter_data,
			    sizeof (mptsas_adapter_data_t), mode)) {
				status = EFAULT;
				break;
			}
			if (adapter_data.StructureLength >=
			    sizeof (mptsas_adapter_data_t)) {
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (mptsas_adapter_data_t);
				mutex_enter(&mpt->m_mutex);
				mptsas_read_adapter_data(mpt, &adapter_data);
				mutex_exit(&mpt->m_mutex);
			} else {
				/*
				 * Caller's structure is too small: report the
				 * required length and copy back only that
				 * field.
				 */
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (adapter_data.StructureLength);
				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			}
			if (ddi_copyout((void *)(&adapter_data), (void *)data,
			    copylen, mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_GET_PCI_INFO:
			/*
			 * The user has requested to read pci info.  Call
			 * our routine which does this.
			 */
			bzero(&pci_info, sizeof (mptsas_pci_info_t));
			mutex_enter(&mpt->m_mutex);
			mptsas_read_pci_info(mpt, &pci_info);
			mutex_exit(&mpt->m_mutex);
			if (ddi_copyout((void *)(&pci_info), (void *)data,
			    sizeof (mptsas_pci_info_t), mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_RESET_ADAPTER:
			mutex_enter(&mpt->m_mutex);
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
				    "failed");
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_DIAG_ACTION:
			/*
			 * The user has done a diag buffer action.  Call our
			 * routine which does this.  Only allow one diag action
			 * at one time.
			 */
			mutex_enter(&mpt->m_mutex);
			if (mpt->m_diag_action_in_progress) {
				/*
				 * NOTE(review): this return bypasses the
				 * "out" label, like the other direct-return
				 * paths above.
				 */
				mutex_exit(&mpt->m_mutex);
				return (EBUSY);
			}
			mpt->m_diag_action_in_progress = 1;
			status = mptsas_diag_action(mpt,
			    (mptsas_diag_action_t *)data, mode);
			mpt->m_diag_action_in_progress = 0;
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_EVENT_QUERY:
			/*
			 * The user has done an event query.  Call our routine
			 * which does this.
			 */
			status = mptsas_event_query(mpt,
			    (mptsas_event_query_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_ENABLE:
			/*
			 * The user has done an event enable.  Call our routine
			 * which does this.
			 */
			status = mptsas_event_enable(mpt,
			    (mptsas_event_enable_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_REPORT:
			/*
			 * The user has done an event report.  Call our routine
			 * which does this.
			 */
			status = mptsas_event_report(mpt,
			    (mptsas_event_report_t *)data, mode, rval);
			break;
		case MPTIOCTL_REG_ACCESS:
			/*
			 * The user has requested register access.  Call our
			 * routine which does this.
			 */
			status = mptsas_reg_access(mpt,
			    (mptsas_reg_access_t *)data, mode);
			break;
		default:
			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
			    rval);
			break;
	}

out:
	return (status);
}
11727
/*
 * Reset and reinitialize the IOC, flushing all outstanding work.
 * Must be called with m_mutex held.  m_in_reset is raised for the
 * duration so the I/O path marks new commands for retry until the target
 * hash table has been rebuilt after the reset.  Returns DDI_SUCCESS or
 * DDI_FAILURE; on failure an FMA "no response" ereport is posted and the
 * service state is marked lost.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/* Complete anything queued for completion, then restart I/O. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
11806
/*
 * Bring the IOC to an operational state.  Called both from attach
 * (first_time == TRUE) and from mptsas_restart_ioc() after a reset
 * (first_time == FALSE).  After a message-unit reset (MUR) on a restart,
 * the per-IOC buffers are still valid and allocation is skipped via the
 * "mur" label.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * A message-unit reset on a restart preserves the IOC facts and
	 * all facts-derived buffers, so skip straight to re-init.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	/*
	 * NOTE(review): m_targets is (re)created unconditionally here;
	 * presumably any previous hash is torn down by the caller on the
	 * restart path -- verify, otherwise the old refhash leaks.
	 */
	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link),
	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.  Frames are laid out contiguously starting at
	 * m_reply_frame_dma_addr, m_reply_frame_size bytes apart.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 * All-ones marks a descriptor slot as unused.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* Check all DMA handles for FMA-detected errors. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
11990
11991 static int
11992 mptsas_get_pci_cap(mptsas_t *mpt)
11993 {
11994 ushort_t caps_ptr, cap, cap_count;
11995
11996 if (mpt->m_config_handle == NULL)
11997 return (FALSE);
11998 /*
11999 * Check if capabilities list is supported and if so,
12000 * get initial capabilities pointer and clear bits 0,1.
12001 */
12002 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
12003 & PCI_STAT_CAP) {
12004 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12005 PCI_CONF_CAP_PTR), 4);
12006 } else {
12007 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
12008 }
12009
12010 /*
12011 * Walk capabilities if supported.
12012 */
12013 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
12014
12015 /*
12016 * Check that we haven't exceeded the maximum number of
12017 * capabilities and that the pointer is in a valid range.
12018 */
12019 if (++cap_count > 48) {
12020 mptsas_log(mpt, CE_WARN,
12021 "too many device capabilities.\n");
12022 break;
12023 }
12024 if (caps_ptr < 64) {
12025 mptsas_log(mpt, CE_WARN,
12026 "capabilities pointer 0x%x out of range.\n",
12027 caps_ptr);
12028 break;
12029 }
12030
12031 /*
12032 * Get next capability and check that it is valid.
12033 * For now, we only support power management.
12034 */
12035 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
12036 switch (cap) {
12037 case PCI_CAP_ID_PM:
12038 mptsas_log(mpt, CE_NOTE,
12039 "?mptsas%d supports power management.\n",
12040 mpt->m_instance);
12041 mpt->m_options |= MPTSAS_OPT_PM;
12042
12043 /* Save PMCSR offset */
12044 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
12045 break;
12046 /*
12047 * The following capabilities are valid. Any others
12048 * will cause a message to be logged.
12049 */
12050 case PCI_CAP_ID_VPD:
12051 case PCI_CAP_ID_MSI:
12052 case PCI_CAP_ID_PCIX:
12053 case PCI_CAP_ID_PCI_E:
12054 case PCI_CAP_ID_MSI_X:
12055 break;
12056 default:
12057 mptsas_log(mpt, CE_NOTE,
12058 "?mptsas%d unrecognized capability "
12059 "0x%x.\n", mpt->m_instance, cap);
12060 break;
12061 }
12062
12063 /*
12064 * Get next capabilities pointer and clear bits 0,1.
12065 */
12066 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12067 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
12068 }
12069 return (TRUE);
12070 }
12071
12072 static int
12073 mptsas_init_pm(mptsas_t *mpt)
12074 {
12075 char pmc_name[16];
12076 char *pmc[] = {
12077 NULL,
12078 "0=Off (PCI D3 State)",
12079 "3=On (PCI D0 State)",
12080 NULL
12081 };
12082 uint16_t pmcsr_stat;
12083
12084 if (mptsas_get_pci_cap(mpt) == FALSE) {
12085 return (DDI_FAILURE);
12086 }
12087 /*
12088 * If PCI's capability does not support PM, then don't need
12089 * to registe the pm-components
12090 */
12091 if (!(mpt->m_options & MPTSAS_OPT_PM))
12092 return (DDI_SUCCESS);
12093 /*
12094 * If power management is supported by this chip, create
12095 * pm-components property for the power management framework
12096 */
12097 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12098 pmc[0] = pmc_name;
12099 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12100 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12101 mpt->m_options &= ~MPTSAS_OPT_PM;
12102 mptsas_log(mpt, CE_WARN,
12103 "mptsas%d: pm-component property creation failed.",
12104 mpt->m_instance);
12105 return (DDI_FAILURE);
12106 }
12107
12108 /*
12109 * Power on device.
12110 */
12111 (void) pm_busy_component(mpt->m_dip, 0);
12112 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12113 mpt->m_pmcsr_offset);
12114 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12115 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12116 mpt->m_instance);
12117 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12118 PCI_PMCSR_D0);
12119 }
12120 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12121 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12122 return (DDI_FAILURE);
12123 }
12124 mpt->m_power_level = PM_LEVEL_D0;
12125 /*
12126 * Set pm idle delay.
12127 */
12128 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12129 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12130
12131 return (DDI_SUCCESS);
12132 }
12133
12134 static int
12135 mptsas_register_intrs(mptsas_t *mpt)
12136 {
12137 dev_info_t *dip;
12138 int intr_types;
12139
12140 dip = mpt->m_dip;
12141
12142 /* Get supported interrupt types */
12143 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12144 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
12145 "failed\n");
12146 return (FALSE);
12147 }
12148
12149 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12150
12151 /*
12152 * Try MSI, but fall back to FIXED
12153 */
12154 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12155 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12156 NDBG0(("Using MSI interrupt type"));
12157 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12158 return (TRUE);
12159 }
12160 }
12161 if (intr_types & DDI_INTR_TYPE_FIXED) {
12162 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12163 NDBG0(("Using FIXED interrupt type"));
12164 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12165 return (TRUE);
12166 } else {
12167 NDBG0(("FIXED interrupt registration failed"));
12168 return (FALSE);
12169 }
12170 }
12171
12172 return (FALSE);
12173 }
12174
/*
 * Thin wrapper around mptsas_rem_intrs(); exists as the symmetric
 * counterpart to mptsas_register_intrs() for detach paths.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12180
12181 /*
12182 * mptsas_add_intrs:
12183 *
12184 * Register FIXED or MSI interrupts.
12185 */
12186 static int
12187 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12188 {
12189 dev_info_t *dip = mpt->m_dip;
12190 int avail, actual, count = 0;
12191 int i, flag, ret;
12192
12193 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12194
12195 /* Get number of interrupts */
12196 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12197 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12198 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12199 "ret %d count %d\n", ret, count);
12200
12201 return (DDI_FAILURE);
12202 }
12203
12204 /* Get number of available interrupts */
12205 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12206 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12207 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12208 "ret %d avail %d\n", ret, avail);
12209
12210 return (DDI_FAILURE);
12211 }
12212
12213 if (avail < count) {
12214 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
12215 "navail() returned %d", count, avail);
12216 }
12217
12218 /* Mpt only have one interrupt routine */
12219 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12220 count = 1;
12221 }
12222
12223 /* Allocate an array of interrupt handles */
12224 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12225 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12226
12227 flag = DDI_INTR_ALLOC_NORMAL;
12228
12229 /* call ddi_intr_alloc() */
12230 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12231 count, &actual, flag);
12232
12233 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12234 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12235 ret);
12236 kmem_free(mpt->m_htable, mpt->m_intr_size);
12237 return (DDI_FAILURE);
12238 }
12239
12240 /* use interrupt count returned or abort? */
12241 if (actual < count) {
12242 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12243 count, actual);
12244 }
12245
12246 mpt->m_intr_cnt = actual;
12247
12248 /*
12249 * Get priority for first msi, assume remaining are all the same
12250 */
12251 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12252 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12253 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12254
12255 /* Free already allocated intr */
12256 for (i = 0; i < actual; i++) {
12257 (void) ddi_intr_free(mpt->m_htable[i]);
12258 }
12259
12260 kmem_free(mpt->m_htable, mpt->m_intr_size);
12261 return (DDI_FAILURE);
12262 }
12263
12264 /* Test for high level mutex */
12265 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12266 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12267 "Hi level interrupt not supported\n");
12268
12269 /* Free already allocated intr */
12270 for (i = 0; i < actual; i++) {
12271 (void) ddi_intr_free(mpt->m_htable[i]);
12272 }
12273
12274 kmem_free(mpt->m_htable, mpt->m_intr_size);
12275 return (DDI_FAILURE);
12276 }
12277
12278 /* Call ddi_intr_add_handler() */
12279 for (i = 0; i < actual; i++) {
12280 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12281 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12282 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12283 "failed %d\n", ret);
12284
12285 /* Free already allocated intr */
12286 for (i = 0; i < actual; i++) {
12287 (void) ddi_intr_free(mpt->m_htable[i]);
12288 }
12289
12290 kmem_free(mpt->m_htable, mpt->m_intr_size);
12291 return (DDI_FAILURE);
12292 }
12293 }
12294
12295 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12296 != DDI_SUCCESS) {
12297 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12298
12299 /* Free already allocated intr */
12300 for (i = 0; i < actual; i++) {
12301 (void) ddi_intr_free(mpt->m_htable[i]);
12302 }
12303
12304 kmem_free(mpt->m_htable, mpt->m_intr_size);
12305 return (DDI_FAILURE);
12306 }
12307
12308 /*
12309 * Enable interrupts
12310 */
12311 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12312 /* Call ddi_intr_block_enable() for MSI interrupts */
12313 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12314 } else {
12315 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12316 for (i = 0; i < mpt->m_intr_cnt; i++) {
12317 (void) ddi_intr_enable(mpt->m_htable[i]);
12318 }
12319 }
12320 return (DDI_SUCCESS);
12321 }
12322
12323 /*
12324 * mptsas_rem_intrs:
12325 *
12326 * Unregister FIXED or MSI interrupts
12327 */
12328 static void
12329 mptsas_rem_intrs(mptsas_t *mpt)
12330 {
12331 int i;
12332
12333 NDBG6(("mptsas_rem_intrs"));
12334
12335 /* Disable all interrupts */
12336 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12337 /* Call ddi_intr_block_disable() */
12338 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12339 } else {
12340 for (i = 0; i < mpt->m_intr_cnt; i++) {
12341 (void) ddi_intr_disable(mpt->m_htable[i]);
12342 }
12343 }
12344
12345 /* Call ddi_intr_remove_handler() */
12346 for (i = 0; i < mpt->m_intr_cnt; i++) {
12347 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12348 (void) ddi_intr_free(mpt->m_htable[i]);
12349 }
12350
12351 kmem_free(mpt->m_htable, mpt->m_intr_size);
12352 }
12353
12354 /*
12355 * The IO fault service error handling callback function
12356 */
12357 /*ARGSUSED*/
/*
 * The IO fault service error handling callback function.  Posts the
 * error as a PCI ereport and reports the framework's status back; the
 * driver can always deal with an error in any dma or access handle,
 * so no additional recovery is attempted here.
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
12368
12369 /*
12370 * mptsas_fm_init - initialize fma capabilities and register with IO
12371 * fault services.
12372 */
12373 static void
12374 mptsas_fm_init(mptsas_t *mpt)
12375 {
12376 /*
12377 * Need to change iblock to priority for new MSI intr
12378 */
12379 ddi_iblock_cookie_t fm_ibc;
12380
12381 /* Only register with IO Fault Services if we have some capability */
12382 if (mpt->m_fm_capabilities) {
12383 /* Adjust access and dma attributes for FMA */
12384 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12385 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12386 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12387
12388 /*
12389 * Register capabilities with IO Fault Services.
12390 * mpt->m_fm_capabilities will be updated to indicate
12391 * capabilities actually supported (not requested.)
12392 */
12393 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12394
12395 /*
12396 * Initialize pci ereport capabilities if ereport
12397 * capable (should always be.)
12398 */
12399 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12400 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12401 pci_ereport_setup(mpt->m_dip);
12402 }
12403
12404 /*
12405 * Register error callback if error callback capable.
12406 */
12407 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12408 ddi_fm_handler_register(mpt->m_dip,
12409 mptsas_fm_error_cb, (void *) mpt);
12410 }
12411 }
12412 }
12413
12414 /*
12415 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12416 * fault services.
12417 *
12418 */
12419 static void
12420 mptsas_fm_fini(mptsas_t *mpt)
12421 {
12422 /* Only unregister FMA capabilities if registered */
12423 if (mpt->m_fm_capabilities) {
12424
12425 /*
12426 * Un-register error callback if error callback capable.
12427 */
12428
12429 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12430 ddi_fm_handler_unregister(mpt->m_dip);
12431 }
12432
12433 /*
12434 * Release any resources allocated by pci_ereport_setup()
12435 */
12436
12437 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12438 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12439 pci_ereport_teardown(mpt->m_dip);
12440 }
12441
12442 /* Unregister from IO Fault Services */
12443 ddi_fm_fini(mpt->m_dip);
12444
12445 /* Adjust access and dma attributes for FMA */
12446 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12447 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12448 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12449
12450 }
12451 }
12452
12453 int
12454 mptsas_check_acc_handle(ddi_acc_handle_t handle)
12455 {
12456 ddi_fm_error_t de;
12457
12458 if (handle == NULL)
12459 return (DDI_FAILURE);
12460 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
12461 return (de.fme_status);
12462 }
12463
12464 int
12465 mptsas_check_dma_handle(ddi_dma_handle_t handle)
12466 {
12467 ddi_fm_error_t de;
12468
12469 if (handle == NULL)
12470 return (DDI_FAILURE);
12471 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
12472 return (de.fme_status);
12473 }
12474
12475 void
12476 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12477 {
12478 uint64_t ena;
12479 char buf[FM_MAX_CLASS];
12480
12481 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12482 ena = fm_ena_generate(0, FM_ENA_FMT1);
12483 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12484 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12485 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12486 }
12487 }
12488
12489 static int
12490 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12491 uint16_t *dev_handle, mptsas_target_t **pptgt)
12492 {
12493 int rval;
12494 uint32_t dev_info;
12495 uint64_t sas_wwn;
12496 mptsas_phymask_t phymask;
12497 uint8_t physport, phynum, config, disk;
12498 uint64_t devicename;
12499 uint16_t pdev_hdl;
12500 mptsas_target_t *tmp_tgt = NULL;
12501 uint16_t bay_num, enclosure;
12502
12503 ASSERT(*pptgt == NULL);
12504
12505 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12506 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12507 &bay_num, &enclosure);
12508 if (rval != DDI_SUCCESS) {
12509 rval = DEV_INFO_FAIL_PAGE0;
12510 return (rval);
12511 }
12512
12513 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12514 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12515 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12516 rval = DEV_INFO_WRONG_DEVICE_TYPE;
12517 return (rval);
12518 }
12519
12520 /*
12521 * Check if the dev handle is for a Phys Disk. If so, set return value
12522 * and exit. Don't add Phys Disks to hash.
12523 */
12524 for (config = 0; config < mpt->m_num_raid_configs; config++) {
12525 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12526 if (*dev_handle == mpt->m_raidconfig[config].
12527 m_physdisk_devhdl[disk]) {
12528 rval = DEV_INFO_PHYS_DISK;
12529 return (rval);
12530 }
12531 }
12532 }
12533
12534 /*
12535 * Get SATA Device Name from SAS device page0 for
12536 * sata device, if device name doesn't exist, set mta_wwn to
12537 * 0 for direct attached SATA. For the device behind the expander
12538 * we still can use STP address assigned by expander.
12539 */
12540 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12541 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12542 mutex_exit(&mpt->m_mutex);
12543 /* alloc a tmp_tgt to send the cmd */
12544 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12545 KM_SLEEP);
12546 tmp_tgt->m_devhdl = *dev_handle;
12547 tmp_tgt->m_deviceinfo = dev_info;
12548 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12549 tmp_tgt->m_qfull_retry_interval =
12550 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12551 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12552 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12553 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12554 mutex_enter(&mpt->m_mutex);
12555 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12556 sas_wwn = devicename;
12557 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12558 sas_wwn = 0;
12559 }
12560 }
12561
12562 phymask = mptsas_physport_to_phymask(mpt, physport);
12563 *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
12564 dev_info, phymask, phynum);
12565 if (*pptgt == NULL) {
12566 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
12567 "structure!");
12568 rval = DEV_INFO_FAIL_ALLOC;
12569 return (rval);
12570 }
12571 (*pptgt)->m_enclosure = enclosure;
12572 (*pptgt)->m_slot_num = bay_num;
12573 return (DEV_INFO_SUCCESS);
12574 }
12575
12576 uint64_t
12577 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
12578 {
12579 uint64_t sata_guid = 0, *pwwn = NULL;
12580 int target = ptgt->m_devhdl;
12581 uchar_t *inq83 = NULL;
12582 int inq83_len = 0xFF;
12583 uchar_t *dblk = NULL;
12584 int inq83_retry = 3;
12585 int rval = DDI_FAILURE;
12586
12587 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
12588
12589 inq83_retry:
12590 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
12591 inq83_len, NULL, 1);
12592 if (rval != DDI_SUCCESS) {
12593 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
12594 "0x83 for target:%x, lun:%x failed!", target, lun);
12595 goto out;
12596 }
12597 /* According to SAT2, the first descriptor is logic unit name */
12598 dblk = &inq83[4];
12599 if ((dblk[1] & 0x30) != 0) {
12600 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
12601 goto out;
12602 }
12603 pwwn = (uint64_t *)(void *)(&dblk[4]);
12604 if ((dblk[4] & 0xf0) == 0x50) {
12605 sata_guid = BE_64(*pwwn);
12606 goto out;
12607 } else if (dblk[4] == 'A') {
12608 NDBG20(("SATA drive has no NAA format GUID."));
12609 goto out;
12610 } else {
12611 /* The data is not ready, wait and retry */
12612 inq83_retry--;
12613 if (inq83_retry <= 0) {
12614 goto out;
12615 }
12616 NDBG20(("The GUID is not ready, retry..."));
12617 delay(1 * drv_usectohz(1000000));
12618 goto inq83_retry;
12619 }
12620 out:
12621 kmem_free(inq83, inq83_len);
12622 return (sata_guid);
12623 }
12624
/*
 * Issue a SCSI INQUIRY (standard or EVPD page `page') to the given
 * target/lun and copy up to `len' bytes of the response into `buf'.
 * When `reallen' is non-NULL it receives the number of bytes actually
 * transferred (len - residual).  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
    unsigned char *buf, int len, int *reallen, uchar_t evpd)
{
	uchar_t			cdb[CDB_GROUP0];
	struct scsi_address	ap;
	struct buf		*data_bp = NULL;
	int			resid = 0;
	int			ret = DDI_FAILURE;

	/* Allocation length is a 16-bit field in the CDB. */
	ASSERT(len <= 0xffff);

	/*
	 * Target addressing is done via the tgt_private hung off the
	 * cloned tran in mptsas_send_scsi_cmd(); a_target is a dummy.
	 */
	ap.a_target = MPTSAS_INVALID_DEVHDL;
	ap.a_lun = (uchar_t)(lun);
	ap.a_hba_tran = mpt->m_tran;

	data_bp = scsi_alloc_consistent_buf(&ap,
	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
	if (data_bp == NULL) {
		return (ret);
	}
	/* Build a GROUP0 INQUIRY CDB (allocation length big-endian). */
	bzero(cdb, CDB_GROUP0);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd;
	cdb[2] = page;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
	    &resid);
	if (ret == DDI_SUCCESS) {
		if (reallen) {
			*reallen = len - resid;
		}
		/*
		 * Full `len' bytes are copied even on a short transfer;
		 * the tail beyond *reallen is whatever the consistent
		 * buffer held.
		 */
		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
	}
	if (data_bp) {
		scsi_free_consistent_buf(data_bp);
	}
	return (ret);
}
12667
12668 static int
12669 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
12670 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
12671 int *resid)
12672 {
12673 struct scsi_pkt *pktp = NULL;
12674 scsi_hba_tran_t *tran_clone = NULL;
12675 mptsas_tgt_private_t *tgt_private = NULL;
12676 int ret = DDI_FAILURE;
12677
12678 /*
12679 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
12680 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
12681 * to simulate the cmds from sd
12682 */
12683 tran_clone = kmem_alloc(
12684 sizeof (scsi_hba_tran_t), KM_SLEEP);
12685 if (tran_clone == NULL) {
12686 goto out;
12687 }
12688 bcopy((caddr_t)mpt->m_tran,
12689 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
12690 tgt_private = kmem_alloc(
12691 sizeof (mptsas_tgt_private_t), KM_SLEEP);
12692 if (tgt_private == NULL) {
12693 goto out;
12694 }
12695 tgt_private->t_lun = ap->a_lun;
12696 tgt_private->t_private = ptgt;
12697 tran_clone->tran_tgt_private = tgt_private;
12698 ap->a_hba_tran = tran_clone;
12699
12700 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
12701 data_bp, cdblen, sizeof (struct scsi_arq_status),
12702 0, PKT_CONSISTENT, NULL, NULL);
12703 if (pktp == NULL) {
12704 goto out;
12705 }
12706 bcopy(cdb, pktp->pkt_cdbp, cdblen);
12707 pktp->pkt_flags = FLAG_NOPARITY;
12708 if (scsi_poll(pktp) < 0) {
12709 goto out;
12710 }
12711 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
12712 goto out;
12713 }
12714 if (resid != NULL) {
12715 *resid = pktp->pkt_resid;
12716 }
12717
12718 ret = DDI_SUCCESS;
12719 out:
12720 if (pktp) {
12721 scsi_destroy_pkt(pktp);
12722 }
12723 if (tran_clone) {
12724 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
12725 }
12726 if (tgt_private) {
12727 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
12728 }
12729 return (ret);
12730 }
/*
 * Parse a unit-address string of the form "w<WWID>,<LUN>" or
 * "p<PHY>,<LUN>" (both numbers hexadecimal).  On success returns
 * DDI_SUCCESS with *lun set, and either *wwid (for 'w') or *phy
 * (for 'p') filled in; the untouched out-parameter keeps its
 * caller-supplied value.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char		*cp = NULL;
	char		*ptr = NULL;
	size_t		s = 0;
	char		*wwid_str = NULL;
	char		*lun_str = NULL;
	long		lunnum;
	long		phyid = -1;
	int		rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	/* The WWID/PHY part ends at the comma separating it from the LUN. */
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/*
	 * NOTE(review): strchr(ptr, '\0') always returns a pointer to the
	 * terminator, never NULL, so this check is dead code; kept as-is.
	 */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	/* 'p' addresses carry a hex PHY number; 'w' addresses a WWN string. */
	if (name[0] == 'p') {
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
12793
12794 /*
12795 * mptsas_parse_smp_name() is to parse sas wwn string
12796 * which format is "wWWN"
12797 */
12798 static int
12799 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12800 {
12801 char *ptr = name;
12802
12803 if (*ptr != 'w') {
12804 return (DDI_FAILURE);
12805 }
12806
12807 ptr++;
12808 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12809 return (DDI_FAILURE);
12810 }
12811 return (DDI_SUCCESS);
12812 }
12813
/*
 * Nexus bus_config(9E) entry point for the iport.  Handles
 * BUS_CONFIG_ONE (config a single child named by `arg', which may be
 * an SMP target "smp@w...", a WWN address "...@w...,lun" or a PHY
 * address "...@p...,lun"), and BUS_CONFIG_DRIVER/BUS_CONFIG_ALL
 * (enumerate everything).  The vHCI and the iport are held across the
 * operation.  Returns an NDI_* status.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (which carry a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				/* Hold stands in for the skipped bus_config. */
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		/* Enumerate every device visible on this iport. */
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/*
	 * Let the generic framework attach the child; use the adjusted
	 * (LUN-canonicalized) name when one was built above.
	 */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
12931
12932 static int
12933 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12934 mptsas_target_t *ptgt)
12935 {
12936 int rval = DDI_FAILURE;
12937 struct scsi_inquiry *sd_inq = NULL;
12938 mptsas_t *mpt = DIP2MPT(pdip);
12939
12940 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12941
12942 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12943 SUN_INQSIZE, 0, (uchar_t)0);
12944
12945 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12946 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12947 } else {
12948 rval = DDI_FAILURE;
12949 }
12950
12951 kmem_free(sd_inq, SUN_INQSIZE);
12952 return (rval);
12953 }
12954
/*
 * Configure a single LUN addressed by SAS WWN (the "w..." unit-address
 * form) under the given iport.  On success *lundip points at the child
 * dev_info node.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int rval;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask;
	mptsas_target_t *ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another scenario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		/*
		 * NOTE(review): ptgt was just looked up by this very
		 * sasaddr, so this comparison looks like it can never be
		 * true -- confirm whether mta_wwn can diverge from the
		 * search key (enclosure mapping mode?).
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	/* phymask 0 denotes the virtual iport carrying IR (RAID) volumes */
	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
13015
13016 static int
13017 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13018 dev_info_t **lundip)
13019 {
13020 int rval;
13021 mptsas_t *mpt = DIP2MPT(pdip);
13022 mptsas_phymask_t phymask;
13023 mptsas_target_t *ptgt = NULL;
13024
13025 /*
13026 * Get the physical port associated to the iport
13027 */
13028 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13029 "phymask", 0);
13030
13031 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13032 if (ptgt == NULL) {
13033 /*
13034 * didn't match any device by searching
13035 */
13036 return (DDI_FAILURE);
13037 }
13038
13039 /*
13040 * If the LUN already exists and the status is online,
13041 * we just return the pointer to dev_info_t directly.
13042 * For the mdi_pathinfo node, we'll handle it in
13043 * mptsas_create_virt_lun().
13044 */
13045
13046 *lundip = mptsas_find_child_phy(pdip, phy);
13047 if (*lundip != NULL) {
13048 return (DDI_SUCCESS);
13049 }
13050
13051 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13052
13053 return (rval);
13054 }
13055
13056 static int
13057 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13058 uint8_t *lun_addr_type)
13059 {
13060 uint32_t lun_idx = 0;
13061
13062 ASSERT(lun_num != NULL);
13063 ASSERT(lun_addr_type != NULL);
13064
13065 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13066 /* determine report luns addressing type */
13067 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13068 /*
13069 * Vendors in the field have been found to be concatenating
13070 * bus/target/lun to equal the complete lun value instead
13071 * of switching to flat space addressing
13072 */
13073 /* 00b - peripheral device addressing method */
13074 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13075 /* FALLTHRU */
13076 /* 10b - logical unit addressing method */
13077 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13078 /* FALLTHRU */
13079 /* 01b - flat space addressing method */
13080 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13081 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13082 *lun_addr_type = (buf[lun_idx] &
13083 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13084 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13085 *lun_num |= buf[lun_idx + 1];
13086 return (DDI_SUCCESS);
13087 default:
13088 return (DDI_FAILURE);
13089 }
13090 }
13091
13092 static int
13093 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13094 {
13095 struct buf *repluns_bp = NULL;
13096 struct scsi_address ap;
13097 uchar_t cdb[CDB_GROUP5];
13098 int ret = DDI_FAILURE;
13099 int retry = 0;
13100 int lun_list_len = 0;
13101 uint16_t lun_num = 0;
13102 uint8_t lun_addr_type = 0;
13103 uint32_t lun_cnt = 0;
13104 uint32_t lun_total = 0;
13105 dev_info_t *cdip = NULL;
13106 uint16_t *saved_repluns = NULL;
13107 char *buffer = NULL;
13108 int buf_len = 128;
13109 mptsas_t *mpt = DIP2MPT(pdip);
13110 uint64_t sas_wwn = 0;
13111 uint8_t phy = 0xFF;
13112 uint32_t dev_info = 0;
13113
13114 mutex_enter(&mpt->m_mutex);
13115 sas_wwn = ptgt->m_addr.mta_wwn;
13116 phy = ptgt->m_phynum;
13117 dev_info = ptgt->m_deviceinfo;
13118 mutex_exit(&mpt->m_mutex);
13119
13120 if (sas_wwn == 0) {
13121 /*
13122 * It's a SATA without Device Name
13123 * So don't try multi-LUNs
13124 */
13125 if (mptsas_find_child_phy(pdip, phy)) {
13126 return (DDI_SUCCESS);
13127 } else {
13128 /*
13129 * need configure and create node
13130 */
13131 return (DDI_FAILURE);
13132 }
13133 }
13134
13135 /*
13136 * WWN (SAS address or Device Name exist)
13137 */
13138 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13139 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13140 /*
13141 * SATA device with Device Name
13142 * So don't try multi-LUNs
13143 */
13144 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
13145 return (DDI_SUCCESS);
13146 } else {
13147 return (DDI_FAILURE);
13148 }
13149 }
13150
13151 do {
13152 ap.a_target = MPTSAS_INVALID_DEVHDL;
13153 ap.a_lun = 0;
13154 ap.a_hba_tran = mpt->m_tran;
13155 repluns_bp = scsi_alloc_consistent_buf(&ap,
13156 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13157 if (repluns_bp == NULL) {
13158 retry++;
13159 continue;
13160 }
13161 bzero(cdb, CDB_GROUP5);
13162 cdb[0] = SCMD_REPORT_LUNS;
13163 cdb[6] = (buf_len & 0xff000000) >> 24;
13164 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13165 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13166 cdb[9] = (buf_len & 0x000000ff);
13167
13168 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13169 repluns_bp, NULL);
13170 if (ret != DDI_SUCCESS) {
13171 scsi_free_consistent_buf(repluns_bp);
13172 retry++;
13173 continue;
13174 }
13175 lun_list_len = BE_32(*(int *)((void *)(
13176 repluns_bp->b_un.b_addr)));
13177 if (buf_len >= lun_list_len + 8) {
13178 ret = DDI_SUCCESS;
13179 break;
13180 }
13181 scsi_free_consistent_buf(repluns_bp);
13182 buf_len = lun_list_len + 8;
13183
13184 } while (retry < 3);
13185
13186 if (ret != DDI_SUCCESS)
13187 return (ret);
13188 buffer = (char *)repluns_bp->b_un.b_addr;
13189 /*
13190 * find out the number of luns returned by the SCSI ReportLun call
13191 * and allocate buffer space
13192 */
13193 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13194 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13195 if (saved_repluns == NULL) {
13196 scsi_free_consistent_buf(repluns_bp);
13197 return (DDI_FAILURE);
13198 }
13199 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13200 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13201 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13202 continue;
13203 }
13204 saved_repluns[lun_cnt] = lun_num;
13205 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13206 ret = DDI_SUCCESS;
13207 else
13208 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13209 ptgt);
13210 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13211 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13212 MPTSAS_DEV_GONE);
13213 }
13214 }
13215 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13216 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13217 scsi_free_consistent_buf(repluns_bp);
13218 return (DDI_SUCCESS);
13219 }
13220
13221 static int
13222 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13223 {
13224 int rval = DDI_FAILURE;
13225 struct scsi_inquiry *sd_inq = NULL;
13226 mptsas_t *mpt = DIP2MPT(pdip);
13227 mptsas_target_t *ptgt = NULL;
13228
13229 mutex_enter(&mpt->m_mutex);
13230 ptgt = refhash_linear_search(mpt->m_targets,
13231 mptsas_target_eval_devhdl, &target);
13232 mutex_exit(&mpt->m_mutex);
13233 if (ptgt == NULL) {
13234 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13235 "not found.", target);
13236 return (rval);
13237 }
13238
13239 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13240 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13241 SUN_INQSIZE, 0, (uchar_t)0);
13242
13243 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13244 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13245 0);
13246 } else {
13247 rval = DDI_FAILURE;
13248 }
13249
13250 kmem_free(sd_inq, SUN_INQSIZE);
13251 return (rval);
13252 }
13253
13254 /*
13255 * configure all RAID volumes for virtual iport
13256 */
13257 static void
13258 mptsas_config_all_viport(dev_info_t *pdip)
13259 {
13260 mptsas_t *mpt = DIP2MPT(pdip);
13261 int config, vol;
13262 int target;
13263 dev_info_t *lundip = NULL;
13264
13265 /*
13266 * Get latest RAID info and search for any Volume DevHandles. If any
13267 * are found, configure the volume.
13268 */
13269 mutex_enter(&mpt->m_mutex);
13270 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13271 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13272 if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
13273 == 1) {
13274 target = mpt->m_raidconfig[config].
13275 m_raidvol[vol].m_raidhandle;
13276 mutex_exit(&mpt->m_mutex);
13277 (void) mptsas_config_raid(pdip, target,
13278 &lundip);
13279 mutex_enter(&mpt->m_mutex);
13280 }
13281 }
13282 }
13283 mutex_exit(&mpt->m_mutex);
13284 }
13285
/*
 * Offline every child node or mdi path of this target (matched by WWN)
 * whose LUN no longer appears in the 'repluns' array returned by the most
 * recent REPORT LUNS command.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t *child = NULL, *savechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	uint64_t sas_wwn, wwid;
	uint8_t phy;
	int lun;
	int i;
	int find;
	char *addr;
	char *nodename;
	mptsas_t *mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain dev_info children.  The next sibling is fetched
	 * before any offline so the walk survives node removal.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP nodes have no LUNs; skip them */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* only consider children of the target we just scanned */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi_pathinfo (MPXIO) paths, same prefetch-then-offline
	 * pattern.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
13382
13383 void
13384 mptsas_update_hashtab(struct mptsas *mpt)
13385 {
13386 uint32_t page_address;
13387 int rval = 0;
13388 uint16_t dev_handle;
13389 mptsas_target_t *ptgt = NULL;
13390 mptsas_smp_t smp_node;
13391
13392 /*
13393 * Get latest RAID info.
13394 */
13395 (void) mptsas_get_raid_info(mpt);
13396
13397 dev_handle = mpt->m_smp_devhdl;
13398 for (; mpt->m_done_traverse_smp == 0; ) {
13399 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
13400 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
13401 if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
13402 != DDI_SUCCESS) {
13403 break;
13404 }
13405 mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
13406 (void) mptsas_smp_alloc(mpt, &smp_node);
13407 }
13408
13409 /*
13410 * Config target devices
13411 */
13412 dev_handle = mpt->m_dev_handle;
13413
13414 /*
13415 * Do loop to get sas device page 0 by GetNextHandle till the
13416 * the last handle. If the sas device is a SATA/SSP target,
13417 * we try to config it.
13418 */
13419 for (; mpt->m_done_traverse_dev == 0; ) {
13420 ptgt = NULL;
13421 page_address =
13422 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
13423 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13424 (uint32_t)dev_handle;
13425 rval = mptsas_get_target_device_info(mpt, page_address,
13426 &dev_handle, &ptgt);
13427 if ((rval == DEV_INFO_FAIL_PAGE0) ||
13428 (rval == DEV_INFO_FAIL_ALLOC)) {
13429 break;
13430 }
13431
13432 mpt->m_dev_handle = dev_handle;
13433 }
13434
13435 }
13436
13437 void
13438 mptsas_update_driver_data(struct mptsas *mpt)
13439 {
13440 mptsas_target_t *tp;
13441 mptsas_smp_t *sp;
13442
13443 ASSERT(MUTEX_HELD(&mpt->m_mutex));
13444
13445 /*
13446 * TODO after hard reset, update the driver data structures
13447 * 1. update port/phymask mapping table mpt->m_phy_info
13448 * 2. invalid all the entries in hash table
13449 * m_devhdl = 0xffff and m_deviceinfo = 0
13450 * 3. call sas_device_page/expander_page to update hash table
13451 */
13452 mptsas_update_phymask(mpt);
13453 /*
13454 * Invalid the existing entries
13455 *
13456 * XXX - It seems like we should just delete everything here. We are
13457 * holding the lock and are about to refresh all the targets in both
13458 * hashes anyway. Given the path we're in, what outstanding async
13459 * event could possibly be trying to reference one of these things
13460 * without taking the lock, and how would that be useful anyway?
13461 */
13462 for (tp = refhash_first(mpt->m_targets); tp != NULL;
13463 tp = refhash_next(mpt->m_targets, tp)) {
13464 tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
13465 tp->m_deviceinfo = 0;
13466 tp->m_dr_flag = MPTSAS_DR_INACTIVE;
13467 }
13468 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
13469 sp = refhash_next(mpt->m_smp_targets, sp)) {
13470 sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
13471 sp->m_deviceinfo = 0;
13472 }
13473 mpt->m_done_traverse_dev = 0;
13474 mpt->m_done_traverse_smp = 0;
13475 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
13476 mptsas_update_hashtab(mpt);
13477 }
13478
/*
 * Enumerate every SMP expander and target visible through this iport
 * (identified by its phymask property).  The virtual iport (phymask 0)
 * only carries IR volumes and is handled by mptsas_config_all_viport().
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t *smpdip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t *psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* refresh the hash tables if either traversal is incomplete */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * m_mutex is dropped around the online/config calls below while
	 * iterating the refhash tables -- NOTE(review): this relies on
	 * refhash iteration tolerating concurrent changes; confirm.
	 */
	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
		phy_mask = psmp->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		phy_mask = ptgt->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
13531
13532 static int
13533 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13534 {
13535 int rval = DDI_FAILURE;
13536 dev_info_t *tdip;
13537
13538 rval = mptsas_config_luns(pdip, ptgt);
13539 if (rval != DDI_SUCCESS) {
13540 /*
13541 * The return value means the SCMD_REPORT_LUNS
13542 * did not execute successfully. The target maybe
13543 * doesn't support such command.
13544 */
13545 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13546 }
13547 return (rval);
13548 }
13549
13550 /*
13551 * Return fail if not all the childs/paths are freed.
13552 * if there is any path under the HBA, the return value will be always fail
13553 * because we didn't call mdi_pi_free for path
13554 */
13555 static int
13556 mptsas_offline_target(dev_info_t *pdip, char *name)
13557 {
13558 dev_info_t *child = NULL, *prechild = NULL;
13559 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
13560 int tmp_rval, rval = DDI_SUCCESS;
13561 char *addr, *cp;
13562 size_t s;
13563 mptsas_t *mpt = DIP2MPT(pdip);
13564
13565 child = ddi_get_child(pdip);
13566 while (child) {
13567 addr = ddi_get_name_addr(child);
13568 prechild = child;
13569 child = ddi_get_next_sibling(child);
13570
13571 if (addr == NULL) {
13572 continue;
13573 }
13574 if ((cp = strchr(addr, ',')) == NULL) {
13575 continue;
13576 }
13577
13578 s = (uintptr_t)cp - (uintptr_t)addr;
13579
13580 if (strncmp(addr, name, s) != 0) {
13581 continue;
13582 }
13583
13584 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
13585 NDI_DEVI_REMOVE);
13586 if (tmp_rval != DDI_SUCCESS) {
13587 rval = DDI_FAILURE;
13588 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
13589 prechild, MPTSAS_DEV_GONE) !=
13590 DDI_PROP_SUCCESS) {
13591 mptsas_log(mpt, CE_WARN, "mptsas driver "
13592 "unable to create property for "
13593 "SAS %s (MPTSAS_DEV_GONE)", addr);
13594 }
13595 }
13596 }
13597
13598 pip = mdi_get_next_client_path(pdip, NULL);
13599 while (pip) {
13600 addr = MDI_PI(pip)->pi_addr;
13601 savepip = pip;
13602 pip = mdi_get_next_client_path(pdip, pip);
13603 if (addr == NULL) {
13604 continue;
13605 }
13606
13607 if ((cp = strchr(addr, ',')) == NULL) {
13608 continue;
13609 }
13610
13611 s = (uintptr_t)cp - (uintptr_t)addr;
13612
13613 if (strncmp(addr, name, s) != 0) {
13614 continue;
13615 }
13616
13617 (void) mptsas_offline_lun(pdip, NULL, savepip,
13618 NDI_DEVI_REMOVE);
13619 /*
13620 * driver will not invoke mdi_pi_free, so path will not
13621 * be freed forever, return DDI_FAILURE.
13622 */
13623 rval = DDI_FAILURE;
13624 }
13625 return (rval);
13626 }
13627
13628 static int
13629 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13630 mdi_pathinfo_t *rpip, uint_t flags)
13631 {
13632 int rval = DDI_FAILURE;
13633 char *devname;
13634 dev_info_t *cdip, *parent;
13635
13636 if (rpip != NULL) {
13637 parent = scsi_vhci_dip;
13638 cdip = mdi_pi_get_client(rpip);
13639 } else if (rdip != NULL) {
13640 parent = pdip;
13641 cdip = rdip;
13642 } else {
13643 return (DDI_FAILURE);
13644 }
13645
13646 /*
13647 * Make sure node is attached otherwise
13648 * it won't have related cache nodes to
13649 * clean up. i_ddi_devi_attached is
13650 * similiar to i_ddi_node_state(cdip) >=
13651 * DS_ATTACHED.
13652 */
13653 if (i_ddi_devi_attached(cdip)) {
13654
13655 /* Get full devname */
13656 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13657 (void) ddi_deviname(cdip, devname);
13658 /* Clean cache */
13659 (void) devfs_clean(parent, devname + 1,
13660 DV_CLEAN_FORCE);
13661 kmem_free(devname, MAXNAMELEN + 1);
13662 }
13663 if (rpip != NULL) {
13664 if (MDI_PI_IS_OFFLINE(rpip)) {
13665 rval = DDI_SUCCESS;
13666 } else {
13667 rval = mdi_pi_offline(rpip, 0);
13668 }
13669 } else {
13670 rval = ndi_devi_offline(cdip, flags);
13671 }
13672
13673 return (rval);
13674 }
13675
13676 static dev_info_t *
13677 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13678 {
13679 dev_info_t *child = NULL;
13680 char *smp_wwn = NULL;
13681
13682 child = ddi_get_child(parent);
13683 while (child) {
13684 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13685 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13686 != DDI_SUCCESS) {
13687 child = ddi_get_next_sibling(child);
13688 continue;
13689 }
13690
13691 if (strcmp(smp_wwn, str_wwn) == 0) {
13692 ddi_prop_free(smp_wwn);
13693 break;
13694 }
13695 child = ddi_get_next_sibling(child);
13696 ddi_prop_free(smp_wwn);
13697 }
13698 return (child);
13699 }
13700
13701 static int
13702 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13703 {
13704 int rval = DDI_FAILURE;
13705 char *devname;
13706 char wwn_str[MPTSAS_WWN_STRLEN];
13707 dev_info_t *cdip;
13708
13709 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
13710
13711 cdip = mptsas_find_smp_child(pdip, wwn_str);
13712
13713 if (cdip == NULL)
13714 return (DDI_SUCCESS);
13715
13716 /*
13717 * Make sure node is attached otherwise
13718 * it won't have related cache nodes to
13719 * clean up. i_ddi_devi_attached is
13720 * similiar to i_ddi_node_state(cdip) >=
13721 * DS_ATTACHED.
13722 */
13723 if (i_ddi_devi_attached(cdip)) {
13724
13725 /* Get full devname */
13726 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13727 (void) ddi_deviname(cdip, devname);
13728 /* Clean cache */
13729 (void) devfs_clean(pdip, devname + 1,
13730 DV_CLEAN_FORCE);
13731 kmem_free(devname, MAXNAMELEN + 1);
13732 }
13733
13734 rval = ndi_devi_offline(cdip, flags);
13735
13736 return (rval);
13737 }
13738
13739 static dev_info_t *
13740 mptsas_find_child(dev_info_t *pdip, char *name)
13741 {
13742 dev_info_t *child = NULL;
13743 char *rname = NULL;
13744 int rval = DDI_FAILURE;
13745
13746 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13747
13748 child = ddi_get_child(pdip);
13749 while (child) {
13750 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13751 if (rval != DDI_SUCCESS) {
13752 child = ddi_get_next_sibling(child);
13753 bzero(rname, SCSI_MAXNAMELEN);
13754 continue;
13755 }
13756
13757 if (strcmp(rname, name) == 0) {
13758 break;
13759 }
13760 child = ddi_get_next_sibling(child);
13761 bzero(rname, SCSI_MAXNAMELEN);
13762 }
13763
13764 kmem_free(rname, SCSI_MAXNAMELEN);
13765
13766 return (child);
13767 }
13768
13769
13770 static dev_info_t *
13771 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13772 {
13773 dev_info_t *child = NULL;
13774 char *name = NULL;
13775 char *addr = NULL;
13776
13777 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13778 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13779 (void) sprintf(name, "%016"PRIx64, sasaddr);
13780 (void) sprintf(addr, "w%s,%x", name, lun);
13781 child = mptsas_find_child(pdip, addr);
13782 kmem_free(name, SCSI_MAXNAMELEN);
13783 kmem_free(addr, SCSI_MAXNAMELEN);
13784 return (child);
13785 }
13786
13787 static dev_info_t *
13788 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13789 {
13790 dev_info_t *child;
13791 char *addr;
13792
13793 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13794 (void) sprintf(addr, "p%x,0", phy);
13795 child = mptsas_find_child(pdip, addr);
13796 kmem_free(addr, SCSI_MAXNAMELEN);
13797 return (child);
13798 }
13799
13800 static mdi_pathinfo_t *
13801 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13802 {
13803 mdi_pathinfo_t *path;
13804 char *addr = NULL;
13805
13806 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13807 (void) sprintf(addr, "p%x,0", phy);
13808 path = mdi_pi_find(pdip, NULL, addr);
13809 kmem_free(addr, SCSI_MAXNAMELEN);
13810 return (path);
13811 }
13812
13813 static mdi_pathinfo_t *
13814 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13815 {
13816 mdi_pathinfo_t *path;
13817 char *name = NULL;
13818 char *addr = NULL;
13819
13820 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13821 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13822 (void) sprintf(name, "%016"PRIx64, sasaddr);
13823 (void) sprintf(addr, "w%s,%x", name, lun);
13824 path = mdi_pi_find(parent, NULL, addr);
13825 kmem_free(name, SCSI_MAXNAMELEN);
13826 kmem_free(addr, SCSI_MAXNAMELEN);
13827
13828 return (path);
13829 }
13830
/*
 * Create the device node for one LUN of 'ptgt'.  Tries to derive a devid
 * GUID from INQUIRY VPD page 0x83 so the LUN can be enumerated under
 * scsi_vhci; falls back to a physical node when no usable GUID is
 * obtained or MPXIO is disabled.  CD/DVD, optical and enclosure devices
 * skip the page-83 work entirely.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int i = 0;
	uchar_t *inq83 = NULL;
	int inq83_len1 = 0xFF;
	int inq83_len = 0;
	int rval = DDI_FAILURE;
	ddi_devid_t devid;
	char *guid = NULL;
	int target = ptgt->m_devhdl;
	mdi_pathinfo_t *pip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	/* retry once per second up to mptsas_inq83_retry_timeout times */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/* tunable: still bind a physical node on failure */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	/* loop ran to completion: retries exhausted without a valid devid */
	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* prefer a virtual (scsi_vhci) node when a GUID and MPXIO exist */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
13954
13955 static int
13956 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13957 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13958 {
13959 int target;
13960 char *nodename = NULL;
13961 char **compatible = NULL;
13962 int ncompatible = 0;
13963 int mdi_rtn = MDI_FAILURE;
13964 int rval = DDI_FAILURE;
13965 char *old_guid = NULL;
13966 mptsas_t *mpt = DIP2MPT(pdip);
13967 char *lun_addr = NULL;
13968 char *wwn_str = NULL;
13969 char *attached_wwn_str = NULL;
13970 char *component = NULL;
13971 uint8_t phy = 0xFF;
13972 uint64_t sas_wwn;
13973 int64_t lun64 = 0;
13974 uint32_t devinfo;
13975 uint16_t dev_hdl;
13976 uint16_t pdev_hdl;
13977 uint64_t dev_sas_wwn;
13978 uint64_t pdev_sas_wwn;
13979 uint32_t pdev_info;
13980 uint8_t physport;
13981 uint8_t phy_id;
13982 uint32_t page_address;
13983 uint16_t bay_num, enclosure;
13984 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
13985 uint32_t dev_info;
13986
13987 mutex_enter(&mpt->m_mutex);
13988 target = ptgt->m_devhdl;
13989 sas_wwn = ptgt->m_addr.mta_wwn;
13990 devinfo = ptgt->m_deviceinfo;
13991 phy = ptgt->m_phynum;
13992 mutex_exit(&mpt->m_mutex);
13993
13994 if (sas_wwn) {
13995 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13996 } else {
13997 *pip = mptsas_find_path_phy(pdip, phy);
13998 }
13999
14000 if (*pip != NULL) {
14001 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14002 ASSERT(*lun_dip != NULL);
14003 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14004 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14005 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14006 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14007 /*
14008 * Same path back online again.
14009 */
14010 (void) ddi_prop_free(old_guid);
14011 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14012 (!MDI_PI_IS_STANDBY(*pip)) &&
14013 (ptgt->m_tgt_unconfigured == 0)) {
14014 rval = mdi_pi_online(*pip, 0);
14015 mutex_enter(&mpt->m_mutex);
14016 ptgt->m_led_status = 0;
14017 (void) mptsas_flush_led_status(mpt,
14018 ptgt);
14019 mutex_exit(&mpt->m_mutex);
14020 } else {
14021 rval = DDI_SUCCESS;
14022 }
14023 if (rval != DDI_SUCCESS) {
14024 mptsas_log(mpt, CE_WARN, "path:target: "
14025 "%x, lun:%x online failed!", target,
14026 lun);
14027 *pip = NULL;
14028 *lun_dip = NULL;
14029 }
14030 return (rval);
14031 } else {
14032 /*
14033 * The GUID of the LUN has changed which maybe
14034 * because customer mapped another volume to the
14035 * same LUN.
14036 */
14037 mptsas_log(mpt, CE_WARN, "The GUID of the "
14038 "target:%x, lun:%x was changed, maybe "
14039 "because someone mapped another volume "
14040 "to the same LUN", target, lun);
14041 (void) ddi_prop_free(old_guid);
14042 if (!MDI_PI_IS_OFFLINE(*pip)) {
14043 rval = mdi_pi_offline(*pip, 0);
14044 if (rval != MDI_SUCCESS) {
14045 mptsas_log(mpt, CE_WARN, "path:"
14046 "target:%x, lun:%x offline "
14047 "failed!", target, lun);
14048 *pip = NULL;
14049 *lun_dip = NULL;
14050 return (DDI_FAILURE);
14051 }
14052 }
14053 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14054 mptsas_log(mpt, CE_WARN, "path:target:"
14055 "%x, lun:%x free failed!", target,
14056 lun);
14057 *pip = NULL;
14058 *lun_dip = NULL;
14059 return (DDI_FAILURE);
14060 }
14061 }
14062 } else {
14063 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14064 "property for path:target:%x, lun:%x", target, lun);
14065 *pip = NULL;
14066 *lun_dip = NULL;
14067 return (DDI_FAILURE);
14068 }
14069 }
14070 scsi_hba_nodename_compatible_get(inq, NULL,
14071 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14072
14073 /*
14074 * if nodename can't be determined then print a message and skip it
14075 */
14076 if (nodename == NULL) {
14077 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14078 "driver for target%d lun %d dtype:0x%02x", target, lun,
14079 inq->inq_dtype);
14080 return (DDI_FAILURE);
14081 }
14082
14083 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14084 /* The property is needed by MPAPI */
14085 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14086
14087 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14088 if (guid) {
14089 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14090 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14091 } else {
14092 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14093 (void) sprintf(wwn_str, "p%x", phy);
14094 }
14095
14096 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14097 guid, lun_addr, compatible, ncompatible,
14098 0, pip);
14099 if (mdi_rtn == MDI_SUCCESS) {
14100
14101 if (mdi_prop_update_string(*pip, MDI_GUID,
14102 guid) != DDI_SUCCESS) {
14103 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14104 "create prop for target %d lun %d (MDI_GUID)",
14105 target, lun);
14106 mdi_rtn = MDI_FAILURE;
14107 goto virt_create_done;
14108 }
14109
14110 if (mdi_prop_update_int(*pip, LUN_PROP,
14111 lun) != DDI_SUCCESS) {
14112 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14113 "create prop for target %d lun %d (LUN_PROP)",
14114 target, lun);
14115 mdi_rtn = MDI_FAILURE;
14116 goto virt_create_done;
14117 }
14118 lun64 = (int64_t)lun;
14119 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14120 lun64) != DDI_SUCCESS) {
14121 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14122 "create prop for target %d (LUN64_PROP)",
14123 target);
14124 mdi_rtn = MDI_FAILURE;
14125 goto virt_create_done;
14126 }
14127 if (mdi_prop_update_string_array(*pip, "compatible",
14128 compatible, ncompatible) !=
14129 DDI_PROP_SUCCESS) {
14130 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14131 "create prop for target %d lun %d (COMPATIBLE)",
14132 target, lun);
14133 mdi_rtn = MDI_FAILURE;
14134 goto virt_create_done;
14135 }
14136 if (sas_wwn && (mdi_prop_update_string(*pip,
14137 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14138 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14139 "create prop for target %d lun %d "
14140 "(target-port)", target, lun);
14141 mdi_rtn = MDI_FAILURE;
14142 goto virt_create_done;
14143 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14144 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14145 /*
14146 * Direct attached SATA device without DeviceName
14147 */
14148 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14149 "create prop for SAS target %d lun %d "
14150 "(sata-phy)", target, lun);
14151 mdi_rtn = MDI_FAILURE;
14152 goto virt_create_done;
14153 }
14154 mutex_enter(&mpt->m_mutex);
14155
14156 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14157 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14158 (uint32_t)ptgt->m_devhdl;
14159 rval = mptsas_get_sas_device_page0(mpt, page_address,
14160 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14161 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14162 if (rval != DDI_SUCCESS) {
14163 mutex_exit(&mpt->m_mutex);
14164 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14165 "parent device for handle %d", page_address);
14166 mdi_rtn = MDI_FAILURE;
14167 goto virt_create_done;
14168 }
14169
14170 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14171 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14172 rval = mptsas_get_sas_device_page0(mpt, page_address,
14173 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14174 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14175 if (rval != DDI_SUCCESS) {
14176 mutex_exit(&mpt->m_mutex);
14177 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14178 "device info for handle %d", page_address);
14179 mdi_rtn = MDI_FAILURE;
14180 goto virt_create_done;
14181 }
14182
14183 mutex_exit(&mpt->m_mutex);
14184
14185 /*
14186 * If this device direct attached to the controller
14187 * set the attached-port to the base wwid
14188 */
14189 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14190 != DEVINFO_DIRECT_ATTACHED) {
14191 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14192 pdev_sas_wwn);
14193 } else {
14194 /*
14195 * Update the iport's attached-port to guid
14196 */
14197 if (sas_wwn == 0) {
14198 (void) sprintf(wwn_str, "p%x", phy);
14199 } else {
14200 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14201 }
14202 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14203 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14204 DDI_PROP_SUCCESS) {
14205 mptsas_log(mpt, CE_WARN,
14206 "mptsas unable to create "
14207 "property for iport target-port"
14208 " %s (sas_wwn)",
14209 wwn_str);
14210 mdi_rtn = MDI_FAILURE;
14211 goto virt_create_done;
14212 }
14213
14214 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14215 mpt->un.m_base_wwid);
14216 }
14217
14218 if (mdi_prop_update_string(*pip,
14219 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14220 DDI_PROP_SUCCESS) {
14221 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14222 "property for iport attached-port %s (sas_wwn)",
14223 attached_wwn_str);
14224 mdi_rtn = MDI_FAILURE;
14225 goto virt_create_done;
14226 }
14227
14228
14229 if (inq->inq_dtype == 0) {
14230 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14231 /*
14232 * set obp path for pathinfo
14233 */
14234 (void) snprintf(component, MAXPATHLEN,
14235 "disk@%s", lun_addr);
14236
14237 if (mdi_pi_pathname_obp_set(*pip, component) !=
14238 DDI_SUCCESS) {
14239 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14240 "unable to set obp-path for object %s",
14241 component);
14242 mdi_rtn = MDI_FAILURE;
14243 goto virt_create_done;
14244 }
14245 }
14246
14247 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14248 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14249 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14250 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14251 "pm-capable", 1)) !=
14252 DDI_PROP_SUCCESS) {
14253 mptsas_log(mpt, CE_WARN, "mptsas driver"
14254 "failed to create pm-capable "
14255 "property, target %d", target);
14256 mdi_rtn = MDI_FAILURE;
14257 goto virt_create_done;
14258 }
14259 }
14260 /*
14261 * Create the phy-num property
14262 */
14263 if (mdi_prop_update_int(*pip, "phy-num",
14264 ptgt->m_phynum) != DDI_SUCCESS) {
14265 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14266 "create phy-num property for target %d lun %d",
14267 target, lun);
14268 mdi_rtn = MDI_FAILURE;
14269 goto virt_create_done;
14270 }
14271 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14272 mdi_rtn = mdi_pi_online(*pip, 0);
14273 if (mdi_rtn == MDI_SUCCESS) {
14274 mutex_enter(&mpt->m_mutex);
14275 ptgt->m_led_status = 0;
14276 (void) mptsas_flush_led_status(mpt, ptgt);
14277 mutex_exit(&mpt->m_mutex);
14278 }
14279 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14280 mdi_rtn = MDI_FAILURE;
14281 }
14282 virt_create_done:
14283 if (*pip && mdi_rtn != MDI_SUCCESS) {
14284 (void) mdi_pi_free(*pip, 0);
14285 *pip = NULL;
14286 *lun_dip = NULL;
14287 }
14288 }
14289
14290 scsi_hba_nodename_compatible_free(nodename, compatible);
14291 if (lun_addr != NULL) {
14292 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14293 }
14294 if (wwn_str != NULL) {
14295 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14296 }
14297 if (component != NULL) {
14298 kmem_free(component, MAXPATHLEN);
14299 }
14300
14301 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14302 }
14303
14304 static int
14305 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14306 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14307 {
14308 int target;
14309 int rval;
14310 int ndi_rtn = NDI_FAILURE;
14311 uint64_t be_sas_wwn;
14312 char *nodename = NULL;
14313 char **compatible = NULL;
14314 int ncompatible = 0;
14315 int instance = 0;
14316 mptsas_t *mpt = DIP2MPT(pdip);
14317 char *wwn_str = NULL;
14318 char *component = NULL;
14319 char *attached_wwn_str = NULL;
14320 uint8_t phy = 0xFF;
14321 uint64_t sas_wwn;
14322 uint32_t devinfo;
14323 uint16_t dev_hdl;
14324 uint16_t pdev_hdl;
14325 uint64_t pdev_sas_wwn;
14326 uint64_t dev_sas_wwn;
14327 uint32_t pdev_info;
14328 uint8_t physport;
14329 uint8_t phy_id;
14330 uint32_t page_address;
14331 uint16_t bay_num, enclosure;
14332 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14333 uint32_t dev_info;
14334 int64_t lun64 = 0;
14335
14336 mutex_enter(&mpt->m_mutex);
14337 target = ptgt->m_devhdl;
14338 sas_wwn = ptgt->m_addr.mta_wwn;
14339 devinfo = ptgt->m_deviceinfo;
14340 phy = ptgt->m_phynum;
14341 mutex_exit(&mpt->m_mutex);
14342
14343 /*
14344 * generate compatible property with binding-set "mpt"
14345 */
14346 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14347 &nodename, &compatible, &ncompatible);
14348
14349 /*
14350 * if nodename can't be determined then print a message and skip it
14351 */
14352 if (nodename == NULL) {
14353 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14354 "for target %d lun %d", target, lun);
14355 return (DDI_FAILURE);
14356 }
14357
14358 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14359 DEVI_SID_NODEID, lun_dip);
14360
14361 /*
14362 * if lun alloc success, set props
14363 */
14364 if (ndi_rtn == NDI_SUCCESS) {
14365
14366 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14367 *lun_dip, LUN_PROP, lun) !=
14368 DDI_PROP_SUCCESS) {
14369 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14370 "property for target %d lun %d (LUN_PROP)",
14371 target, lun);
14372 ndi_rtn = NDI_FAILURE;
14373 goto phys_create_done;
14374 }
14375
14376 lun64 = (int64_t)lun;
14377 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14378 *lun_dip, LUN64_PROP, lun64) !=
14379 DDI_PROP_SUCCESS) {
14380 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14381 "property for target %d lun64 %d (LUN64_PROP)",
14382 target, lun);
14383 ndi_rtn = NDI_FAILURE;
14384 goto phys_create_done;
14385 }
14386 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14387 *lun_dip, "compatible", compatible, ncompatible)
14388 != DDI_PROP_SUCCESS) {
14389 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14390 "property for target %d lun %d (COMPATIBLE)",
14391 target, lun);
14392 ndi_rtn = NDI_FAILURE;
14393 goto phys_create_done;
14394 }
14395
14396 /*
14397 * We need the SAS WWN for non-multipath devices, so
14398 * we'll use the same property as that multipathing
14399 * devices need to present for MPAPI. If we don't have
14400 * a WWN (e.g. parallel SCSI), don't create the prop.
14401 */
14402 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14403 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14404 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14405 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14406 != DDI_PROP_SUCCESS) {
14407 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14408 "create property for SAS target %d lun %d "
14409 "(target-port)", target, lun);
14410 ndi_rtn = NDI_FAILURE;
14411 goto phys_create_done;
14412 }
14413
14414 be_sas_wwn = BE_64(sas_wwn);
14415 if (sas_wwn && ndi_prop_update_byte_array(
14416 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14417 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14418 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14419 "create property for SAS target %d lun %d "
14420 "(port-wwn)", target, lun);
14421 ndi_rtn = NDI_FAILURE;
14422 goto phys_create_done;
14423 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
14424 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14425 DDI_PROP_SUCCESS)) {
14426 /*
14427 * Direct attached SATA device without DeviceName
14428 */
14429 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14430 "create property for SAS target %d lun %d "
14431 "(sata-phy)", target, lun);
14432 ndi_rtn = NDI_FAILURE;
14433 goto phys_create_done;
14434 }
14435
14436 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14437 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14438 mptsas_log(mpt, CE_WARN, "mptsas unable to"
14439 "create property for SAS target %d lun %d"
14440 " (SAS_PROP)", target, lun);
14441 ndi_rtn = NDI_FAILURE;
14442 goto phys_create_done;
14443 }
14444 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14445 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14446 mptsas_log(mpt, CE_WARN, "mptsas unable "
14447 "to create guid property for target %d "
14448 "lun %d", target, lun);
14449 ndi_rtn = NDI_FAILURE;
14450 goto phys_create_done;
14451 }
14452
14453 /*
14454 * The following code is to set properties for SM-HBA support,
14455 * it doesn't apply to RAID volumes
14456 */
14457 if (ptgt->m_addr.mta_phymask == 0)
14458 goto phys_raid_lun;
14459
14460 mutex_enter(&mpt->m_mutex);
14461
14462 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14463 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14464 (uint32_t)ptgt->m_devhdl;
14465 rval = mptsas_get_sas_device_page0(mpt, page_address,
14466 &dev_hdl, &dev_sas_wwn, &dev_info,
14467 &physport, &phy_id, &pdev_hdl,
14468 &bay_num, &enclosure);
14469 if (rval != DDI_SUCCESS) {
14470 mutex_exit(&mpt->m_mutex);
14471 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14472 "parent device for handle %d.", page_address);
14473 ndi_rtn = NDI_FAILURE;
14474 goto phys_create_done;
14475 }
14476
14477 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14478 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14479 rval = mptsas_get_sas_device_page0(mpt, page_address,
14480 &dev_hdl, &pdev_sas_wwn, &pdev_info,
14481 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14482 if (rval != DDI_SUCCESS) {
14483 mutex_exit(&mpt->m_mutex);
14484 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14485 "device for handle %d.", page_address);
14486 ndi_rtn = NDI_FAILURE;
14487 goto phys_create_done;
14488 }
14489
14490 mutex_exit(&mpt->m_mutex);
14491
14492 /*
14493 * If this device direct attached to the controller
14494 * set the attached-port to the base wwid
14495 */
14496 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14497 != DEVINFO_DIRECT_ATTACHED) {
14498 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14499 pdev_sas_wwn);
14500 } else {
14501 /*
14502 * Update the iport's attached-port to guid
14503 */
14504 if (sas_wwn == 0) {
14505 (void) sprintf(wwn_str, "p%x", phy);
14506 } else {
14507 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14508 }
14509 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14510 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14511 DDI_PROP_SUCCESS) {
14512 mptsas_log(mpt, CE_WARN,
14513 "mptsas unable to create "
14514 "property for iport target-port"
14515 " %s (sas_wwn)",
14516 wwn_str);
14517 ndi_rtn = NDI_FAILURE;
14518 goto phys_create_done;
14519 }
14520
14521 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14522 mpt->un.m_base_wwid);
14523 }
14524
14525 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14526 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14527 DDI_PROP_SUCCESS) {
14528 mptsas_log(mpt, CE_WARN,
14529 "mptsas unable to create "
14530 "property for iport attached-port %s (sas_wwn)",
14531 attached_wwn_str);
14532 ndi_rtn = NDI_FAILURE;
14533 goto phys_create_done;
14534 }
14535
14536 if (IS_SATA_DEVICE(dev_info)) {
14537 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14538 *lun_dip, MPTSAS_VARIANT, "sata") !=
14539 DDI_PROP_SUCCESS) {
14540 mptsas_log(mpt, CE_WARN,
14541 "mptsas unable to create "
14542 "property for device variant ");
14543 ndi_rtn = NDI_FAILURE;
14544 goto phys_create_done;
14545 }
14546 }
14547
14548 if (IS_ATAPI_DEVICE(dev_info)) {
14549 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14550 *lun_dip, MPTSAS_VARIANT, "atapi") !=
14551 DDI_PROP_SUCCESS) {
14552 mptsas_log(mpt, CE_WARN,
14553 "mptsas unable to create "
14554 "property for device variant ");
14555 ndi_rtn = NDI_FAILURE;
14556 goto phys_create_done;
14557 }
14558 }
14559
14560 phys_raid_lun:
14561 /*
14562 * if this is a SAS controller, and the target is a SATA
14563 * drive, set the 'pm-capable' property for sd and if on
14564 * an OPL platform, also check if this is an ATAPI
14565 * device.
14566 */
14567 instance = ddi_get_instance(mpt->m_dip);
14568 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14569 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14570 NDBG2(("mptsas%d: creating pm-capable property, "
14571 "target %d", instance, target));
14572
14573 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
14574 *lun_dip, "pm-capable", 1)) !=
14575 DDI_PROP_SUCCESS) {
14576 mptsas_log(mpt, CE_WARN, "mptsas "
14577 "failed to create pm-capable "
14578 "property, target %d", target);
14579 ndi_rtn = NDI_FAILURE;
14580 goto phys_create_done;
14581 }
14582
14583 }
14584
14585 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
14586 /*
14587 * add 'obp-path' properties for devinfo
14588 */
14589 bzero(wwn_str, sizeof (wwn_str));
14590 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14591 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14592 if (guid) {
14593 (void) snprintf(component, MAXPATHLEN,
14594 "disk@w%s,%x", wwn_str, lun);
14595 } else {
14596 (void) snprintf(component, MAXPATHLEN,
14597 "disk@p%x,%x", phy, lun);
14598 }
14599 if (ddi_pathname_obp_set(*lun_dip, component)
14600 != DDI_SUCCESS) {
14601 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14602 "unable to set obp-path for SAS "
14603 "object %s", component);
14604 ndi_rtn = NDI_FAILURE;
14605 goto phys_create_done;
14606 }
14607 }
14608 /*
14609 * Create the phy-num property for non-raid disk
14610 */
14611 if (ptgt->m_addr.mta_phymask != 0) {
14612 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14613 *lun_dip, "phy-num", ptgt->m_phynum) !=
14614 DDI_PROP_SUCCESS) {
14615 mptsas_log(mpt, CE_WARN, "mptsas driver "
14616 "failed to create phy-num property for "
14617 "target %d", target);
14618 ndi_rtn = NDI_FAILURE;
14619 goto phys_create_done;
14620 }
14621 }
14622 phys_create_done:
14623 /*
14624 * If props were setup ok, online the lun
14625 */
14626 if (ndi_rtn == NDI_SUCCESS) {
14627 /*
14628 * Try to online the new node
14629 */
14630 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14631 }
14632 if (ndi_rtn == NDI_SUCCESS) {
14633 mutex_enter(&mpt->m_mutex);
14634 ptgt->m_led_status = 0;
14635 (void) mptsas_flush_led_status(mpt, ptgt);
14636 mutex_exit(&mpt->m_mutex);
14637 }
14638
14639 /*
14640 * If success set rtn flag, else unwire alloc'd lun
14641 */
14642 if (ndi_rtn != NDI_SUCCESS) {
14643 NDBG12(("mptsas driver unable to online "
14644 "target %d lun %d", target, lun));
14645 ndi_prop_remove_all(*lun_dip);
14646 (void) ndi_devi_free(*lun_dip);
14647 *lun_dip = NULL;
14648 }
14649 }
14650
14651 scsi_hba_nodename_compatible_free(nodename, compatible);
14652
14653 if (wwn_str != NULL) {
14654 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14655 }
14656 if (component != NULL) {
14657 kmem_free(component, MAXPATHLEN);
14658 }
14659
14660
14661 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14662 }
14663
14664 static int
14665 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14666 {
14667 mptsas_t *mpt = DIP2MPT(pdip);
14668 struct smp_device smp_sd;
14669
14670 /* XXX An HBA driver should not be allocating an smp_device. */
14671 bzero(&smp_sd, sizeof (struct smp_device));
14672 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14673 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14674
14675 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14676 return (NDI_FAILURE);
14677 return (NDI_SUCCESS);
14678 }
14679
14680 static int
14681 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14682 {
14683 mptsas_t *mpt = DIP2MPT(pdip);
14684 mptsas_smp_t *psmp = NULL;
14685 int rval;
14686 int phymask;
14687
14688 /*
14689 * Get the physical port associated to the iport
14690 * PHYMASK TODO
14691 */
14692 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14693 "phymask", 0);
14694 /*
14695 * Find the smp node in hash table with specified sas address and
14696 * physical port
14697 */
14698 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14699 if (psmp == NULL) {
14700 return (DDI_FAILURE);
14701 }
14702
14703 rval = mptsas_online_smp(pdip, psmp, smp_dip);
14704
14705 return (rval);
14706 }
14707
14708 static int
14709 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
14710 dev_info_t **smp_dip)
14711 {
14712 char wwn_str[MPTSAS_WWN_STRLEN];
14713 char attached_wwn_str[MPTSAS_WWN_STRLEN];
14714 int ndi_rtn = NDI_FAILURE;
14715 int rval = 0;
14716 mptsas_smp_t dev_info;
14717 uint32_t page_address;
14718 mptsas_t *mpt = DIP2MPT(pdip);
14719 uint16_t dev_hdl;
14720 uint64_t sas_wwn;
14721 uint64_t smp_sas_wwn;
14722 uint8_t physport;
14723 uint8_t phy_id;
14724 uint16_t pdev_hdl;
14725 uint8_t numphys = 0;
14726 uint16_t i = 0;
14727 char phymask[MPTSAS_MAX_PHYS];
14728 char *iport = NULL;
14729 mptsas_phymask_t phy_mask = 0;
14730 uint16_t attached_devhdl;
14731 uint16_t bay_num, enclosure;
14732
14733 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
14734
14735 /*
14736 * Probe smp device, prevent the node of removed device from being
14737 * configured succesfully
14738 */
14739 if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
14740 return (DDI_FAILURE);
14741 }
14742
14743 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
14744 return (DDI_SUCCESS);
14745 }
14746
14747 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
14748
14749 /*
14750 * if lun alloc success, set props
14751 */
14752 if (ndi_rtn == NDI_SUCCESS) {
14753 /*
14754 * Set the flavor of the child to be SMP flavored
14755 */
14756 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
14757
14758 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14759 *smp_dip, SMP_WWN, wwn_str) !=
14760 DDI_PROP_SUCCESS) {
14761 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14762 "property for smp device %s (sas_wwn)",
14763 wwn_str);
14764 ndi_rtn = NDI_FAILURE;
14765 goto smp_create_done;
14766 }
14767 (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
14768 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14769 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
14770 DDI_PROP_SUCCESS) {
14771 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14772 "property for iport target-port %s (sas_wwn)",
14773 wwn_str);
14774 ndi_rtn = NDI_FAILURE;
14775 goto smp_create_done;
14776 }
14777
14778 mutex_enter(&mpt->m_mutex);
14779
14780 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
14781 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
14782 rval = mptsas_get_sas_expander_page0(mpt, page_address,
14783 &dev_info);
14784 if (rval != DDI_SUCCESS) {
14785 mutex_exit(&mpt->m_mutex);
14786 mptsas_log(mpt, CE_WARN,
14787 "mptsas unable to get expander "
14788 "parent device info for %x", page_address);
14789 ndi_rtn = NDI_FAILURE;
14790 goto smp_create_done;
14791 }
14792
14793 smp_node->m_pdevhdl = dev_info.m_pdevhdl;
14794 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14795 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14796 (uint32_t)dev_info.m_pdevhdl;
14797 rval = mptsas_get_sas_device_page0(mpt, page_address,
14798 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
14799 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14800 if (rval != DDI_SUCCESS) {
14801 mutex_exit(&mpt->m_mutex);
14802 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14803 "device info for %x", page_address);
14804 ndi_rtn = NDI_FAILURE;
14805 goto smp_create_done;
14806 }
14807
14808 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14809 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14810 (uint32_t)dev_info.m_devhdl;
14811 rval = mptsas_get_sas_device_page0(mpt, page_address,
14812 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
14813 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14814 if (rval != DDI_SUCCESS) {
14815 mutex_exit(&mpt->m_mutex);
14816 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14817 "device info for %x", page_address);
14818 ndi_rtn = NDI_FAILURE;
14819 goto smp_create_done;
14820 }
14821 mutex_exit(&mpt->m_mutex);
14822
14823 /*
14824 * If this smp direct attached to the controller
14825 * set the attached-port to the base wwid
14826 */
14827 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14828 != DEVINFO_DIRECT_ATTACHED) {
14829 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
14830 sas_wwn);
14831 } else {
14832 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
14833 mpt->un.m_base_wwid);
14834 }
14835
14836 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14837 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
14838 DDI_PROP_SUCCESS) {
14839 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14840 "property for smp attached-port %s (sas_wwn)",
14841 attached_wwn_str);
14842 ndi_rtn = NDI_FAILURE;
14843 goto smp_create_done;
14844 }
14845
14846 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14847 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
14848 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14849 "create property for SMP %s (SMP_PROP) ",
14850 wwn_str);
14851 ndi_rtn = NDI_FAILURE;
14852 goto smp_create_done;
14853 }
14854
14855 /*
14856 * check the smp to see whether it direct
14857 * attached to the controller
14858 */
14859 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14860 != DEVINFO_DIRECT_ATTACHED) {
14861 goto smp_create_done;
14862 }
14863 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
14864 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
14865 if (numphys > 0) {
14866 goto smp_create_done;
14867 }
14868 /*
14869 * this iport is an old iport, we need to
14870 * reconfig the props for it.
14871 */
14872 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14873 MPTSAS_VIRTUAL_PORT, 0) !=
14874 DDI_PROP_SUCCESS) {
14875 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14876 MPTSAS_VIRTUAL_PORT);
14877 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
14878 "prop update failed");
14879 goto smp_create_done;
14880 }
14881
14882 mutex_enter(&mpt->m_mutex);
14883 numphys = 0;
14884 iport = ddi_get_name_addr(pdip);
14885 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14886 bzero(phymask, sizeof (phymask));
14887 (void) sprintf(phymask,
14888 "%x", mpt->m_phy_info[i].phy_mask);
14889 if (strcmp(phymask, iport) == 0) {
14890 phy_mask = mpt->m_phy_info[i].phy_mask;
14891 break;
14892 }
14893 }
14894
14895 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14896 if ((phy_mask >> i) & 0x01) {
14897 numphys++;
14898 }
14899 }
14900 /*
14901 * Update PHY info for smhba
14902 */
14903 if (mptsas_smhba_phy_init(mpt)) {
14904 mutex_exit(&mpt->m_mutex);
14905 mptsas_log(mpt, CE_WARN, "mptsas phy update "
14906 "failed");
14907 goto smp_create_done;
14908 }
14909 mutex_exit(&mpt->m_mutex);
14910
14911 mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
14912 &attached_devhdl);
14913
14914 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14915 MPTSAS_NUM_PHYS, numphys) !=
14916 DDI_PROP_SUCCESS) {
14917 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14918 MPTSAS_NUM_PHYS);
14919 mptsas_log(mpt, CE_WARN, "mptsas update "
14920 "num phys props failed");
14921 goto smp_create_done;
14922 }
14923 /*
14924 * Add parent's props for SMHBA support
14925 */
14926 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
14927 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14928 DDI_PROP_SUCCESS) {
14929 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14930 SCSI_ADDR_PROP_ATTACHED_PORT);
14931 mptsas_log(mpt, CE_WARN, "mptsas update iport"
14932 "attached-port failed");
14933 goto smp_create_done;
14934 }
14935
14936 smp_create_done:
14937 /*
14938 * If props were setup ok, online the lun
14939 */
14940 if (ndi_rtn == NDI_SUCCESS) {
14941 /*
14942 * Try to online the new node
14943 */
14944 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
14945 }
14946
14947 /*
14948 * If success set rtn flag, else unwire alloc'd lun
14949 */
14950 if (ndi_rtn != NDI_SUCCESS) {
14951 NDBG12(("mptsas unable to online "
14952 "SMP target %s", wwn_str));
14953 ndi_prop_remove_all(*smp_dip);
14954 (void) ndi_devi_free(*smp_dip);
14955 }
14956 }
14957
14958 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14959 }
14960
14961 /* smp transport routine */
14962 static int mptsas_smp_start(struct smp_pkt *smp_pkt)
14963 {
14964 uint64_t wwn;
14965 Mpi2SmpPassthroughRequest_t req;
14966 Mpi2SmpPassthroughReply_t rep;
14967 uint32_t direction = 0;
14968 mptsas_t *mpt;
14969 int ret;
14970 uint64_t tmp64;
14971
14972 mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
14973 smp_a_hba_tran->smp_tran_hba_private;
14974
14975 bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
14976 /*
14977 * Need to compose a SMP request message
14978 * and call mptsas_do_passthru() function
14979 */
14980 bzero(&req, sizeof (req));
14981 bzero(&rep, sizeof (rep));
14982 req.PassthroughFlags = 0;
14983 req.PhysicalPort = 0xff;
14984 req.ChainOffset = 0;
14985 req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
14986
14987 if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
14988 smp_pkt->smp_pkt_reason = ERANGE;
14989 return (DDI_FAILURE);
14990 }
14991 req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));
14992
14993 req.MsgFlags = 0;
14994 tmp64 = LE_64(wwn);
14995 bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
14996 if (smp_pkt->smp_pkt_rspsize > 0) {
14997 direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
14998 }
14999 if (smp_pkt->smp_pkt_reqsize > 0) {
15000 direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
15001 }
15002
15003 mutex_enter(&mpt->m_mutex);
15004 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
15005 (uint8_t *)smp_pkt->smp_pkt_rsp,
15006 offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
15007 smp_pkt->smp_pkt_rspsize - 4, direction,
15008 (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
15009 smp_pkt->smp_pkt_timeout, FKIOCTL);
15010 mutex_exit(&mpt->m_mutex);
15011 if (ret != 0) {
15012 cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
15013 smp_pkt->smp_pkt_reason = (uchar_t)(ret);
15014 return (DDI_FAILURE);
15015 }
15016 /* do passthrough success, check the smp status */
15017 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
15018 switch (LE_16(rep.IOCStatus)) {
15019 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
15020 smp_pkt->smp_pkt_reason = ENODEV;
15021 break;
15022 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
15023 smp_pkt->smp_pkt_reason = EOVERFLOW;
15024 break;
15025 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
15026 smp_pkt->smp_pkt_reason = EIO;
15027 break;
15028 default:
15029 mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
15030 "status:%x", LE_16(rep.IOCStatus));
15031 smp_pkt->smp_pkt_reason = EIO;
15032 break;
15033 }
15034 return (DDI_FAILURE);
15035 }
15036 if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
15037 mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
15038 rep.SASStatus);
15039 smp_pkt->smp_pkt_reason = EIO;
15040 return (DDI_FAILURE);
15041 }
15042
15043 return (DDI_SUCCESS);
15044 }
15045
15046 /*
15047 * If we didn't get a match, we need to get sas page0 for each device, and
15048 * untill we get a match. If failed, return NULL
15049 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
{
	int i, j = 0;
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	/* Count the phys in the mask; more than one means a wide port. */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/*
	 * First try the targets we already know about: a linear scan for a
	 * target whose phy number matches (mptsas_target_eval_nowwn).
	 */
	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
	    &phy);
	if (ptgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (ptgt);
	}

	/*
	 * If every device has already been discovered and none matched,
	 * there is nothing left to find.
	 */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/*
	 * Walk the controller's device pages starting from the last handle
	 * we stopped at, populating targets as we go, until we either hit
	 * the end of the list (page0/alloc failure breaks out with
	 * ptgt == NULL) or find a direct-attached target (wwn == 0) on the
	 * requested phy.
	 */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}
		/* Skip entries that are not addressable SAS targets. */
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the traversal has progressed. */
		mpt->m_dev_handle = cur_handle;

		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
15121
15122 /*
15123 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
15124 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
15125 * If we didn't get a match, we need to get sas page0 for each device, and
15126 * untill we get a match
15127 * If failed, return NULL
15128 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the target may already be in the hash. */
	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		/*
		 * A zero phymask denotes a RAID (IR) volume; refresh the
		 * RAID configuration and retry the lookup once.
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* Discovery already complete and no match found: give up. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/*
	 * Walk the controller's device pages from the last visited handle,
	 * populating targets as we go, until the matching wwid/phymask pair
	 * is found or the traversal fails (tmp_tgt is reset to NULL then).
	 */
	cur_handle = mpt->m_dev_handle;
	for (;;) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		/* Skip entries that are not addressable SAS targets. */
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the traversal has progressed. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_addr.mta_wwn) &&
		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
15192
/*
 * Look up the SMP (expander) target with the given wwid/phymask. If it is
 * not yet in the hash and expander discovery is incomplete, walk SAS
 * expander page 0 entries until a match is found; return NULL on failure.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the expander may already be in the hash. */
	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* Discovery already complete and no match found: give up. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/*
	 * Walk the controller's expander pages from the last visited handle,
	 * registering each expander, until the matching wwid/phymask pair is
	 * found or page 0 can no longer be read (psmp stays NULL then).
	 */
	cur_handle = mpt->m_smp_devhdl;
	for (;;) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			break;
		}
		/* Advance the traversal cursor to the handle just read. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(mpt, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
		    (psmp->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15239
15240 mptsas_target_t *
15241 mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
15242 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15243 {
15244 mptsas_target_t *tmp_tgt = NULL;
15245 mptsas_target_addr_t addr;
15246
15247 addr.mta_wwn = wwid;
15248 addr.mta_phymask = phymask;
15249 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
15250 if (tmp_tgt != NULL) {
15251 NDBG20(("Hash item already exist"));
15252 tmp_tgt->m_deviceinfo = devinfo;
15253 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
15254 return (tmp_tgt);
15255 }
15256 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15257 if (tmp_tgt == NULL) {
15258 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15259 return (NULL);
15260 }
15261 tmp_tgt->m_devhdl = devhdl;
15262 tmp_tgt->m_addr.mta_wwn = wwid;
15263 tmp_tgt->m_deviceinfo = devinfo;
15264 tmp_tgt->m_addr.mta_phymask = phymask;
15265 tmp_tgt->m_phynum = phynum;
15266 /* Initialized the tgt structure */
15267 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15268 tmp_tgt->m_qfull_retry_interval =
15269 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15270 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15271
15272 refhash_insert(mpt->m_targets, tmp_tgt);
15273
15274 return (tmp_tgt);
15275 }
15276
15277 static void
15278 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
15279 {
15280 dst->m_devhdl = src->m_devhdl;
15281 dst->m_deviceinfo = src->m_deviceinfo;
15282 dst->m_pdevhdl = src->m_pdevhdl;
15283 dst->m_pdevinfo = src->m_pdevinfo;
15284 }
15285
15286 static mptsas_smp_t *
15287 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
15288 {
15289 mptsas_target_addr_t addr;
15290 mptsas_smp_t *ret_data;
15291
15292 addr.mta_wwn = data->m_addr.mta_wwn;
15293 addr.mta_phymask = data->m_addr.mta_phymask;
15294 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
15295 /*
15296 * If there's already a matching SMP target, update its fields
15297 * in place. Since the address is not changing, it's safe to do
15298 * this. We cannot just bcopy() here because the structure we've
15299 * been given has invalid hash links.
15300 */
15301 if (ret_data != NULL) {
15302 mptsas_smp_target_copy(data, ret_data);
15303 return (ret_data);
15304 }
15305
15306 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15307 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15308 refhash_insert(mpt->m_smp_targets, ret_data);
15309 return (ret_data);
15310 }
15311
15312 /*
15313 * Functions for SGPIO LED support
15314 */
15315 static dev_info_t *
15316 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
15317 {
15318 dev_info_t *dip;
15319 int prop;
15320 dip = e_ddi_hold_devi_by_dev(dev, 0);
15321 if (dip == NULL)
15322 return (dip);
15323 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
15324 "phymask", 0);
15325 *phymask = (mptsas_phymask_t)prop;
15326 ddi_release_devi(dip);
15327 return (dip);
15328 }
15329 static mptsas_target_t *
15330 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
15331 {
15332 uint8_t phynum;
15333 uint64_t wwn;
15334 int lun;
15335 mptsas_target_t *ptgt = NULL;
15336
15337 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
15338 return (NULL);
15339 }
15340 if (addr[0] == 'w') {
15341 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
15342 } else {
15343 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
15344 }
15345 return (ptgt);
15346 }
15347
15348 static int
15349 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
15350 {
15351 uint32_t slotstatus = 0;
15352
15353 /* Build an MPI2 Slot Status based on our view of the world */
15354 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
15355 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
15356 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
15357 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
15358 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
15359 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
15360
15361 /* Write it to the controller */
15362 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
15363 slotstatus, ptgt->m_slot_num));
15364 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
15365 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
15366 }
15367
15368 /*
15369 * send sep request, use enclosure/slot addressing
15370 */
static int
mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t req;
	Mpi2SepReply_t rep;
	int ret;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * We only support SEP control of directly-attached targets, in which
	 * case the "SEP" we're talking to is a virtual one contained within
	 * the HBA itself. This is necessary because DA targets typically have
	 * no other mechanism for LED control. Targets for which a separate
	 * enclosure service processor exists should be controlled via ses(7d)
	 * or sgen(7d). Furthermore, since such requests can time out, they
	 * should be made in user context rather than in response to
	 * asynchronous fabric changes.
	 *
	 * In addition, we do not support this operation for RAID volumes,
	 * since there is no slot associated with them.
	 */
	if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
	    ptgt->m_addr.mta_phymask == 0) {
		return (ENOTTY);
	}

	/* Build the SEP request, addressed by enclosure handle and slot. */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	/* 60-second timeout; no data buffers beyond the request/reply. */
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (ret);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
		    LE_32(rep.IOCLogInfo));
		/* Map the MPI2 IOC status to a reasonable errno. */
		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_INVALID_FUNCTION:
		case MPI2_IOCSTATUS_INVALID_VPID:
		case MPI2_IOCSTATUS_INVALID_FIELD:
		case MPI2_IOCSTATUS_INVALID_STATE:
		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
			return (EINVAL);
		case MPI2_IOCSTATUS_BUSY:
			return (EBUSY);
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
			return (EAGAIN);
		case MPI2_IOCSTATUS_INVALID_SGL:
		case MPI2_IOCSTATUS_INTERNAL_ERROR:
		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		default:
			return (EIO);
		}
	}
	/* For reads, hand the controller's slot status back to the caller. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (0);
}
15451
15452 int
15453 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15454 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15455 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15456 {
15457 ddi_dma_cookie_t new_cookie;
15458 size_t alloc_len;
15459 uint_t ncookie;
15460
15461 if (cookiep == NULL)
15462 cookiep = &new_cookie;
15463
15464 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15465 NULL, dma_hdp) != DDI_SUCCESS) {
15466 return (FALSE);
15467 }
15468
15469 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15470 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
15471 acc_hdp) != DDI_SUCCESS) {
15472 ddi_dma_free_handle(dma_hdp);
15473 return (FALSE);
15474 }
15475
15476 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
15477 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
15478 cookiep, &ncookie) != DDI_DMA_MAPPED) {
15479 (void) ddi_dma_mem_free(acc_hdp);
15480 ddi_dma_free_handle(dma_hdp);
15481 return (FALSE);
15482 }
15483
15484 return (TRUE);
15485 }
15486
15487 void
15488 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15489 {
15490 if (*dma_hdp == NULL)
15491 return;
15492
15493 (void) ddi_dma_unbind_handle(*dma_hdp);
15494 (void) ddi_dma_mem_free(acc_hdp);
15495 ddi_dma_free_handle(dma_hdp);
15496 }