1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 /*
28 * Copyright (c) 2000 to 2010, LSI Corporation.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms of all code within
32 * this file that is exclusively owned by LSI, with or without
33 * modification, is permitted provided that, in addition to the CDDL 1.0
34 * License requirements, the following conditions are met:
35 *
36 * Neither the name of the author nor the names of its contributors may be
37 * used to endorse or promote products derived from this software without
38 * specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
46 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
47 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
51 * DAMAGE.
52 */
53
54 /*
55 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
56 *
57 */
58
59 #if defined(lint) || defined(DEBUG)
60 #define MPTSAS_DEBUG
61 #endif
62
63 /*
64 * standard header files.
65 */
66 #include <sys/note.h>
67 #include <sys/scsi/scsi.h>
68 #include <sys/pci.h>
69 #include <sys/file.h>
70 #include <sys/policy.h>
71 #include <sys/sysevent.h>
72 #include <sys/sysevent/eventdefs.h>
73 #include <sys/sysevent/dr.h>
74 #include <sys/sata/sata_defs.h>
75 #include <sys/scsi/generic/sas.h>
76 #include <sys/scsi/impl/scsi_sas.h>
77
78 #pragma pack(1)
79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
87 #pragma pack()
88
89 /*
90 * private header files.
91 *
92 */
93 #include <sys/scsi/impl/scsi_reset_notify.h>
94 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
95 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
96 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
97 #include <sys/raidioctl.h>
98
99 #include <sys/fs/dv_node.h> /* devfs_clean */
100
101 /*
102 * FMA header files
103 */
104 #include <sys/ddifm.h>
105 #include <sys/fm/protocol.h>
106 #include <sys/fm/util.h>
107 #include <sys/fm/io/ddi.h>
108
109 /*
110 * autoconfiguration data and routines.
111 */
112 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
113 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
114 static int mptsas_power(dev_info_t *dip, int component, int level);
115
116 /*
117 * cb_ops function
118 */
119 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
120 cred_t *credp, int *rval);
121 #ifdef __sparc
122 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
123 #else /* __sparc */
124 static int mptsas_quiesce(dev_info_t *devi);
125 #endif /* __sparc */
126
/*
 * Resource initialization for hardware
 */
130 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
131 static void mptsas_disable_bus_master(mptsas_t *mpt);
132 static void mptsas_hba_fini(mptsas_t *mpt);
133 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
134 static int mptsas_hba_setup(mptsas_t *mpt);
135 static void mptsas_hba_teardown(mptsas_t *mpt);
136 static int mptsas_config_space_init(mptsas_t *mpt);
137 static void mptsas_config_space_fini(mptsas_t *mpt);
138 static void mptsas_iport_register(mptsas_t *mpt);
139 static int mptsas_smp_setup(mptsas_t *mpt);
140 static void mptsas_smp_teardown(mptsas_t *mpt);
141 static int mptsas_cache_create(mptsas_t *mpt);
142 static void mptsas_cache_destroy(mptsas_t *mpt);
143 static int mptsas_alloc_request_frames(mptsas_t *mpt);
144 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
145 static int mptsas_alloc_free_queue(mptsas_t *mpt);
146 static int mptsas_alloc_post_queue(mptsas_t *mpt);
147 static void mptsas_alloc_reply_args(mptsas_t *mpt);
148 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
149 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
150 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
151
152 /*
153 * SCSA function prototypes
154 */
155 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
156 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
157 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
158 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
159 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
160 int tgtonly);
161 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
162 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
163 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
164 int tgtlen, int flags, int (*callback)(), caddr_t arg);
165 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
166 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
167 struct scsi_pkt *pkt);
168 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
169 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
170 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
171 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
172 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
173 void (*callback)(caddr_t), caddr_t arg);
174 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
175 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
176 static int mptsas_scsi_quiesce(dev_info_t *dip);
177 static int mptsas_scsi_unquiesce(dev_info_t *dip);
178 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
179 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
180
181 /*
182 * SMP functions
183 */
184 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
185
186 /*
187 * internal function prototypes.
188 */
189 static void mptsas_list_add(mptsas_t *mpt);
190 static void mptsas_list_del(mptsas_t *mpt);
191
192 static int mptsas_quiesce_bus(mptsas_t *mpt);
193 static int mptsas_unquiesce_bus(mptsas_t *mpt);
194
195 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
196 static void mptsas_free_handshake_msg(mptsas_t *mpt);
197
198 static void mptsas_ncmds_checkdrain(void *arg);
199
200 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
201 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
202 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
203 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
204
205 static int mptsas_do_detach(dev_info_t *dev);
206 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
207 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
208 struct scsi_pkt *pkt);
209 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
210
211 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
212 static void mptsas_handle_event(void *args);
213 static int mptsas_handle_event_sync(void *args);
214 static void mptsas_handle_dr(void *args);
215 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
216 dev_info_t *pdip);
217
218 static void mptsas_restart_cmd(void *);
219
220 static void mptsas_flush_hba(mptsas_t *mpt);
221 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
222 uint8_t tasktype);
223 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
224 uchar_t reason, uint_t stat);
225
226 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
227 static void mptsas_process_intr(mptsas_t *mpt,
228 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
229 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
230 pMpi2ReplyDescriptorsUnion_t reply_desc);
231 static void mptsas_handle_address_reply(mptsas_t *mpt,
232 pMpi2ReplyDescriptorsUnion_t reply_desc);
233 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
234 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
235 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
236
237 static void mptsas_watch(void *arg);
238 static void mptsas_watchsubr(mptsas_t *mpt);
239 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
240
241 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
242 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
243 uint8_t *data, uint32_t request_size, uint32_t reply_size,
244 uint32_t data_size, uint32_t direction, uint8_t *dataout,
245 uint32_t dataout_size, short timeout, int mode);
246 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
247
248 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
249 uint32_t unique_id);
250 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
251 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
252 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
253 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
254 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
255 uint32_t diag_type);
256 static int mptsas_diag_register(mptsas_t *mpt,
257 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
258 static int mptsas_diag_unregister(mptsas_t *mpt,
259 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
260 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
261 uint32_t *return_code);
262 static int mptsas_diag_read_buffer(mptsas_t *mpt,
263 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
264 uint32_t *return_code, int ioctl_mode);
265 static int mptsas_diag_release(mptsas_t *mpt,
266 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
267 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
268 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
269 int ioctl_mode);
270 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
271 int mode);
272
273 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
274 int cmdlen, int tgtlen, int statuslen, int kf);
275 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
276
277 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
278 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
279
280 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
281 int kmflags);
282 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
283
284 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
285 mptsas_cmd_t *cmd);
286 static void mptsas_check_task_mgt(mptsas_t *mpt,
287 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
288 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
289 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
290 int *resid);
291
292 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
293 static void mptsas_free_active_slots(mptsas_t *mpt);
294 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
295
296 static void mptsas_restart_hba(mptsas_t *mpt);
297 static void mptsas_restart_waitq(mptsas_t *mpt);
298
299 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
300 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
301 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
302
303 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
304 static void mptsas_doneq_empty(mptsas_t *mpt);
305 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
306
307 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
308 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
309 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
310 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
311
312
313 static void mptsas_start_watch_reset_delay();
314 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
315 static void mptsas_watch_reset_delay(void *arg);
316 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
317
318 /*
319 * helper functions
320 */
321 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
322
323 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
324 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
325 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
326 int lun);
327 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
328 int lun);
329 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
330 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
331
332 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
333 int *lun);
334 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
335
336 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
337 uint8_t phy);
338 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
339 uint64_t wwid);
340 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
341 uint64_t wwid);
342
343 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
344 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
345
346 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
347 uint16_t *handle, mptsas_target_t **pptgt);
348 static void mptsas_update_phymask(mptsas_t *mpt);
349
350 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
351 uint32_t *status, uint8_t cmd);
352 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
353 mptsas_phymask_t *phymask);
354 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
355 mptsas_phymask_t phymask);
356 static int mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
357 uint32_t slotstatus);
358
359
360 /*
361 * Enumeration / DR functions
362 */
363 static void mptsas_config_all(dev_info_t *pdip);
364 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
365 dev_info_t **lundip);
366 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
367 dev_info_t **lundip);
368
369 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
370 static int mptsas_offline_target(dev_info_t *pdip, char *name);
371
372 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
373 dev_info_t **dip);
374
375 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
376 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
377 dev_info_t **dip, mptsas_target_t *ptgt);
378
379 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
380 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
381
382 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
383 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
384 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
385 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
386 int lun);
387
388 static void mptsas_offline_missed_luns(dev_info_t *pdip,
389 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
390 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
391 mdi_pathinfo_t *rpip, uint_t flags);
392
393 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
394 dev_info_t **smp_dip);
395 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
396 uint_t flags);
397
398 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
399 int mode, int *rval);
400 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
401 int mode, int *rval);
402 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
403 int mode, int *rval);
404 static void mptsas_record_event(void *args);
405 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
406 int mode);
407
408 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
409 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
410 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
411 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
412 mptsas_phymask_t key2);
413 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
414 mptsas_phymask_t key2);
415 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
416
417 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
418 uint32_t, mptsas_phymask_t, uint8_t);
419 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
420 mptsas_smp_t *data);
421 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
422 mptsas_phymask_t phymask);
423 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
424 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
425 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
426 dev_info_t **smp_dip);
427
428 /*
429 * Power management functions
430 */
431 static int mptsas_get_pci_cap(mptsas_t *mpt);
432 static int mptsas_init_pm(mptsas_t *mpt);
433
434 /*
435 * MPT MSI tunable:
436 *
437 * By default MSI is enabled on all supported platforms.
438 */
boolean_t mptsas_enable_msi = B_TRUE;
/*
 * Tunable: altered behavior when binding by the page-0x83 device id of a
 * physical device fails — exact semantics depend on the usage sites
 * elsewhere in this file; NOTE(review): confirm against enumeration code.
 */
boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;

/* Interrupt (MSI/FIXED) registration and teardown helpers */
static int mptsas_register_intrs(mptsas_t *);
static void mptsas_unregister_intrs(mptsas_t *);
static int mptsas_add_intrs(mptsas_t *, int);
static void mptsas_rem_intrs(mptsas_t *);
446
447 /*
448 * FMA Prototypes
449 */
450 static void mptsas_fm_init(mptsas_t *mpt);
451 static void mptsas_fm_fini(mptsas_t *mpt);
452 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
453
/* System thread priority bounds, used when creating driver service threads */
extern pri_t minclsyspri, maxclsyspri;

/*
 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
 * under this device that the paths to a physical device are created when
 * MPxIO is used.
 */
extern dev_info_t *scsi_vhci_dip;

/*
 * Tunable timeout value for Inquiry VPD page 0x83
 * By default the value is 30 seconds.
 */
int mptsas_inq83_retry_timeout = 30;
468
469 /*
470 * This is used to allocate memory for message frame storage, not for
471 * data I/O DMA. All message frames must be stored in the first 4G of
472 * physical memory.
473 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,	/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,	/* allocation alignment requirements */
	0x78,	/* burstsizes - binary encoded: 8/16/32/64-byte bursts */
	1,	/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,	/* granularity - device transfer size */
	0	/* flags, set to 0 */
};
488
489 /*
490 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
491 * physical addresses are supported.)
492 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,	/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x00ffffffull,	/* count max - max DMA object size */
	4,	/* allocation alignment requirements */
	0x78,	/* burstsizes - binary encoded: 8/16/32/64-byte bursts */
	1,	/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,	/* granularity - device transfer size */
	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
};
507
/*
 * Device register access attributes: little-endian structures accessed
 * in strict program order, with default access error protection.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
514
/*
 * Character device entry points. Open/close are delegated to the SCSA
 * framework; only ioctl is implemented by this driver.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,	/* open */
	scsi_hba_close,	/* close */
	nodev,	/* strategy */
	nodev,	/* print */
	nodev,	/* dump */
	nodev,	/* read */
	nodev,	/* write */
	mptsas_ioctl,	/* ioctl */
	nodev,	/* devmap */
	nodev,	/* mmap */
	nodev,	/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,	/* streamtab */
	D_MP,	/* cb_flag */
	CB_REV,	/* rev */
	nodev,	/* aread */
	nodev	/* awrite */
};
535
/*
 * Driver operations. On SPARC a legacy reset entry point is provided;
 * elsewhere the fast-reboot quiesce entry point is used instead.
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,	/* devo_rev, */
	0,	/* refcnt */
	ddi_no_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	mptsas_attach,	/* attach */
	mptsas_detach,	/* detach */
#ifdef __sparc
	mptsas_reset,	/* reset */
#else
	nodev,	/* reset */
#endif	/* __sparc */
	&mptsas_cb_ops,	/* driver operations */
	NULL,	/* bus operations */
	mptsas_power,	/* power management */
#ifdef __sparc
	ddi_quiesce_not_needed	/* quiesce not needed on sparc */
#else
	mptsas_quiesce	/* quiesce */
#endif	/* __sparc */
};
558
559
/* Module name/version string reported by modinfo */
#define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"

static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING,	/* Name of the module. */
	&mptsas_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* Property names placed on target/iport device nodes */
#define TARGET_PROP	"target"
#define LUN_PROP	"lun"
#define LUN64_PROP	"lun64"
#define SAS_PROP	"sas-mpt"
#define MDI_GUID	"wwn"
#define NDI_GUID	"guid"
#define MPTSAS_DEV_GONE	"mptsas_dev_gone"
578
579 /*
580 * Local static data
581 */
#if defined(MPTSAS_DEBUG)
uint32_t mptsas_debug_flags = 0;	/* selects NDBG debug output classes */
#endif	/* defined(MPTSAS_DEBUG) */
uint32_t mptsas_debug_resets = 0;

static kmutex_t mptsas_global_mutex;
static void *mptsas_state;	/* soft state ptr */
static krwlock_t mptsas_global_rwlock;	/* protects the global mpt list */

/* mptsas_log_buf is shared by all instances; serialized by its mutex */
static kmutex_t mptsas_log_mutex;
static char mptsas_log_buf[256];
_NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))

static mptsas_t *mptsas_head, *mptsas_tail;	/* global instance list */
static clock_t mptsas_scsi_watchdog_tick;
static clock_t mptsas_tick;
static timeout_id_t mptsas_reset_watch;
static timeout_id_t mptsas_timeout_id;
static int mptsas_timeouts_enabled = 0;
601 /*
602 * warlock directives
603 */
604 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
605 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
606 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
607 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
608 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
609 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
610
/*
 * SM - HBA statics
 */
char *mptsas_driver_rev = MPTSAS_MOD_STRING;	/* exposed driver revision */

#ifdef MPTSAS_DEBUG
/* debugger entry hook — presumably drops into kmdb; DEBUG builds only */
void debug_enter(char *);
#endif
619
620 /*
621 * Notes:
622 * - scsi_hba_init(9F) initializes SCSI HBA modules
623 * - must call scsi_hba_fini(9F) if modload() fails
624 */
625 int
626 _init(void)
627 {
628 int status;
629 /* CONSTCOND */
630 ASSERT(NO_COMPETING_THREADS);
631
632 NDBG0(("_init"));
633
634 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
635 MPTSAS_INITIAL_SOFT_SPACE);
636 if (status != 0) {
637 return (status);
638 }
639
640 if ((status = scsi_hba_init(&modlinkage)) != 0) {
641 ddi_soft_state_fini(&mptsas_state);
642 return (status);
643 }
644
645 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
646 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
647 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
648
649 if ((status = mod_install(&modlinkage)) != 0) {
650 mutex_destroy(&mptsas_log_mutex);
651 rw_destroy(&mptsas_global_rwlock);
652 mutex_destroy(&mptsas_global_mutex);
653 ddi_soft_state_fini(&mptsas_state);
654 scsi_hba_fini(&modlinkage);
655 }
656
657 return (status);
658 }
659
660 /*
661 * Notes:
662 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
663 */
664 int
665 _fini(void)
666 {
667 int status;
668 /* CONSTCOND */
669 ASSERT(NO_COMPETING_THREADS);
670
671 NDBG0(("_fini"));
672
673 if ((status = mod_remove(&modlinkage)) == 0) {
674 ddi_soft_state_fini(&mptsas_state);
675 scsi_hba_fini(&modlinkage);
676 mutex_destroy(&mptsas_global_mutex);
677 rw_destroy(&mptsas_global_rwlock);
678 mutex_destroy(&mptsas_log_mutex);
679 }
680 return (status);
681 }
682
683 /*
684 * The loadable-module _info(9E) entry point
685 */
686 int
687 _info(struct modinfo *modinfop)
688 {
689 /* CONSTCOND */
690 ASSERT(NO_COMPETING_THREADS);
691 NDBG0(("mptsas _info"));
692
693 return (mod_info(&modlinkage, modinfop));
694 }
695
696
697 static int
698 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
699 {
700 dev_info_t *pdip;
701 mptsas_t *mpt;
702 scsi_hba_tran_t *hba_tran;
703 char *iport = NULL;
704 char phymask[MPTSAS_MAX_PHYS];
705 mptsas_phymask_t phy_mask = 0;
706 int dynamic_port = 0;
707 uint32_t page_address;
708 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
709 int rval = DDI_FAILURE;
710 int i = 0;
711 uint8_t numphys = 0;
712 uint8_t phy_id;
713 uint8_t phy_port = 0;
714 uint16_t attached_devhdl = 0;
715 uint32_t dev_info;
716 uint64_t attached_sas_wwn;
717 uint16_t dev_hdl;
718 uint16_t pdev_hdl;
719 uint16_t bay_num, enclosure;
720 char attached_wwnstr[MPTSAS_WWN_STRLEN];
721
722 /* CONSTCOND */
723 ASSERT(NO_COMPETING_THREADS);
724
725 switch (cmd) {
726 case DDI_ATTACH:
727 break;
728
729 case DDI_RESUME:
730 /*
731 * If this a scsi-iport node, nothing to do here.
732 */
733 return (DDI_SUCCESS);
734
735 default:
736 return (DDI_FAILURE);
737 }
738
739 pdip = ddi_get_parent(dip);
740
741 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
742 NULL) {
743 cmn_err(CE_WARN, "Failed attach iport because fail to "
744 "get tran vector for the HBA node");
745 return (DDI_FAILURE);
746 }
747
748 mpt = TRAN2MPT(hba_tran);
749 ASSERT(mpt != NULL);
750 if (mpt == NULL)
751 return (DDI_FAILURE);
752
753 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
754 NULL) {
755 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
756 "get tran vector for the iport node");
757 return (DDI_FAILURE);
758 }
759
760 /*
761 * Overwrite parent's tran_hba_private to iport's tran vector
762 */
763 hba_tran->tran_hba_private = mpt;
764
765 ddi_report_dev(dip);
766
767 /*
768 * Get SAS address for initiator port according dev_handle
769 */
770 iport = ddi_get_name_addr(dip);
771 if (iport && strncmp(iport, "v0", 2) == 0) {
772 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
773 MPTSAS_VIRTUAL_PORT, 1) !=
774 DDI_PROP_SUCCESS) {
775 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
776 MPTSAS_VIRTUAL_PORT);
777 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
778 "prop update failed");
779 return (DDI_FAILURE);
780 }
781 return (DDI_SUCCESS);
782 }
783
784 mutex_enter(&mpt->m_mutex);
785 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
786 bzero(phymask, sizeof (phymask));
787 (void) sprintf(phymask,
788 "%x", mpt->m_phy_info[i].phy_mask);
789 if (strcmp(phymask, iport) == 0) {
790 break;
791 }
792 }
793
794 if (i == MPTSAS_MAX_PHYS) {
795 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
796 "seems not exist", iport);
797 mutex_exit(&mpt->m_mutex);
798 return (DDI_FAILURE);
799 }
800
801 phy_mask = mpt->m_phy_info[i].phy_mask;
802
803 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
804 dynamic_port = 1;
805 else
806 dynamic_port = 0;
807
808 /*
809 * Update PHY info for smhba
810 */
811 if (mptsas_smhba_phy_init(mpt)) {
812 mutex_exit(&mpt->m_mutex);
813 mptsas_log(mpt, CE_WARN, "mptsas phy update "
814 "failed");
815 return (DDI_FAILURE);
816 }
817
818 mutex_exit(&mpt->m_mutex);
819
820 numphys = 0;
821 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
822 if ((phy_mask >> i) & 0x01) {
823 numphys++;
824 }
825 }
826
827 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
828 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
829 mpt->un.m_base_wwid);
830
831 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
832 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
833 DDI_PROP_SUCCESS) {
834 (void) ddi_prop_remove(DDI_DEV_T_NONE,
835 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
836 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
837 "prop update failed");
838 return (DDI_FAILURE);
839 }
840 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
841 MPTSAS_NUM_PHYS, numphys) !=
842 DDI_PROP_SUCCESS) {
843 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
844 return (DDI_FAILURE);
845 }
846
847 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
848 "phymask", phy_mask) !=
849 DDI_PROP_SUCCESS) {
850 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
851 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
852 "prop update failed");
853 return (DDI_FAILURE);
854 }
855
856 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
857 "dynamic-port", dynamic_port) !=
858 DDI_PROP_SUCCESS) {
859 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
860 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
861 "prop update failed");
862 return (DDI_FAILURE);
863 }
864 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
865 MPTSAS_VIRTUAL_PORT, 0) !=
866 DDI_PROP_SUCCESS) {
867 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
868 MPTSAS_VIRTUAL_PORT);
869 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
870 "prop update failed");
871 return (DDI_FAILURE);
872 }
873 mptsas_smhba_set_phy_props(mpt,
874 iport, dip, numphys, &attached_devhdl);
875
876 mutex_enter(&mpt->m_mutex);
877 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
878 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
879 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
880 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
881 &pdev_hdl, &bay_num, &enclosure);
882 if (rval != DDI_SUCCESS) {
883 mptsas_log(mpt, CE_WARN,
884 "Failed to get device page0 for handle:%d",
885 attached_devhdl);
886 mutex_exit(&mpt->m_mutex);
887 return (DDI_FAILURE);
888 }
889
890 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
891 bzero(phymask, sizeof (phymask));
892 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
893 if (strcmp(phymask, iport) == 0) {
894 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
895 "%x",
896 mpt->m_phy_info[i].phy_mask);
897 }
898 }
899 mutex_exit(&mpt->m_mutex);
900
901 bzero(attached_wwnstr, sizeof (attached_wwnstr));
902 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
903 attached_sas_wwn);
904 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
905 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
906 DDI_PROP_SUCCESS) {
907 (void) ddi_prop_remove(DDI_DEV_T_NONE,
908 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
909 return (DDI_FAILURE);
910 }
911
912 /* Create kstats for each phy on this iport */
913
914 mptsas_create_phy_stats(mpt, iport, dip);
915
916 /*
917 * register sas hba iport with mdi (MPxIO/vhci)
918 */
919 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
920 dip, 0) == MDI_SUCCESS) {
921 mpt->m_mpxio_enable = TRUE;
922 }
923 return (DDI_SUCCESS);
924 }
925
926 /*
927 * Notes:
928 * Set up all device state and allocate data structures,
929 * mutexes, condition variables, etc. for device operation.
930 * Add interrupts needed.
931 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
932 */
933 static int
934 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
935 {
936 mptsas_t *mpt = NULL;
937 int instance, i, j;
938 int doneq_thread_num;
939 char intr_added = 0;
940 char map_setup = 0;
941 char config_setup = 0;
942 char hba_attach_setup = 0;
943 char smp_attach_setup = 0;
944 char mutex_init_done = 0;
945 char event_taskq_create = 0;
946 char dr_taskq_create = 0;
947 char doneq_thread_create = 0;
948 scsi_hba_tran_t *hba_tran;
949 uint_t mem_bar = MEM_SPACE;
950 int rval = DDI_FAILURE;
951
952 /* CONSTCOND */
953 ASSERT(NO_COMPETING_THREADS);
954
955 if (scsi_hba_iport_unit_address(dip)) {
956 return (mptsas_iport_attach(dip, cmd));
957 }
958
959 switch (cmd) {
960 case DDI_ATTACH:
961 break;
962
963 case DDI_RESUME:
964 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
965 return (DDI_FAILURE);
966
967 mpt = TRAN2MPT(hba_tran);
968
969 if (!mpt) {
970 return (DDI_FAILURE);
971 }
972
973 /*
974 * Reset hardware and softc to "no outstanding commands"
975 * Note that a check condition can result on first command
976 * to a target.
977 */
978 mutex_enter(&mpt->m_mutex);
979
980 /*
981 * raise power.
982 */
983 if (mpt->m_options & MPTSAS_OPT_PM) {
984 mutex_exit(&mpt->m_mutex);
985 (void) pm_busy_component(dip, 0);
986 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
987 if (rval == DDI_SUCCESS) {
988 mutex_enter(&mpt->m_mutex);
989 } else {
990 /*
991 * The pm_raise_power() call above failed,
992 * and that can only occur if we were unable
993 * to reset the hardware. This is probably
994 * due to unhealty hardware, and because
995 * important filesystems(such as the root
996 * filesystem) could be on the attached disks,
997 * it would not be a good idea to continue,
998 * as we won't be entirely certain we are
999 * writing correct data. So we panic() here
1000 * to not only prevent possible data corruption,
1001 * but to give developers or end users a hope
1002 * of identifying and correcting any problems.
1003 */
1004 fm_panic("mptsas could not reset hardware "
1005 "during resume");
1006 }
1007 }
1008
1009 mpt->m_suspended = 0;
1010
1011 /*
1012 * Reinitialize ioc
1013 */
1014 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1015 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1016 mutex_exit(&mpt->m_mutex);
1017 if (mpt->m_options & MPTSAS_OPT_PM) {
1018 (void) pm_idle_component(dip, 0);
1019 }
1020 fm_panic("mptsas init chip fail during resume");
1021 }
1022 /*
1023 * mptsas_update_driver_data needs interrupts so enable them
1024 * first.
1025 */
1026 MPTSAS_ENABLE_INTR(mpt);
1027 mptsas_update_driver_data(mpt);
1028
1029 /* start requests, if possible */
1030 mptsas_restart_hba(mpt);
1031
1032 mutex_exit(&mpt->m_mutex);
1033
1034 /*
1035 * Restart watch thread
1036 */
1037 mutex_enter(&mptsas_global_mutex);
1038 if (mptsas_timeout_id == 0) {
1039 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1040 mptsas_tick);
1041 mptsas_timeouts_enabled = 1;
1042 }
1043 mutex_exit(&mptsas_global_mutex);
1044
1045 /* report idle status to pm framework */
1046 if (mpt->m_options & MPTSAS_OPT_PM) {
1047 (void) pm_idle_component(dip, 0);
1048 }
1049
1050 return (DDI_SUCCESS);
1051
1052 default:
1053 return (DDI_FAILURE);
1054
1055 }
1056
1057 instance = ddi_get_instance(dip);
1058
1059 /*
1060 * Allocate softc information.
1061 */
1062 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1063 mptsas_log(NULL, CE_WARN,
1064 "mptsas%d: cannot allocate soft state", instance);
1065 goto fail;
1066 }
1067
1068 mpt = ddi_get_soft_state(mptsas_state, instance);
1069
1070 if (mpt == NULL) {
1071 mptsas_log(NULL, CE_WARN,
1072 "mptsas%d: cannot get soft state", instance);
1073 goto fail;
1074 }
1075
1076 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1077 scsi_size_clean(dip);
1078
1079 mpt->m_dip = dip;
1080 mpt->m_instance = instance;
1081
1082 /* Make a per-instance copy of the structures */
1083 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1084 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1085 mpt->m_reg_acc_attr = mptsas_dev_attr;
1086 mpt->m_dev_acc_attr = mptsas_dev_attr;
1087
1088 /*
1089 * Initialize FMA
1090 */
1091 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1092 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1093 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1094 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1095
1096 mptsas_fm_init(mpt);
1097
1098 if (mptsas_alloc_handshake_msg(mpt,
1099 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1100 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1101 goto fail;
1102 }
1103
1104 /*
1105 * Setup configuration space
1106 */
1107 if (mptsas_config_space_init(mpt) == FALSE) {
1108 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1109 goto fail;
1110 }
1111 config_setup++;
1112
1113 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1114 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1115 mptsas_log(mpt, CE_WARN, "map setup failed");
1116 goto fail;
1117 }
1118 map_setup++;
1119
1120 /*
1121 * A taskq is created for dealing with the event handler
1122 */
1123 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1124 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1125 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1126 goto fail;
1127 }
1128 event_taskq_create++;
1129
1130 /*
1131 * A taskq is created for dealing with dr events
1132 */
1133 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1134 "mptsas_dr_taskq",
1135 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1136 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1137 "failed");
1138 goto fail;
1139 }
1140 dr_taskq_create++;
1141
1142 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1143 0, "mptsas_doneq_thread_threshold_prop", 10);
1144 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1145 0, "mptsas_doneq_length_threshold_prop", 8);
1146 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1147 0, "mptsas_doneq_thread_n_prop", 8);
1148
1149 if (mpt->m_doneq_thread_n) {
1150 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1151 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1152
1153 mutex_enter(&mpt->m_doneq_mutex);
1154 mpt->m_doneq_thread_id =
1155 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1156 * mpt->m_doneq_thread_n, KM_SLEEP);
1157
1158 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1159 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1160 CV_DRIVER, NULL);
1161 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1162 MUTEX_DRIVER, NULL);
1163 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1164 mpt->m_doneq_thread_id[j].flag |=
1165 MPTSAS_DONEQ_THREAD_ACTIVE;
1166 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1167 mpt->m_doneq_thread_id[j].arg.t = j;
1168 mpt->m_doneq_thread_id[j].threadp =
1169 thread_create(NULL, 0, mptsas_doneq_thread,
1170 &mpt->m_doneq_thread_id[j].arg,
1171 0, &p0, TS_RUN, minclsyspri);
1172 mpt->m_doneq_thread_id[j].donetail =
1173 &mpt->m_doneq_thread_id[j].doneq;
1174 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1175 }
1176 mutex_exit(&mpt->m_doneq_mutex);
1177 doneq_thread_create++;
1178 }
1179
1180 /* Initialize mutex used in interrupt handler */
1181 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1182 DDI_INTR_PRI(mpt->m_intr_pri));
1183 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1184 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1185 DDI_INTR_PRI(mpt->m_intr_pri));
1186 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1187 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1188 NULL, MUTEX_DRIVER,
1189 DDI_INTR_PRI(mpt->m_intr_pri));
1190 }
1191
1192 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1193 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1194 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1195 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1196 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1197 mutex_init_done++;
1198
1199 /*
1200 * Disable hardware interrupt since we're not ready to
1201 * handle it yet.
1202 */
1203 MPTSAS_DISABLE_INTR(mpt);
1204 if (mptsas_register_intrs(mpt) == FALSE)
1205 goto fail;
1206 intr_added++;
1207
1208 mutex_enter(&mpt->m_mutex);
1209 /*
1210 * Initialize power management component
1211 */
1212 if (mpt->m_options & MPTSAS_OPT_PM) {
1213 if (mptsas_init_pm(mpt)) {
1214 mutex_exit(&mpt->m_mutex);
1215 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1216 "failed");
1217 goto fail;
1218 }
1219 }
1220
1221 /*
1222 * Initialize chip using Message Unit Reset, if allowed
1223 */
1224 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1225 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1226 mutex_exit(&mpt->m_mutex);
1227 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1228 goto fail;
1229 }
1230
1231 /*
1232 * Fill in the phy_info structure and get the base WWID
1233 */
1234 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1235 mptsas_log(mpt, CE_WARN,
1236 "mptsas_get_manufacture_page5 failed!");
1237 goto fail;
1238 }
1239
1240 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1241 mptsas_log(mpt, CE_WARN,
1242 "mptsas_get_sas_io_unit_page_hndshk failed!");
1243 goto fail;
1244 }
1245
1246 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1247 mptsas_log(mpt, CE_WARN,
1248 "mptsas_get_manufacture_page0 failed!");
1249 goto fail;
1250 }
1251
1252 mutex_exit(&mpt->m_mutex);
1253
1254 /*
1255 * Register the iport for multiple port HBA
1256 */
1257 mptsas_iport_register(mpt);
1258
1259 /*
1260 * initialize SCSI HBA transport structure
1261 */
1262 if (mptsas_hba_setup(mpt) == FALSE)
1263 goto fail;
1264 hba_attach_setup++;
1265
1266 if (mptsas_smp_setup(mpt) == FALSE)
1267 goto fail;
1268 smp_attach_setup++;
1269
1270 if (mptsas_cache_create(mpt) == FALSE)
1271 goto fail;
1272
1273 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1274 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1275 if (mpt->m_scsi_reset_delay == 0) {
1276 mptsas_log(mpt, CE_NOTE,
1277 "scsi_reset_delay of 0 is not recommended,"
1278 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1279 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1280 }
1281
1282 /*
1283 * Initialize the wait and done FIFO queue
1284 */
1285 mpt->m_donetail = &mpt->m_doneq;
1286 mpt->m_waitqtail = &mpt->m_waitq;
1287 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1288 mpt->m_tx_draining = 0;
1289
1290 /*
1291 * ioc cmd queue initialize
1292 */
1293 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1294 mpt->m_dev_handle = 0xFFFF;
1295
1296 MPTSAS_ENABLE_INTR(mpt);
1297
1298 /*
1299 * enable event notification
1300 */
1301 mutex_enter(&mpt->m_mutex);
1302 if (mptsas_ioc_enable_event_notification(mpt)) {
1303 mutex_exit(&mpt->m_mutex);
1304 goto fail;
1305 }
1306 mutex_exit(&mpt->m_mutex);
1307
1308 /*
1309 * Initialize PHY info for smhba
1310 */
1311 if (mptsas_smhba_setup(mpt)) {
1312 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1313 "failed");
1314 goto fail;
1315 }
1316
1317 /* Check all dma handles allocated in attach */
1318 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1319 != DDI_SUCCESS) ||
1320 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1321 != DDI_SUCCESS) ||
1322 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1323 != DDI_SUCCESS) ||
1324 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1325 != DDI_SUCCESS) ||
1326 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1327 != DDI_SUCCESS)) {
1328 goto fail;
1329 }
1330
1331 /* Check all acc handles allocated in attach */
1332 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1333 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1334 != DDI_SUCCESS) ||
1335 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1336 != DDI_SUCCESS) ||
1337 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1338 != DDI_SUCCESS) ||
1339 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1340 != DDI_SUCCESS) ||
1341 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1342 != DDI_SUCCESS) ||
1343 (mptsas_check_acc_handle(mpt->m_config_handle)
1344 != DDI_SUCCESS)) {
1345 goto fail;
1346 }
1347
1348 /*
1349 * After this point, we are not going to fail the attach.
1350 */
1351 /*
1352 * used for mptsas_watch
1353 */
1354 mptsas_list_add(mpt);
1355
1356 mutex_enter(&mptsas_global_mutex);
1357 if (mptsas_timeouts_enabled == 0) {
1358 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1359 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1360
1361 mptsas_tick = mptsas_scsi_watchdog_tick *
1362 drv_usectohz((clock_t)1000000);
1363
1364 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1365 mptsas_timeouts_enabled = 1;
1366 }
1367 mutex_exit(&mptsas_global_mutex);
1368
1369 /* Print message of HBA present */
1370 ddi_report_dev(dip);
1371
1372 /* report idle status to pm framework */
1373 if (mpt->m_options & MPTSAS_OPT_PM) {
1374 (void) pm_idle_component(dip, 0);
1375 }
1376
1377 return (DDI_SUCCESS);
1378
1379 fail:
1380 mptsas_log(mpt, CE_WARN, "attach failed");
1381 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1382 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1383 if (mpt) {
1384 mutex_enter(&mptsas_global_mutex);
1385
1386 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1387 timeout_id_t tid = mptsas_timeout_id;
1388 mptsas_timeouts_enabled = 0;
1389 mptsas_timeout_id = 0;
1390 mutex_exit(&mptsas_global_mutex);
1391 (void) untimeout(tid);
1392 mutex_enter(&mptsas_global_mutex);
1393 }
1394 mutex_exit(&mptsas_global_mutex);
1395 /* deallocate in reverse order */
1396 mptsas_cache_destroy(mpt);
1397
1398 if (smp_attach_setup) {
1399 mptsas_smp_teardown(mpt);
1400 }
1401 if (hba_attach_setup) {
1402 mptsas_hba_teardown(mpt);
1403 }
1404
1405 if (mpt->m_active) {
1406 mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1407 sizeof (mptsas_smp_t));
1408 mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1409 sizeof (mptsas_target_t));
1410 mptsas_free_active_slots(mpt);
1411 }
1412 if (intr_added) {
1413 mptsas_unregister_intrs(mpt);
1414 }
1415
1416 if (doneq_thread_create) {
1417 mutex_enter(&mpt->m_doneq_mutex);
1418 doneq_thread_num = mpt->m_doneq_thread_n;
1419 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1420 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1421 mpt->m_doneq_thread_id[j].flag &=
1422 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1423 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1424 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1425 }
1426 while (mpt->m_doneq_thread_n) {
1427 cv_wait(&mpt->m_doneq_thread_cv,
1428 &mpt->m_doneq_mutex);
1429 }
1430 for (j = 0; j < doneq_thread_num; j++) {
1431 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1432 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1433 }
1434 kmem_free(mpt->m_doneq_thread_id,
1435 sizeof (mptsas_doneq_thread_list_t)
1436 * doneq_thread_num);
1437 mutex_exit(&mpt->m_doneq_mutex);
1438 cv_destroy(&mpt->m_doneq_thread_cv);
1439 mutex_destroy(&mpt->m_doneq_mutex);
1440 }
1441 if (event_taskq_create) {
1442 ddi_taskq_destroy(mpt->m_event_taskq);
1443 }
1444 if (dr_taskq_create) {
1445 ddi_taskq_destroy(mpt->m_dr_taskq);
1446 }
1447 if (mutex_init_done) {
1448 mutex_destroy(&mpt->m_tx_waitq_mutex);
1449 mutex_destroy(&mpt->m_passthru_mutex);
1450 mutex_destroy(&mpt->m_mutex);
1451 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1452 mutex_destroy(
1453 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1454 }
1455 cv_destroy(&mpt->m_cv);
1456 cv_destroy(&mpt->m_passthru_cv);
1457 cv_destroy(&mpt->m_fw_cv);
1458 cv_destroy(&mpt->m_config_cv);
1459 cv_destroy(&mpt->m_fw_diag_cv);
1460 }
1461
1462 if (map_setup) {
1463 mptsas_cfg_fini(mpt);
1464 }
1465 if (config_setup) {
1466 mptsas_config_space_fini(mpt);
1467 }
1468 mptsas_free_handshake_msg(mpt);
1469 mptsas_hba_fini(mpt);
1470
1471 mptsas_fm_fini(mpt);
1472 ddi_soft_state_free(mptsas_state, instance);
1473 ddi_prop_remove_all(dip);
1474 }
1475 return (DDI_FAILURE);
1476 }
1477
/*
 * DDI_SUSPEND handler for the controller node.
 *
 * Marks the instance suspended, cancels the instance's private timeouts,
 * stops the global watch/reset-watch threads once every instance is
 * suspended, syncs IR state to the HBA via a RAID action system shutdown,
 * and drains the event/DR taskqs.  Returns DDI_SUCCESS in every case
 * where a valid softc can be found (suspend of an iport or of an
 * uninitialized node is a no-op).
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes carry no controller state of their own */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Nothing to do if the instance is already suspended */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* drop m_mutex across untimeout(); the handler takes it */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	/* g == NULL means every instance on the list is suspended */
	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			/* release the global lock across untimeout() */
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1584
1585 #ifdef __sparc
1586 /*ARGSUSED*/
1587 static int
1588 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1589 {
1590 mptsas_t *mpt;
1591 scsi_hba_tran_t *tran;
1592
1593 /*
1594 * If this call is for iport, just return.
1595 */
1596 if (scsi_hba_iport_unit_address(devi))
1597 return (DDI_SUCCESS);
1598
1599 if ((tran = ddi_get_driver_private(devi)) == NULL)
1600 return (DDI_SUCCESS);
1601
1602 if ((mpt = TRAN2MPT(tran)) == NULL)
1603 return (DDI_SUCCESS);
1604
1605 /*
1606 * Send RAID action system shutdown to sync IR. Disable HBA
1607 * interrupts in hardware first.
1608 */
1609 MPTSAS_DISABLE_INTR(mpt);
1610 mptsas_raid_action_system_shutdown(mpt);
1611
1612 return (DDI_SUCCESS);
1613 }
1614 #else /* __sparc */
1615 /*
1616 * quiesce(9E) entry point.
1617 *
1618 * This function is called when the system is single-threaded at high
1619 * PIL with preemption disabled. Therefore, this function must not be
1620 * blocked.
1621 *
1622 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1623 * DDI_FAILURE indicates an error condition and should almost never happen.
1624 */
1625 static int
1626 mptsas_quiesce(dev_info_t *devi)
1627 {
1628 mptsas_t *mpt;
1629 scsi_hba_tran_t *tran;
1630
1631 /*
1632 * If this call is for iport, just return.
1633 */
1634 if (scsi_hba_iport_unit_address(devi))
1635 return (DDI_SUCCESS);
1636
1637 if ((tran = ddi_get_driver_private(devi)) == NULL)
1638 return (DDI_SUCCESS);
1639
1640 if ((mpt = TRAN2MPT(tran)) == NULL)
1641 return (DDI_SUCCESS);
1642
1643 /* Disable HBA interrupts in hardware */
1644 MPTSAS_DISABLE_INTR(mpt);
1645 /* Send RAID action system shutdonw to sync IR */
1646 mptsas_raid_action_system_shutdown(mpt);
1647
1648 return (DDI_SUCCESS);
1649 }
1650 #endif /* __sparc */
1651
1652 /*
1653 * detach(9E). Remove all device allocations and system resources;
1654 * disable device interrupts.
1655 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1656 */
1657 static int
1658 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1659 {
1660 /* CONSTCOND */
1661 ASSERT(NO_COMPETING_THREADS);
1662 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1663
1664 switch (cmd) {
1665 case DDI_DETACH:
1666 return (mptsas_do_detach(devi));
1667
1668 case DDI_SUSPEND:
1669 return (mptsas_suspend(devi));
1670
1671 default:
1672 return (DDI_FAILURE);
1673 }
1674 /* NOTREACHED */
1675 }
1676
/*
 * Worker for DDI_DETACH.
 *
 * For an iport node: free all MPxIO path information (failing if any
 * path is busy), unregister the pHCI, and remove properties.
 *
 * For the controller node: raise power to D0, quiesce the IOC (RAID
 * action system shutdown + message unit reset), remove interrupts,
 * destroy taskqs and done-queue threads, cancel timeouts, and release
 * every resource acquired in mptsas_attach(), in reverse order.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* free all outstanding path infos first */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				/* a path is still in use; abort the detach */
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Shut down the done-queue helper threads: clear each thread's
	 * ACTIVE flag, wake it, then wait until all have exited.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		/* each exiting thread decrements m_doneq_thread_n */
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* drop m_mutex across untimeout(); the handler takes it */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
	mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1892
1893 static void
1894 mptsas_list_add(mptsas_t *mpt)
1895 {
1896 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1897
1898 if (mptsas_head == NULL) {
1899 mptsas_head = mpt;
1900 } else {
1901 mptsas_tail->m_next = mpt;
1902 }
1903 mptsas_tail = mpt;
1904 rw_exit(&mptsas_global_rwlock);
1905 }
1906
1907 static void
1908 mptsas_list_del(mptsas_t *mpt)
1909 {
1910 mptsas_t *m;
1911 /*
1912 * Remove device instance from the global linked list
1913 */
1914 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1915 if (mptsas_head == mpt) {
1916 m = mptsas_head = mpt->m_next;
1917 } else {
1918 for (m = mptsas_head; m != NULL; m = m->m_next) {
1919 if (m->m_next == mpt) {
1920 m->m_next = mpt->m_next;
1921 break;
1922 }
1923 }
1924 if (m == NULL) {
1925 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1926 }
1927 }
1928
1929 if (mptsas_tail == mpt) {
1930 mptsas_tail = m;
1931 }
1932 rw_exit(&mptsas_global_rwlock);
1933 }
1934
1935 static int
1936 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1937 {
1938 ddi_dma_attr_t task_dma_attrs;
1939
1940 task_dma_attrs = mpt->m_msg_dma_attr;
1941 task_dma_attrs.dma_attr_sgllen = 1;
1942 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1943
1944 /* allocate Task Management ddi_dma resources */
1945 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
1946 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
1947 alloc_size, NULL) == FALSE) {
1948 return (DDI_FAILURE);
1949 }
1950 mpt->m_hshk_dma_size = alloc_size;
1951
1952 return (DDI_SUCCESS);
1953 }
1954
/*
 * Release the handshake-message DMA/access handles allocated by
 * mptsas_alloc_handshake_msg() and record that no buffer remains.
 */
static void
mptsas_free_handshake_msg(mptsas_t *mpt)
{
	mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
	mpt->m_hshk_dma_size = 0;
}
1961
1962 static int
1963 mptsas_hba_setup(mptsas_t *mpt)
1964 {
1965 scsi_hba_tran_t *hba_tran;
1966 int tran_flags;
1967
1968 /* Allocate a transport structure */
1969 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
1970 SCSI_HBA_CANSLEEP);
1971 ASSERT(mpt->m_tran != NULL);
1972
1973 hba_tran->tran_hba_private = mpt;
1974 hba_tran->tran_tgt_private = NULL;
1975
1976 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
1977 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;
1978
1979 hba_tran->tran_start = mptsas_scsi_start;
1980 hba_tran->tran_reset = mptsas_scsi_reset;
1981 hba_tran->tran_abort = mptsas_scsi_abort;
1982 hba_tran->tran_getcap = mptsas_scsi_getcap;
1983 hba_tran->tran_setcap = mptsas_scsi_setcap;
1984 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
1985 hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;
1986
1987 hba_tran->tran_dmafree = mptsas_scsi_dmafree;
1988 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
1989 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;
1990
1991 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
1992 hba_tran->tran_get_name = mptsas_get_name;
1993
1994 hba_tran->tran_quiesce = mptsas_scsi_quiesce;
1995 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
1996 hba_tran->tran_bus_reset = NULL;
1997
1998 hba_tran->tran_add_eventcall = NULL;
1999 hba_tran->tran_get_eventcookie = NULL;
2000 hba_tran->tran_post_event = NULL;
2001 hba_tran->tran_remove_eventcall = NULL;
2002
2003 hba_tran->tran_bus_config = mptsas_bus_config;
2004
2005 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2006
2007 /*
2008 * All children of the HBA are iports. We need tran was cloned.
2009 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2010 * inherited to iport's tran vector.
2011 */
2012 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2013
2014 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2015 hba_tran, tran_flags) != DDI_SUCCESS) {
2016 mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2017 scsi_hba_tran_free(hba_tran);
2018 mpt->m_tran = NULL;
2019 return (FALSE);
2020 }
2021 return (TRUE);
2022 }
2023
2024 static void
2025 mptsas_hba_teardown(mptsas_t *mpt)
2026 {
2027 (void) scsi_hba_detach(mpt->m_dip);
2028 if (mpt->m_tran != NULL) {
2029 scsi_hba_tran_free(mpt->m_tran);
2030 mpt->m_tran = NULL;
2031 }
2032 }
2033
/*
 * Walk the PHYs on this controller, group the ones that share a port into
 * a phymask, and register one SCSA iport per distinct phymask.  A virtual
 * iport ("v0") is always registered for RAID volumes.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * "mask" accumulates all PHYs already assigned to some iport, so a
	 * PHY that was grouped into an earlier port is skipped below.
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip PHYs with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Already covered by a previously registered iport. */
		if ((mask & (1 << i)) != 0)
			continue;

		/*
		 * Collect every attached PHY on the same port into this
		 * iport's phymask.
		 */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the group's phymask on each member PHY. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* The iport's unit address is the phymask in hex. */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * Drop m_mutex across the framework call; iport
		 * registration may re-enter the driver.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register an iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * Always register a virtual port for RAID volumes (phymask 0).
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2088
2089 static int
2090 mptsas_smp_setup(mptsas_t *mpt)
2091 {
2092 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2093 ASSERT(mpt->m_smptran != NULL);
2094 mpt->m_smptran->smp_tran_hba_private = mpt;
2095 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2096 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2097 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2098 smp_hba_tran_free(mpt->m_smptran);
2099 mpt->m_smptran = NULL;
2100 return (FALSE);
2101 }
2102 /*
2103 * Initialize smp hash table
2104 */
2105 mptsas_hash_init(&mpt->m_active->m_smptbl);
2106 mpt->m_smp_devhdl = 0xFFFF;
2107
2108 return (TRUE);
2109 }
2110
2111 static void
2112 mptsas_smp_teardown(mptsas_t *mpt)
2113 {
2114 (void) smp_hba_detach(mpt->m_dip);
2115 if (mpt->m_smptran != NULL) {
2116 smp_hba_tran_free(mpt->m_smptran);
2117 mpt->m_smptran = NULL;
2118 }
2119 mpt->m_smp_devhdl = 0;
2120 }
2121
2122 static int
2123 mptsas_cache_create(mptsas_t *mpt)
2124 {
2125 int instance = mpt->m_instance;
2126 char buf[64];
2127
2128 /*
2129 * create kmem cache for packets
2130 */
2131 (void) sprintf(buf, "mptsas%d_cache", instance);
2132 mpt->m_kmem_cache = kmem_cache_create(buf,
2133 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2134 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2135 NULL, (void *)mpt, NULL, 0);
2136
2137 if (mpt->m_kmem_cache == NULL) {
2138 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2139 return (FALSE);
2140 }
2141
2142 /*
2143 * create kmem cache for extra SGL frames if SGL cannot
2144 * be accomodated into main request frame.
2145 */
2146 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2147 mpt->m_cache_frames = kmem_cache_create(buf,
2148 sizeof (mptsas_cache_frames_t), 8,
2149 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2150 NULL, (void *)mpt, NULL, 0);
2151
2152 if (mpt->m_cache_frames == NULL) {
2153 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2154 return (FALSE);
2155 }
2156
2157 return (TRUE);
2158 }
2159
2160 static void
2161 mptsas_cache_destroy(mptsas_t *mpt)
2162 {
2163 /* deallocate in reverse order */
2164 if (mpt->m_cache_frames) {
2165 kmem_cache_destroy(mpt->m_cache_frames);
2166 mpt->m_cache_frames = NULL;
2167 }
2168 if (mpt->m_kmem_cache) {
2169 kmem_cache_destroy(mpt->m_kmem_cache);
2170 mpt->m_kmem_cache = NULL;
2171 }
2172 }
2173
/*
 * power(9E) entry point.  Raises the HBA to full power (PM_LEVEL_D0),
 * waiting for the IOC to leave reset and hard-resetting it if it does
 * not reach the operational state, or lowers it to PM_LEVEL_D3.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* Only the HBA node itself is power-managed, not iport children. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds (3000 polls x 10ms) for the IOC
		 * to come out of reset.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If the IOC is not in the operational state, try to hard
		 * reset it.  MPTSAS_SS_MSG_UNIT_RESET is cleared first so
		 * that the restart path performs a full hard reset rather
		 * than a message-unit reset.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2245
2246 /*
2247 * Initialize configuration space and figure out which
2248 * chip and revison of the chip the mpt driver is using.
2249 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Already mapped (e.g. on a repeat call); nothing to do. */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.  A stale parity-error status bit is
	 * cleared by writing it back (write-one-to-clear).
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	/* Enable bus mastering, memory space, SERR and parity detection. */
	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	/* Discover PCI capabilities (e.g. power management); best-effort. */
	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2304
2305 static void
2306 mptsas_config_space_fini(mptsas_t *mpt)
2307 {
2308 if (mpt->m_config_handle != NULL) {
2309 mptsas_disable_bus_master(mpt);
2310 pci_config_teardown(&mpt->m_config_handle);
2311 mpt->m_config_handle = NULL;
2312 }
2313 }
2314
2315 static void
2316 mptsas_setup_cmd_reg(mptsas_t *mpt)
2317 {
2318 ushort_t cmdreg;
2319
2320 /*
2321 * Set the command register to the needed values.
2322 */
2323 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2324 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2325 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2326 cmdreg &= ~PCI_COMM_IO;
2327 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2328 }
2329
2330 static void
2331 mptsas_disable_bus_master(mptsas_t *mpt)
2332 {
2333 ushort_t cmdreg;
2334
2335 /*
2336 * Clear the master enable bit in the PCI command register.
2337 * This prevents any bus mastering activity like DMA.
2338 */
2339 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2340 cmdreg &= ~PCI_COMM_ME;
2341 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2342 }
2343
2344 int
2345 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2346 {
2347 ddi_dma_attr_t attrs;
2348
2349 attrs = mpt->m_io_dma_attr;
2350 attrs.dma_attr_sgllen = 1;
2351
2352 ASSERT(dma_statep != NULL);
2353
2354 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2355 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2356 &dma_statep->cookie) == FALSE) {
2357 return (DDI_FAILURE);
2358 }
2359
2360 return (DDI_SUCCESS);
2361 }
2362
2363 void
2364 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2365 {
2366 ASSERT(dma_statep != NULL);
2367 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2368 dma_statep->size = 0;
2369 }
2370
/*
 * Allocate a temporary single-cookie DMA buffer of "size" bytes, invoke
 * "callback" with the buffer's kernel address, "var" and the access
 * handle, then FM-check both handles and free the buffer.  Returns the
 * callback's result, downgraded to DDI_FAILURE if either handle check
 * fails.  Must be called with m_mutex held.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t	attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t		memp;
	ddi_acc_handle_t	accessp;
	int		rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Single cookie; granularity set so the buffer is not split. */
	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/*
	 * FM check the handles after the callback (which performed the
	 * actual DMA) but before they are destroyed.
	 */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2403
2404 static int
2405 mptsas_alloc_request_frames(mptsas_t *mpt)
2406 {
2407 ddi_dma_attr_t frame_dma_attrs;
2408 caddr_t memp;
2409 ddi_dma_cookie_t cookie;
2410 size_t mem_size;
2411
2412 /*
2413 * re-alloc when it has already alloced
2414 */
2415 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2416 &mpt->m_acc_req_frame_hdl);
2417
2418 /*
2419 * The size of the request frame pool is:
2420 * Number of Request Frames * Request Frame Size
2421 */
2422 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2423
2424 /*
2425 * set the DMA attributes. System Request Message Frames must be
2426 * aligned on a 16-byte boundry.
2427 */
2428 frame_dma_attrs = mpt->m_msg_dma_attr;
2429 frame_dma_attrs.dma_attr_align = 16;
2430 frame_dma_attrs.dma_attr_sgllen = 1;
2431
2432 /*
2433 * allocate the request frame pool.
2434 */
2435 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2436 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2437 mem_size, &cookie) == FALSE) {
2438 return (DDI_FAILURE);
2439 }
2440
2441 /*
2442 * Store the request frame memory address. This chip uses this
2443 * address to dma to and from the driver's frame. The second
2444 * address is the address mpt uses to fill in the frame.
2445 */
2446 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2447 mpt->m_req_frame = memp;
2448
2449 /*
2450 * Clear the request frame pool.
2451 */
2452 bzero(mpt->m_req_frame, mem_size);
2453
2454 return (DDI_SUCCESS);
2455 }
2456
2457 static int
2458 mptsas_alloc_reply_frames(mptsas_t *mpt)
2459 {
2460 ddi_dma_attr_t frame_dma_attrs;
2461 caddr_t memp;
2462 ddi_dma_cookie_t cookie;
2463 size_t mem_size;
2464
2465 /*
2466 * re-alloc when it has already alloced
2467 */
2468 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2469 &mpt->m_acc_reply_frame_hdl);
2470
2471 /*
2472 * The size of the reply frame pool is:
2473 * Number of Reply Frames * Reply Frame Size
2474 */
2475 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2476
2477 /*
2478 * set the DMA attributes. System Reply Message Frames must be
2479 * aligned on a 4-byte boundry. This is the default.
2480 */
2481 frame_dma_attrs = mpt->m_msg_dma_attr;
2482 frame_dma_attrs.dma_attr_sgllen = 1;
2483
2484 /*
2485 * allocate the reply frame pool
2486 */
2487 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2488 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2489 mem_size, &cookie) == FALSE) {
2490 return (DDI_FAILURE);
2491 }
2492
2493 /*
2494 * Store the reply frame memory address. This chip uses this
2495 * address to dma to and from the driver's frame. The second
2496 * address is the address mpt uses to process the frame.
2497 */
2498 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2499 mpt->m_reply_frame = memp;
2500
2501 /*
2502 * Clear the reply frame pool.
2503 */
2504 bzero(mpt->m_reply_frame, mem_size);
2505
2506 return (DDI_SUCCESS);
2507 }
2508
2509 static int
2510 mptsas_alloc_free_queue(mptsas_t *mpt)
2511 {
2512 ddi_dma_attr_t frame_dma_attrs;
2513 caddr_t memp;
2514 ddi_dma_cookie_t cookie;
2515 size_t mem_size;
2516
2517 /*
2518 * re-alloc when it has already alloced
2519 */
2520 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2521 &mpt->m_acc_free_queue_hdl);
2522
2523 /*
2524 * The reply free queue size is:
2525 * Reply Free Queue Depth * 4
2526 * The "4" is the size of one 32 bit address (low part of 64-bit
2527 * address)
2528 */
2529 mem_size = mpt->m_free_queue_depth * 4;
2530
2531 /*
2532 * set the DMA attributes The Reply Free Queue must be aligned on a
2533 * 16-byte boundry.
2534 */
2535 frame_dma_attrs = mpt->m_msg_dma_attr;
2536 frame_dma_attrs.dma_attr_align = 16;
2537 frame_dma_attrs.dma_attr_sgllen = 1;
2538
2539 /*
2540 * allocate the reply free queue
2541 */
2542 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2543 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2544 mem_size, &cookie) == FALSE) {
2545 return (DDI_FAILURE);
2546 }
2547
2548 /*
2549 * Store the reply free queue memory address. This chip uses this
2550 * address to read from the reply free queue. The second address
2551 * is the address mpt uses to manage the queue.
2552 */
2553 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2554 mpt->m_free_queue = memp;
2555
2556 /*
2557 * Clear the reply free queue memory.
2558 */
2559 bzero(mpt->m_free_queue, mem_size);
2560
2561 return (DDI_SUCCESS);
2562 }
2563
2564 static int
2565 mptsas_alloc_post_queue(mptsas_t *mpt)
2566 {
2567 ddi_dma_attr_t frame_dma_attrs;
2568 caddr_t memp;
2569 ddi_dma_cookie_t cookie;
2570 size_t mem_size;
2571
2572 /*
2573 * re-alloc when it has already alloced
2574 */
2575 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2576 &mpt->m_acc_post_queue_hdl);
2577
2578 /*
2579 * The reply descriptor post queue size is:
2580 * Reply Descriptor Post Queue Depth * 8
2581 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2582 */
2583 mem_size = mpt->m_post_queue_depth * 8;
2584
2585 /*
2586 * set the DMA attributes. The Reply Descriptor Post Queue must be
2587 * aligned on a 16-byte boundry.
2588 */
2589 frame_dma_attrs = mpt->m_msg_dma_attr;
2590 frame_dma_attrs.dma_attr_align = 16;
2591 frame_dma_attrs.dma_attr_sgllen = 1;
2592
2593 /*
2594 * allocate the reply post queue
2595 */
2596 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2597 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2598 mem_size, &cookie) == FALSE) {
2599 return (DDI_FAILURE);
2600 }
2601
2602 /*
2603 * Store the reply descriptor post queue memory address. This chip
2604 * uses this address to write to the reply descriptor post queue. The
2605 * second address is the address mpt uses to manage the queue.
2606 */
2607 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2608 mpt->m_post_queue = memp;
2609
2610 /*
2611 * Clear the reply post queue memory.
2612 */
2613 bzero(mpt->m_post_queue, mem_size);
2614
2615 return (DDI_SUCCESS);
2616 }
2617
2618 static void
2619 mptsas_alloc_reply_args(mptsas_t *mpt)
2620 {
2621 if (mpt->m_replyh_args != NULL) {
2622 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2623 * mpt->m_max_replies);
2624 mpt->m_replyh_args = NULL;
2625 }
2626 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2627 mpt->m_max_replies, KM_SLEEP);
2628 }
2629
2630 static int
2631 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2632 {
2633 mptsas_cache_frames_t *frames = NULL;
2634 if (cmd->cmd_extra_frames == NULL) {
2635 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2636 if (frames == NULL) {
2637 return (DDI_FAILURE);
2638 }
2639 cmd->cmd_extra_frames = frames;
2640 }
2641 return (DDI_SUCCESS);
2642 }
2643
2644 static void
2645 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2646 {
2647 if (cmd->cmd_extra_frames) {
2648 kmem_cache_free(mpt->m_cache_frames,
2649 (void *)cmd->cmd_extra_frames);
2650 cmd->cmd_extra_frames = NULL;
2651 }
2652 }
2653
/*
 * Unmap the device register space that was mapped during attach.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2660
2661 static void
2662 mptsas_hba_fini(mptsas_t *mpt)
2663 {
2664 NDBG0(("mptsas_hba_fini"));
2665
2666 /*
2667 * Free up any allocated memory
2668 */
2669 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2670 &mpt->m_acc_req_frame_hdl);
2671
2672 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2673 &mpt->m_acc_reply_frame_hdl);
2674
2675 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2676 &mpt->m_acc_free_queue_hdl);
2677
2678 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2679 &mpt->m_acc_post_queue_hdl);
2680
2681 if (mpt->m_replyh_args != NULL) {
2682 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2683 * mpt->m_max_replies);
2684 }
2685 }
2686
/*
 * Build the unit address ("name") for a child LUN node into "name"
 * (buffer of "len" bytes).  SATA devices addressed by PHY get the form
 * "pPHY,LUN"; devices with a target-port WWN get "wWWN,LUN" (the "w" is
 * part of the stored property string).  Returns DDI_SUCCESS, or
 * DDI_FAILURE when neither a sata-phy nor a target-port property exists.
 * On truncation a warning is logged but DDI_SUCCESS is still returned.
 */
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int		lun = 0;
	char		*sas_wwn = NULL;
	int		phynum = -1;
	int		reallen = 0;

	/* Get the target num */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		return (DDI_FAILURE);
	}

	/* snprintf() returns the length needed; >= len means truncated. */
	ASSERT(reallen < len);
	if (reallen >= len) {
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}
2724
2725 /*
2726 * tran_tgt_init(9E) - target device instance initialization
2727 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * This function locates the mptsas_target_t backing the child,
	 * allocates per-target private data, and for SATA/ATAPI devices
	 * overrides the SCSA "inquiry-*" properties with vid/pid/rev
	 * parsed from ATA IDENTIFY data.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	int phymask = 0;
	uint64_t sas_wwn = 0;
	mpt = SDEV2MPT(sd);

	/* Target children hang off iports, never the HBA node itself. */
	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent (.conf) nodes are merged into the matching
	 * hardware node and then rejected here.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/* mpxio client: take LUN and WWN from the pathinfo node. */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				/* Unparsable WWN: fall back to phymask. */
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Non-mpxio child: take LUN and WWN from devinfo props. */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}
	/* Either a real WWN or a phymask (virtual RAID port) must exist. */
	ASSERT((sas_wwn != 0) || (phymask != 0));
	mutex_enter(&mpt->m_mutex);
	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	/* Allocate per-target private data once; reused on later inits. */
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* mpxio clients get their inquiry properties via scsi_vhci. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/*
			 * The inquiry is best-effort; failure to read the
			 * VPD page still leaves a usable target.
			 */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* IDENTIFY data starts at byte 60 of VPD page 0x89. */
		sid = (void *)(&inq89[60]);

		/* IDENTIFY strings are byte-swapped 16-bit words. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into into vid/pid at the first whitespace
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
2914 /*
2915 * tran_tgt_free(9E) - target device instance deallocation
2916 */
2917 static void
2918 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2919 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2920 {
2921 #ifndef __lock_lint
2922 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2923 #endif
2924
2925 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2926
2927 if (tgt_private != NULL) {
2928 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2929 hba_tran->tran_tgt_private = NULL;
2930 }
2931 }
2932
2933 /*
2934 * scsi_pkt handling
2935 *
2936 * Visible to the external world via the transport structure.
2937 */
2938
2939 /*
2940 * Notes:
2941 * - transport the command to the addressed SCSI target/lun device
2942 * - normal operation is to schedule the command to be transported,
2943 * and return TRAN_ACCEPT if this is successful.
2944 * - if NO_INTR, tran_start must poll device for command completion
2945 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by another thread, don't spin to wait
	 * for it.  Instead, queue the cmd, and the next time the instance
	 * lock is not held, accept all the queued cmds.  An extra tx_waitq
	 * is introduced to protect the queue.
	 *
	 * A polled cmd will not be queued; it is accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An I/O-requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and, while holding the
	 * tx_waitq mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex.  This scheme ensures that
	 * the last cmd in a burst is processed.
	 *
	 * We enable this feature only when the helper threads are enabled,
	 * at which point we consider the load to be heavy.
	 *
	 * The per-instance mutex m_tx_waitq_mutex protects
	 * m_tx_waitqtail, m_tx_waitq, and m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		/* Fast path: got the instance lock without blocking. */
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled command: must not be queued; block for it. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/*
				 * Another thread is draining; append to the
				 * tail and let the drainer pick this cmd up.
				 */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* become the drainer of the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3088
3089 /*
3090 * Accept all the queued cmds(if any) before accept the current one.
3091 */
/*
 * Accept all the queued cmds (if any) before accepting the current one.
 * Must be called with m_mutex held.  Returns a TRAN_* code for "cmd".
 */
static int
mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	/*
	 * The call to mptsas_accept_tx_waitq() must always be performed
	 * because that is where mpt->m_tx_draining is cleared.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	mptsas_accept_tx_waitq(mpt);
	mutex_exit(&mpt->m_tx_waitq_mutex);
	/*
	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
	 * in this case, m_mutex is acquired.
	 */
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * The command should be allowed to retry by returning
			 * TRAN_BUSY to stall the I/O's which come from
			 * scsi_vhci since the device/path is in unstable state
			 * now.
			 */
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the command by
			 * return TRAN_FATAL_ERROR.
			 */
			return (TRAN_FATAL_ERROR);
		}
	}
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3131
/*
 * Validate a command and either start it or queue it.
 *
 * Prepares the pkt if it has not been prepared yet, resets a drained
 * target throttle, rejects commands whose DevHandle is invalid (completing
 * driver-internal TXQ commands through the doneq instead of failing them
 * back to a caller), and finally either starts the command on the hardware
 * or places it on the wait queue.  Polled (FLAG_NOINTR) commands are run
 * to completion before returning.  Returns a TRAN_* code.
 * Caller must hold m_mutex.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			/* Preparation failed; clear the transport flags. */
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/*
			 * Driver-internal (TXQ) commands have no caller to
			 * retry them; complete them through the doneq.
			 */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command.  In theory, a command from a scsi_vhci
	 * client cannot be sent down with an invalid devhdl, since
	 * devhdl is invalidated after path offline and the target
	 * driver is not supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* As above: complete TXQ commands via the doneq. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free slot; queue it for later. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3235
/*
 * Reserve a request slot (SMID) for cmd and account for it.
 *
 * Returns TRUE when a slot was claimed (cmd->cmd_slot set, m_ncmds and,
 * for non-IOC commands, the per-target m_t_ncmds incremented, and the
 * active timeout armed), or FALSE when every slot is in use.
 * Caller must hold m_mutex.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots;
	int slot;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * m_tags is equivalent to the SMID when sending requests.  Since the
	 * SMID cannot be 0, start out at one if rolling over past the size
	 * of the request queue depth.  Also, don't use the last SMID, which is
	 * reserved for TM requests.
	 */
	slot = (slots->m_tags)++;
	if (slots->m_tags > slots->m_n_slots) {
		slots->m_tags = 1;
	}

alloc_tag:
	/* Validate tag, should never fail. */
	if (slots->m_slot[slot] == NULL) {
		/*
		 * Make sure SMID is not using reserved value of 0
		 * and the TM request slot.
		 */
		ASSERT((slot > 0) && (slot <= slots->m_n_slots));
		cmd->cmd_slot = slot;
		slots->m_slot[slot] = cmd;
		mpt->m_ncmds++;

		/*
		 * only increment per target ncmds if this is not a
		 * command that has no target associated with it (i.e. an
		 * event acknowledgement)
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds++;
		}
		cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

		/*
		 * If initial timeout is less than or equal to one tick, bump
		 * the timeout by a tick so that command doesn't timeout before
		 * its allotted time.
		 */
		if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
			cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
		}
		return (TRUE);
	} else {
		int i;

		/*
		 * If slot in use, scan until a free one is found.  Don't use 0
		 * or final slot, which is reserved for TM requests.
		 * m_tags keeps advancing (with rollover to 1) so the search
		 * resumes where the last allocation left off.
		 */
		for (i = 0; i < slots->m_n_slots; i++) {
			slot = slots->m_tags;
			if (++(slots->m_tags) > slots->m_n_slots) {
				slots->m_tags = 1;
			}
			if (slots->m_slot[slot] == NULL) {
				NDBG22(("found free slot %d", slot));
				goto alloc_tag;
			}
		}
	}
	return (FALSE);
}
3313
3314 /*
3315 * prepare the pkt:
3316 * the pkt may have been resubmitted or just reused so
3317 * initialize some fields and do some checks.
3318 */
3319 static int
3320 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3321 {
3322 struct scsi_pkt *pkt = CMD2PKT(cmd);
3323
3324 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3325
3326 /*
3327 * Reinitialize some fields that need it; the packet may
3328 * have been resubmitted
3329 */
3330 pkt->pkt_reason = CMD_CMPLT;
3331 pkt->pkt_state = 0;
3332 pkt->pkt_statistics = 0;
3333 pkt->pkt_resid = 0;
3334 cmd->cmd_age = 0;
3335 cmd->cmd_pkt_flags = pkt->pkt_flags;
3336
3337 /*
3338 * zero status byte.
3339 */
3340 *(pkt->pkt_scbp) = 0;
3341
3342 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3343 pkt->pkt_resid = cmd->cmd_dmacount;
3344
3345 /*
3346 * consistent packets need to be sync'ed first
3347 * (only for data going out)
3348 */
3349 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3350 (cmd->cmd_flags & CFLAG_DMASEND)) {
3351 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3352 DDI_DMA_SYNC_FORDEV);
3353 }
3354 }
3355
3356 cmd->cmd_flags =
3357 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3358 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3359
3360 return (TRAN_ACCEPT);
3361 }
3362
3363 /*
3364 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3365 *
3366 * One of three possibilities:
3367 * - allocate scsi_pkt
3368 * - allocate scsi_pkt and DMA resources
3369 * - allocate DMA resources to an already-allocated pkt
3370 */
3371 static struct scsi_pkt *
3372 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3373 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3374 int (*callback)(), caddr_t arg)
3375 {
3376 mptsas_cmd_t *cmd, *new_cmd;
3377 mptsas_t *mpt = ADDR2MPT(ap);
3378 int failure = 1;
3379 uint_t oldcookiec;
3380 mptsas_target_t *ptgt = NULL;
3381 int rval;
3382 mptsas_tgt_private_t *tgt_private;
3383 int kf;
3384
3385 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3386
3387 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3388 tran_tgt_private;
3389 ASSERT(tgt_private != NULL);
3390 if (tgt_private == NULL) {
3391 return (NULL);
3392 }
3393 ptgt = tgt_private->t_private;
3394 ASSERT(ptgt != NULL);
3395 if (ptgt == NULL)
3396 return (NULL);
3397 ap->a_target = ptgt->m_devhdl;
3398 ap->a_lun = tgt_private->t_lun;
3399
3400 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3401 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3402 statuslen *= 100; tgtlen *= 4;
3403 #endif
3404 NDBG3(("mptsas_scsi_init_pkt:\n"
3405 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3406 ap->a_target, (void *)pkt, (void *)bp,
3407 cmdlen, statuslen, tgtlen, flags));
3408
3409 /*
3410 * Allocate the new packet.
3411 */
3412 if (pkt == NULL) {
3413 ddi_dma_handle_t save_dma_handle;
3414 ddi_dma_handle_t save_arq_dma_handle;
3415 struct buf *save_arq_bp;
3416 ddi_dma_cookie_t save_arqcookie;
3417
3418 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3419
3420 if (cmd) {
3421 save_dma_handle = cmd->cmd_dmahandle;
3422 save_arq_dma_handle = cmd->cmd_arqhandle;
3423 save_arq_bp = cmd->cmd_arq_buf;
3424 save_arqcookie = cmd->cmd_arqcookie;
3425 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3426 cmd->cmd_dmahandle = save_dma_handle;
3427 cmd->cmd_arqhandle = save_arq_dma_handle;
3428 cmd->cmd_arq_buf = save_arq_bp;
3429 cmd->cmd_arqcookie = save_arqcookie;
3430
3431 pkt = (void *)((uchar_t *)cmd +
3432 sizeof (struct mptsas_cmd));
3433 pkt->pkt_ha_private = (opaque_t)cmd;
3434 pkt->pkt_address = *ap;
3435 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3436 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3437 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3438 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3439 cmd->cmd_cdblen = (uchar_t)cmdlen;
3440 cmd->cmd_scblen = statuslen;
3441 cmd->cmd_rqslen = SENSE_LENGTH;
3442 cmd->cmd_tgt_addr = ptgt;
3443 failure = 0;
3444 }
3445
3446 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3447 (tgtlen > PKT_PRIV_LEN) ||
3448 (statuslen > EXTCMDS_STATUS_SIZE)) {
3449 if (failure == 0) {
3450 /*
3451 * if extern alloc fails, all will be
3452 * deallocated, including cmd
3453 */
3454 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3455 cmdlen, tgtlen, statuslen, kf);
3456 }
3457 if (failure) {
3458 /*
3459 * if extern allocation fails, it will
3460 * deallocate the new pkt as well
3461 */
3462 return (NULL);
3463 }
3464 }
3465 new_cmd = cmd;
3466
3467 } else {
3468 cmd = PKT2CMD(pkt);
3469 new_cmd = NULL;
3470 }
3471
3472
3473 /* grab cmd->cmd_cookiec here as oldcookiec */
3474
3475 oldcookiec = cmd->cmd_cookiec;
3476
3477 /*
3478 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3479 * greater than 0 and we'll need to grab the next dma window
3480 */
3481 /*
3482 * SLM-not doing extra command frame right now; may add later
3483 */
3484
3485 if (cmd->cmd_nwin > 0) {
3486
3487 /*
3488 * Make sure we havn't gone past the the total number
3489 * of windows
3490 */
3491 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3492 return (NULL);
3493 }
3494 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3495 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3496 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3497 return (NULL);
3498 }
3499 goto get_dma_cookies;
3500 }
3501
3502
3503 if (flags & PKT_XARQ) {
3504 cmd->cmd_flags |= CFLAG_XARQ;
3505 }
3506
3507 /*
3508 * DMA resource allocation. This version assumes your
3509 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3510 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3511 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3512 */
3513 if (bp && (bp->b_bcount != 0) &&
3514 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3515
3516 int cnt, dma_flags;
3517 mptti_t *dmap; /* ptr to the S/G list */
3518
3519 /*
3520 * Set up DMA memory and position to the next DMA segment.
3521 */
3522 ASSERT(cmd->cmd_dmahandle != NULL);
3523
3524 if (bp->b_flags & B_READ) {
3525 dma_flags = DDI_DMA_READ;
3526 cmd->cmd_flags &= ~CFLAG_DMASEND;
3527 } else {
3528 dma_flags = DDI_DMA_WRITE;
3529 cmd->cmd_flags |= CFLAG_DMASEND;
3530 }
3531 if (flags & PKT_CONSISTENT) {
3532 cmd->cmd_flags |= CFLAG_CMDIOPB;
3533 dma_flags |= DDI_DMA_CONSISTENT;
3534 }
3535
3536 if (flags & PKT_DMA_PARTIAL) {
3537 dma_flags |= DDI_DMA_PARTIAL;
3538 }
3539
3540 /*
3541 * workaround for byte hole issue on psycho and
3542 * schizo pre 2.1
3543 */
3544 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3545 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3546 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3547 dma_flags |= DDI_DMA_CONSISTENT;
3548 }
3549
3550 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3551 dma_flags, callback, arg,
3552 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3553 if (rval == DDI_DMA_PARTIAL_MAP) {
3554 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3555 &cmd->cmd_nwin);
3556 cmd->cmd_winindex = 0;
3557 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3558 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3559 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3560 &cmd->cmd_cookiec);
3561 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3562 switch (rval) {
3563 case DDI_DMA_NORESOURCES:
3564 bioerror(bp, 0);
3565 break;
3566 case DDI_DMA_BADATTR:
3567 case DDI_DMA_NOMAPPING:
3568 bioerror(bp, EFAULT);
3569 break;
3570 case DDI_DMA_TOOBIG:
3571 default:
3572 bioerror(bp, EINVAL);
3573 break;
3574 }
3575 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3576 if (new_cmd) {
3577 mptsas_scsi_destroy_pkt(ap, pkt);
3578 }
3579 return ((struct scsi_pkt *)NULL);
3580 }
3581
3582 get_dma_cookies:
3583 cmd->cmd_flags |= CFLAG_DMAVALID;
3584 ASSERT(cmd->cmd_cookiec > 0);
3585
3586 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3587 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3588 cmd->cmd_cookiec);
3589 bioerror(bp, EINVAL);
3590 if (new_cmd) {
3591 mptsas_scsi_destroy_pkt(ap, pkt);
3592 }
3593 return ((struct scsi_pkt *)NULL);
3594 }
3595
3596 /*
3597 * Allocate extra SGL buffer if needed.
3598 */
3599 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3600 (cmd->cmd_extra_frames == NULL)) {
3601 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3602 DDI_FAILURE) {
3603 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3604 "failed");
3605 bioerror(bp, ENOMEM);
3606 if (new_cmd) {
3607 mptsas_scsi_destroy_pkt(ap, pkt);
3608 }
3609 return ((struct scsi_pkt *)NULL);
3610 }
3611 }
3612
3613 /*
3614 * Always use scatter-gather transfer
3615 * Use the loop below to store physical addresses of
3616 * DMA segments, from the DMA cookies, into your HBA's
3617 * scatter-gather list.
3618 * We need to ensure we have enough kmem alloc'd
3619 * for the sg entries since we are no longer using an
3620 * array inside mptsas_cmd_t.
3621 *
3622 * We check cmd->cmd_cookiec against oldcookiec so
3623 * the scatter-gather list is correctly allocated
3624 */
3625
3626 if (oldcookiec != cmd->cmd_cookiec) {
3627 if (cmd->cmd_sg != (mptti_t *)NULL) {
3628 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3629 oldcookiec);
3630 cmd->cmd_sg = NULL;
3631 }
3632 }
3633
3634 if (cmd->cmd_sg == (mptti_t *)NULL) {
3635 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3636 cmd->cmd_cookiec), kf);
3637
3638 if (cmd->cmd_sg == (mptti_t *)NULL) {
3639 mptsas_log(mpt, CE_WARN,
3640 "unable to kmem_alloc enough memory "
3641 "for scatter/gather list");
3642 /*
3643 * if we have an ENOMEM condition we need to behave
3644 * the same way as the rest of this routine
3645 */
3646
3647 bioerror(bp, ENOMEM);
3648 if (new_cmd) {
3649 mptsas_scsi_destroy_pkt(ap, pkt);
3650 }
3651 return ((struct scsi_pkt *)NULL);
3652 }
3653 }
3654
3655 dmap = cmd->cmd_sg;
3656
3657 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3658
3659 /*
3660 * store the first segment into the S/G list
3661 */
3662 dmap->count = cmd->cmd_cookie.dmac_size;
3663 dmap->addr.address64.Low = (uint32_t)
3664 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3665 dmap->addr.address64.High = (uint32_t)
3666 (cmd->cmd_cookie.dmac_laddress >> 32);
3667
3668 /*
3669 * dmacount counts the size of the dma for this window
3670 * (if partial dma is being used). totaldmacount
3671 * keeps track of the total amount of dma we have
3672 * transferred for all the windows (needed to calculate
3673 * the resid value below).
3674 */
3675 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3676 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3677
3678 /*
3679 * We already stored the first DMA scatter gather segment,
3680 * start at 1 if we need to store more.
3681 */
3682 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3683 /*
3684 * Get next DMA cookie
3685 */
3686 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3687 &cmd->cmd_cookie);
3688 dmap++;
3689
3690 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3691 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3692
3693 /*
3694 * store the segment parms into the S/G list
3695 */
3696 dmap->count = cmd->cmd_cookie.dmac_size;
3697 dmap->addr.address64.Low = (uint32_t)
3698 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3699 dmap->addr.address64.High = (uint32_t)
3700 (cmd->cmd_cookie.dmac_laddress >> 32);
3701 }
3702
3703 /*
3704 * If this was partially allocated we set the resid
3705 * the amount of data NOT transferred in this window
3706 * If there is only one window, the resid will be 0
3707 */
3708 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3709 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3710 }
3711 return (pkt);
3712 }
3713
3714 /*
3715 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3716 *
3717 * Notes:
3718 * - also frees DMA resources if allocated
3719 * - implicit DMA synchonization
3720 */
3721 static void
3722 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3723 {
3724 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3725 mptsas_t *mpt = ADDR2MPT(ap);
3726
3727 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3728 ap->a_target, (void *)pkt));
3729
3730 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3731 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3732 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3733 }
3734
3735 if (cmd->cmd_sg) {
3736 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3737 cmd->cmd_sg = NULL;
3738 }
3739
3740 mptsas_free_extra_sgl_frame(mpt, cmd);
3741
3742 if ((cmd->cmd_flags &
3743 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3744 CFLAG_SCBEXTERN)) == 0) {
3745 cmd->cmd_flags = CFLAG_FREE;
3746 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3747 } else {
3748 mptsas_pkt_destroy_extern(mpt, cmd);
3749 }
3750 }
3751
3752 /*
3753 * kmem cache constructor and destructor:
3754 * When constructing, we bzero the cmd and allocate the dma handle
3755 * When destructing, just free the dma handle
3756 */
3757 static int
3758 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3759 {
3760 mptsas_cmd_t *cmd = buf;
3761 mptsas_t *mpt = cdrarg;
3762 struct scsi_address ap;
3763 uint_t cookiec;
3764 ddi_dma_attr_t arq_dma_attr;
3765 int (*callback)(caddr_t);
3766
3767 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3768
3769 NDBG4(("mptsas_kmem_cache_constructor"));
3770
3771 ap.a_hba_tran = mpt->m_tran;
3772 ap.a_target = 0;
3773 ap.a_lun = 0;
3774
3775 /*
3776 * allocate a dma handle
3777 */
3778 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3779 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3780 cmd->cmd_dmahandle = NULL;
3781 return (-1);
3782 }
3783
3784 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3785 SENSE_LENGTH, B_READ, callback, NULL);
3786 if (cmd->cmd_arq_buf == NULL) {
3787 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3788 cmd->cmd_dmahandle = NULL;
3789 return (-1);
3790 }
3791
3792 /*
3793 * allocate a arq handle
3794 */
3795 arq_dma_attr = mpt->m_msg_dma_attr;
3796 arq_dma_attr.dma_attr_sgllen = 1;
3797 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3798 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3799 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3800 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3801 cmd->cmd_dmahandle = NULL;
3802 cmd->cmd_arqhandle = NULL;
3803 return (-1);
3804 }
3805
3806 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3807 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3808 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3809 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3810 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3811 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3812 cmd->cmd_dmahandle = NULL;
3813 cmd->cmd_arqhandle = NULL;
3814 cmd->cmd_arq_buf = NULL;
3815 return (-1);
3816 }
3817
3818 return (0);
3819 }
3820
3821 static void
3822 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3823 {
3824 #ifndef __lock_lint
3825 _NOTE(ARGUNUSED(cdrarg))
3826 #endif
3827 mptsas_cmd_t *cmd = buf;
3828
3829 NDBG4(("mptsas_kmem_cache_destructor"));
3830
3831 if (cmd->cmd_arqhandle) {
3832 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3833 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3834 cmd->cmd_arqhandle = NULL;
3835 }
3836 if (cmd->cmd_arq_buf) {
3837 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3838 cmd->cmd_arq_buf = NULL;
3839 }
3840 if (cmd->cmd_dmahandle) {
3841 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3842 cmd->cmd_dmahandle = NULL;
3843 }
3844 }
3845
/*
 * kmem cache constructor for the extra-SGL frame buffers: allocate,
 * map and bind a contiguous DMA area large enough to hold the chained
 * SGL frames for one command.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/*
	 * Frames must be 16-byte aligned and mapped by a single cookie.
	 */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/*
	 * Room for all chain frames; the main request frame itself is not
	 * part of this area, hence the -1.
	 */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3901
3902 static void
3903 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3904 {
3905 #ifndef __lock_lint
3906 _NOTE(ARGUNUSED(cdrarg))
3907 #endif
3908 mptsas_cache_frames_t *p = buf;
3909 if (p->m_dma_hdl != NULL) {
3910 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3911 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3912 ddi_dma_free_handle(&p->m_dma_hdl);
3913 p->m_phys_addr = NULL;
3914 p->m_frames_addr = NULL;
3915 p->m_dma_hdl = NULL;
3916 p->m_acc_hdl = NULL;
3917 }
3918
3919 }
3920
3921 /*
3922 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3923 * for non-standard length cdb, pkt_private, status areas
3924 * if allocation fails, then deallocate all external space and the pkt
3925 */
3926 /* ARGSUSED */
3927 static int
3928 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
3929 int cmdlen, int tgtlen, int statuslen, int kf)
3930 {
3931 caddr_t cdbp, scbp, tgt;
3932 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
3933 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3934 struct scsi_address ap;
3935 size_t senselength;
3936 ddi_dma_attr_t ext_arq_dma_attr;
3937 uint_t cookiec;
3938
3939 NDBG3(("mptsas_pkt_alloc_extern: "
3940 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
3941 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
3942
3943 tgt = cdbp = scbp = NULL;
3944 cmd->cmd_scblen = statuslen;
3945 cmd->cmd_privlen = (uchar_t)tgtlen;
3946
3947 if (cmdlen > sizeof (cmd->cmd_cdb)) {
3948 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
3949 goto fail;
3950 }
3951 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
3952 cmd->cmd_flags |= CFLAG_CDBEXTERN;
3953 }
3954 if (tgtlen > PKT_PRIV_LEN) {
3955 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
3956 goto fail;
3957 }
3958 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
3959 cmd->cmd_pkt->pkt_private = tgt;
3960 }
3961 if (statuslen > EXTCMDS_STATUS_SIZE) {
3962 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
3963 goto fail;
3964 }
3965 cmd->cmd_flags |= CFLAG_SCBEXTERN;
3966 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
3967
3968 /* allocate sense data buf for DMA */
3969
3970 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
3971 struct scsi_arq_status, sts_sensedata);
3972 cmd->cmd_rqslen = (uchar_t)senselength;
3973
3974 ap.a_hba_tran = mpt->m_tran;
3975 ap.a_target = 0;
3976 ap.a_lun = 0;
3977
3978 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
3979 (struct buf *)NULL, senselength, B_READ,
3980 callback, NULL);
3981
3982 if (cmd->cmd_ext_arq_buf == NULL) {
3983 goto fail;
3984 }
3985 /*
3986 * allocate a extern arq handle and bind the buf
3987 */
3988 ext_arq_dma_attr = mpt->m_msg_dma_attr;
3989 ext_arq_dma_attr.dma_attr_sgllen = 1;
3990 if ((ddi_dma_alloc_handle(mpt->m_dip,
3991 &ext_arq_dma_attr, callback,
3992 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
3993 goto fail;
3994 }
3995
3996 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
3997 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3998 callback, NULL, &cmd->cmd_ext_arqcookie,
3999 &cookiec)
4000 != DDI_SUCCESS) {
4001 goto fail;
4002 }
4003 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4004 }
4005 return (0);
4006 fail:
4007 mptsas_pkt_destroy_extern(mpt, cmd);
4008 return (1);
4009 }
4010
4011 /*
4012 * deallocate external pkt space and deallocate the pkt
4013 */
4014 static void
4015 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4016 {
4017 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4018
4019 if (cmd->cmd_flags & CFLAG_FREE) {
4020 mptsas_log(mpt, CE_PANIC,
4021 "mptsas_pkt_destroy_extern: freeing free packet");
4022 _NOTE(NOT_REACHED)
4023 /* NOTREACHED */
4024 }
4025 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4026 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4027 }
4028 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4029 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4030 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4031 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4032 }
4033 if (cmd->cmd_ext_arqhandle) {
4034 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4035 cmd->cmd_ext_arqhandle = NULL;
4036 }
4037 if (cmd->cmd_ext_arq_buf)
4038 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4039 }
4040 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4041 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4042 }
4043 cmd->cmd_flags = CFLAG_FREE;
4044 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4045 }
4046
4047 /*
4048 * tran_sync_pkt(9E) - explicit DMA synchronization
4049 */
4050 /*ARGSUSED*/
4051 static void
4052 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4053 {
4054 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4055
4056 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4057 ap->a_target, (void *)pkt));
4058
4059 if (cmd->cmd_dmahandle) {
4060 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4061 (cmd->cmd_flags & CFLAG_DMASEND) ?
4062 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4063 }
4064 }
4065
4066 /*
4067 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4068 */
4069 /*ARGSUSED*/
4070 static void
4071 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4072 {
4073 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4074 mptsas_t *mpt = ADDR2MPT(ap);
4075
4076 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4077 ap->a_target, (void *)pkt));
4078
4079 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4080 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4081 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4082 }
4083
4084 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4085 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4086 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4087 }
4088
4089 mptsas_free_extra_sgl_frame(mpt, cmd);
4090 }
4091
4092 static void
4093 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4094 {
4095 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4096 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4097 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4098 DDI_DMA_SYNC_FORCPU);
4099 }
4100 (*pkt->pkt_comp)(pkt);
4101 }
4102
4103 static void
4104 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4105 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4106 {
4107 uint_t cookiec;
4108 mptti_t *dmap;
4109 uint32_t flags;
4110 pMpi2SGESimple64_t sge;
4111 pMpi2SGEChain64_t sgechain;
4112 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4113
4114 /*
4115 * Save the number of entries in the DMA
4116 * Scatter/Gather list
4117 */
4118 cookiec = cmd->cmd_cookiec;
4119
4120 NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4121
4122 /*
4123 * Set read/write bit in control.
4124 */
4125 if (cmd->cmd_flags & CFLAG_DMASEND) {
4126 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4127 } else {
4128 *control |= MPI2_SCSIIO_CONTROL_READ;
4129 }
4130
4131 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4132
4133 /*
4134 * We have 2 cases here. First where we can fit all the
4135 * SG elements into the main frame, and the case
4136 * where we can't.
4137 * If we have more cookies than we can attach to a frame
4138 * we will need to use a chain element to point
4139 * a location of memory where the rest of the S/G
4140 * elements reside.
4141 */
4142 if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4143 dmap = cmd->cmd_sg;
4144 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4145 while (cookiec--) {
4146 ddi_put32(acc_hdl,
4147 &sge->Address.Low, dmap->addr.address64.Low);
4148 ddi_put32(acc_hdl,
4149 &sge->Address.High, dmap->addr.address64.High);
4150 ddi_put32(acc_hdl, &sge->FlagsLength,
4151 dmap->count);
4152 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4153 flags |= ((uint32_t)
4154 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4155 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4156 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4157 MPI2_SGE_FLAGS_SHIFT);
4158
4159 /*
4160 * If this is the last cookie, we set the flags
4161 * to indicate so
4162 */
4163 if (cookiec == 0) {
4164 flags |=
4165 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4166 | MPI2_SGE_FLAGS_END_OF_BUFFER
4167 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4168 MPI2_SGE_FLAGS_SHIFT);
4169 }
4170 if (cmd->cmd_flags & CFLAG_DMASEND) {
4171 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4172 MPI2_SGE_FLAGS_SHIFT);
4173 } else {
4174 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4175 MPI2_SGE_FLAGS_SHIFT);
4176 }
4177 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4178 dmap++;
4179 sge++;
4180 }
4181 } else {
4182 /*
4183 * Hereby we start to deal with multiple frames.
4184 * The process is as follows:
4185 * 1. Determine how many frames are needed for SGL element
4186 * storage; Note that all frames are stored in contiguous
4187 * memory space and in 64-bit DMA mode each element is
4188 * 3 double-words (12 bytes) long.
4189 * 2. Fill up the main frame. We need to do this separately
4190 * since it contains the SCSI IO request header and needs
4191 * dedicated processing. Note that the last 4 double-words
4192 * of the SCSI IO header is for SGL element storage
4193 * (MPI2_SGE_IO_UNION).
4194 * 3. Fill the chain element in the main frame, so the DMA
4195 * engine can use the following frames.
4196 * 4. Enter a loop to fill the remaining frames. Note that the
4197 * last frame contains no chain element. The remaining
4198 * frames go into the mpt SGL buffer allocated on the fly,
4199 * not immediately following the main message frame, as in
4200 * Gen1.
4201 * Some restrictions:
4202 * 1. For 64-bit DMA, the simple element and chain element
4203 * are both of 3 double-words (12 bytes) in size, even
4204 * though all frames are stored in the first 4G of mem
4205 * range and the higher 32-bits of the address are always 0.
4206 * 2. On some controllers (like the 1064/1068), a frame can
4207 * hold SGL elements with the last 1 or 2 double-words
4208 * (4 or 8 bytes) un-used. On these controllers, we should
4209 * recognize that there's not enough room for another SGL
4210 * element and move the sge pointer to the next frame.
4211 */
4212 int i, j, k, l, frames, sgemax;
4213 int temp;
4214 uint8_t chainflags;
4215 uint16_t chainlength;
4216 mptsas_cache_frames_t *p;
4217
4218 /*
4219 * Sgemax is the number of SGE's that will fit
4220 * each extra frame and frames is total
4221 * number of frames we'll need. 1 sge entry per
4222 * frame is reseverd for the chain element thus the -1 below.
4223 */
4224 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4225 - 1);
4226 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4227
4228 /*
4229 * A little check to see if we need to round up the number
4230 * of frames we need
4231 */
4232 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4233 sgemax) > 1) {
4234 frames = (temp + 1);
4235 } else {
4236 frames = temp;
4237 }
4238 dmap = cmd->cmd_sg;
4239 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4240
4241 /*
4242 * First fill in the main frame
4243 */
4244 for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4245 ddi_put32(acc_hdl, &sge->Address.Low,
4246 dmap->addr.address64.Low);
4247 ddi_put32(acc_hdl, &sge->Address.High,
4248 dmap->addr.address64.High);
4249 ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4250 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4251 flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4252 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4253 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4254 MPI2_SGE_FLAGS_SHIFT);
4255
4256 /*
4257 * If this is the last SGE of this frame
4258 * we set the end of list flag
4259 */
4260 if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4261 flags |= ((uint32_t)
4262 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4263 MPI2_SGE_FLAGS_SHIFT);
4264 }
4265 if (cmd->cmd_flags & CFLAG_DMASEND) {
4266 flags |=
4267 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4268 MPI2_SGE_FLAGS_SHIFT);
4269 } else {
4270 flags |=
4271 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4272 MPI2_SGE_FLAGS_SHIFT);
4273 }
4274 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4275 dmap++;
4276 sge++;
4277 }
4278
4279 /*
4280 * Fill in the chain element in the main frame.
4281 * About calculation on ChainOffset:
4282 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4283 * in the end reserved for SGL element storage
4284 * (MPI2_SGE_IO_UNION); we should count it in our
4285 * calculation. See its definition in the header file.
4286 * 2. Constant j is the counter of the current SGL element
4287 * that will be processed, and (j - 1) is the number of
4288 * SGL elements that have been processed (stored in the
4289 * main frame).
4290 * 3. ChainOffset value should be in units of double-words (4
4291 * bytes) so the last value should be divided by 4.
4292 */
4293 ddi_put8(acc_hdl, &frame->ChainOffset,
4294 (sizeof (MPI2_SCSI_IO_REQUEST) -
4295 sizeof (MPI2_SGE_IO_UNION) +
4296 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4297 sgechain = (pMpi2SGEChain64_t)sge;
4298 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4299 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4300 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4301 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4302
4303 /*
4304 * The size of the next frame is the accurate size of space
4305 * (in bytes) used to store the SGL elements. j is the counter
4306 * of SGL elements. (j - 1) is the number of SGL elements that
4307 * have been processed (stored in frames).
4308 */
4309 if (frames >= 2) {
4310 chainlength = mpt->m_req_frame_size /
4311 sizeof (MPI2_SGE_SIMPLE64) *
4312 sizeof (MPI2_SGE_SIMPLE64);
4313 } else {
4314 chainlength = ((cookiec - (j - 1)) *
4315 sizeof (MPI2_SGE_SIMPLE64));
4316 }
4317
4318 p = cmd->cmd_extra_frames;
4319
4320 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4321 ddi_put32(acc_hdl, &sgechain->Address.Low,
4322 p->m_phys_addr);
4323 /* SGL is allocated in the first 4G mem range */
4324 ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4325
4326 /*
4327 * If there are more than 2 frames left we have to
4328 * fill in the next chain offset to the location of
4329 * the chain element in the next frame.
4330 * sgemax is the number of simple elements in an extra
4331 * frame. Note that the value NextChainOffset should be
4332 * in double-words (4 bytes).
4333 */
4334 if (frames >= 2) {
4335 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4336 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4337 } else {
4338 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4339 }
4340
4341 /*
4342 * Jump to next frame;
4343 * Starting here, chain buffers go into the per command SGL.
4344 * This buffer is allocated when chain buffers are needed.
4345 */
4346 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4347 i = cookiec;
4348
4349 /*
4350 * Start filling in frames with SGE's. If we
4351 * reach the end of frame and still have SGE's
4352 * to fill we need to add a chain element and
4353 * use another frame. j will be our counter
4354 * for what cookie we are at and i will be
4355 * the total cookiec. k is the current frame
4356 */
4357 for (k = 1; k <= frames; k++) {
4358 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4359
4360 /*
4361 * If we have reached the end of frame
4362 * and we have more SGE's to fill in
4363 * we have to fill the final entry
4364 * with a chain element and then
4365 * continue to the next frame
4366 */
4367 if ((l == (sgemax + 1)) && (k != frames)) {
4368 sgechain = (pMpi2SGEChain64_t)sge;
4369 j--;
4370 chainflags = (
4371 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4372 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4373 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4374 ddi_put8(p->m_acc_hdl,
4375 &sgechain->Flags, chainflags);
4376 /*
4377 * k is the frame counter and (k + 1)
4378 * is the number of the next frame.
4379 * Note that frames are in contiguous
4380 * memory space.
4381 */
4382 ddi_put32(p->m_acc_hdl,
4383 &sgechain->Address.Low,
4384 (p->m_phys_addr +
4385 (mpt->m_req_frame_size * k)));
4386 ddi_put32(p->m_acc_hdl,
4387 &sgechain->Address.High, 0);
4388
4389 /*
4390 * If there are more than 2 frames left
4391 * we have to next chain offset to
4392 * the location of the chain element
4393 * in the next frame and fill in the
4394 * length of the next chain
4395 */
4396 if ((frames - k) >= 2) {
4397 ddi_put8(p->m_acc_hdl,
4398 &sgechain->NextChainOffset,
4399 (sgemax *
4400 sizeof (MPI2_SGE_SIMPLE64))
4401 >> 2);
4402 ddi_put16(p->m_acc_hdl,
4403 &sgechain->Length,
4404 mpt->m_req_frame_size /
4405 sizeof (MPI2_SGE_SIMPLE64) *
4406 sizeof (MPI2_SGE_SIMPLE64));
4407 } else {
4408 /*
4409 * This is the last frame. Set
4410 * the NextChainOffset to 0 and
4411 * Length is the total size of
4412 * all remaining simple elements
4413 */
4414 ddi_put8(p->m_acc_hdl,
4415 &sgechain->NextChainOffset,
4416 0);
4417 ddi_put16(p->m_acc_hdl,
4418 &sgechain->Length,
4419 (cookiec - j) *
4420 sizeof (MPI2_SGE_SIMPLE64));
4421 }
4422
4423 /* Jump to the next frame */
4424 sge = (pMpi2SGESimple64_t)
4425 ((char *)p->m_frames_addr +
4426 (int)mpt->m_req_frame_size * k);
4427
4428 continue;
4429 }
4430
4431 ddi_put32(p->m_acc_hdl,
4432 &sge->Address.Low,
4433 dmap->addr.address64.Low);
4434 ddi_put32(p->m_acc_hdl,
4435 &sge->Address.High,
4436 dmap->addr.address64.High);
4437 ddi_put32(p->m_acc_hdl,
4438 &sge->FlagsLength, dmap->count);
4439 flags = ddi_get32(p->m_acc_hdl,
4440 &sge->FlagsLength);
4441 flags |= ((uint32_t)(
4442 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4443 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4444 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4445 MPI2_SGE_FLAGS_SHIFT);
4446
4447 /*
4448 * If we are at the end of the frame and
4449 * there is another frame to fill in
4450 * we set the last simple element as last
4451 * element
4452 */
4453 if ((l == sgemax) && (k != frames)) {
4454 flags |= ((uint32_t)
4455 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4456 MPI2_SGE_FLAGS_SHIFT);
4457 }
4458
4459 /*
4460 * If this is the final cookie we
4461 * indicate it by setting the flags
4462 */
4463 if (j == i) {
4464 flags |= ((uint32_t)
4465 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4466 MPI2_SGE_FLAGS_END_OF_BUFFER |
4467 MPI2_SGE_FLAGS_END_OF_LIST) <<
4468 MPI2_SGE_FLAGS_SHIFT);
4469 }
4470 if (cmd->cmd_flags & CFLAG_DMASEND) {
4471 flags |=
4472 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4473 MPI2_SGE_FLAGS_SHIFT);
4474 } else {
4475 flags |=
4476 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4477 MPI2_SGE_FLAGS_SHIFT);
4478 }
4479 ddi_put32(p->m_acc_hdl,
4480 &sge->FlagsLength, flags);
4481 dmap++;
4482 sge++;
4483 }
4484 }
4485
4486 /*
4487 * Sync DMA with the chain buffers that were just created
4488 */
4489 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4490 }
4491 }
4492
4493 /*
4494 * Interrupt handling
4495 * Utility routine. Poll for status of a command sent to HBA
4496 * without interrupts (a FLAG_NOINTR command).
4497 */
4498 int
4499 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4500 {
4501 int rval = TRUE;
4502
4503 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4504
4505 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4506 mptsas_restart_hba(mpt);
4507 }
4508
4509 /*
4510 * Wait, using drv_usecwait(), long enough for the command to
4511 * reasonably return from the target if the target isn't
4512 * "dead". A polled command may well be sent from scsi_poll, and
4513 * there are retries built in to scsi_poll if the transport
4514 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4515 * and retries the transport up to scsi_poll_busycnt times
4516 * (currently 60) if
4517 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4518 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4519 *
4520 * limit the waiting to avoid a hang in the event that the
4521 * cmd never gets started but we are still receiving interrupts
4522 */
4523 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4524 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4525 NDBG5(("mptsas_poll: command incomplete"));
4526 rval = FALSE;
4527 break;
4528 }
4529 }
4530
4531 if (rval == FALSE) {
4532
4533 /*
4534 * this isn't supposed to happen, the hba must be wedged
4535 * Mark this cmd as a timeout.
4536 */
4537 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4538 (STAT_TIMEOUT|STAT_ABORTED));
4539
4540 if (poll_cmd->cmd_queued == FALSE) {
4541
4542 NDBG5(("mptsas_poll: not on waitq"));
4543
4544 poll_cmd->cmd_pkt->pkt_state |=
4545 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4546 } else {
4547
4548 /* find and remove it from the waitq */
4549 NDBG5(("mptsas_poll: delete from waitq"));
4550 mptsas_waitq_delete(mpt, poll_cmd);
4551 }
4552
4553 }
4554 mptsas_fma_check(mpt, poll_cmd);
4555 NDBG5(("mptsas_poll: done"));
4556 return (rval);
4557 }
4558
4559 /*
4560 * Used for polling cmds and TM function
4561 */
4562 static int
4563 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4564 {
4565 int cnt;
4566 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
4567 uint32_t int_mask;
4568
4569 NDBG5(("mptsas_wait_intr"));
4570
4571 mpt->m_polled_intr = 1;
4572
4573 /*
4574 * Get the current interrupt mask and disable interrupts. When
4575 * re-enabling ints, set mask to saved value.
4576 */
4577 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4578 MPTSAS_DISABLE_INTR(mpt);
4579
4580 /*
4581 * Keep polling for at least (polltime * 1000) seconds
4582 */
4583 for (cnt = 0; cnt < polltime; cnt++) {
4584 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4585 DDI_DMA_SYNC_FORCPU);
4586
4587 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4588 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
4589
4590 if (ddi_get32(mpt->m_acc_post_queue_hdl,
4591 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4592 ddi_get32(mpt->m_acc_post_queue_hdl,
4593 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
4594 drv_usecwait(1000);
4595 continue;
4596 }
4597
4598 /*
4599 * The reply is valid, process it according to its
4600 * type.
4601 */
4602 mptsas_process_intr(mpt, reply_desc_union);
4603
4604 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
4605 mpt->m_post_index = 0;
4606 }
4607
4608 /*
4609 * Update the global reply index
4610 */
4611 ddi_put32(mpt->m_datap,
4612 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
4613 mpt->m_polled_intr = 0;
4614
4615 /*
4616 * Re-enable interrupts and quit.
4617 */
4618 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
4619 int_mask);
4620 return (TRUE);
4621
4622 }
4623
4624 /*
4625 * Clear polling flag, re-enable interrupts and quit.
4626 */
4627 mpt->m_polled_intr = 0;
4628 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
4629 return (FALSE);
4630 }
4631
4632 static void
4633 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4634 pMpi2ReplyDescriptorsUnion_t reply_desc)
4635 {
4636 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4637 uint16_t SMID;
4638 mptsas_slots_t *slots = mpt->m_active;
4639 mptsas_cmd_t *cmd = NULL;
4640 struct scsi_pkt *pkt;
4641
4642 ASSERT(mutex_owned(&mpt->m_mutex));
4643
4644 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4645 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4646
4647 /*
4648 * This is a success reply so just complete the IO. First, do a sanity
4649 * check on the SMID. The final slot is used for TM requests, which
4650 * would not come into this reply handler.
4651 */
4652 if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4653 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4654 SMID);
4655 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4656 return;
4657 }
4658
4659 cmd = slots->m_slot[SMID];
4660
4661 /*
4662 * print warning and return if the slot is empty
4663 */
4664 if (cmd == NULL) {
4665 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4666 "in slot %d", SMID);
4667 return;
4668 }
4669
4670 pkt = CMD2PKT(cmd);
4671 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4672 STATE_GOT_STATUS);
4673 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4674 pkt->pkt_state |= STATE_XFERRED_DATA;
4675 }
4676 pkt->pkt_resid = 0;
4677
4678 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4679 cmd->cmd_flags |= CFLAG_FINISHED;
4680 cv_broadcast(&mpt->m_passthru_cv);
4681 return;
4682 } else {
4683 mptsas_remove_cmd(mpt, cmd);
4684 }
4685
4686 if (cmd->cmd_flags & CFLAG_RETRY) {
4687 /*
4688 * The target returned QFULL or busy, do not add tihs
4689 * pkt to the doneq since the hba will retry
4690 * this cmd.
4691 *
4692 * The pkt has already been resubmitted in
4693 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4694 * Remove this cmd_flag here.
4695 */
4696 cmd->cmd_flags &= ~CFLAG_RETRY;
4697 } else {
4698 mptsas_doneq_add(mpt, cmd);
4699 }
4700 }
4701
/*
 * Handle an "address reply" descriptor: the IOC has DMAed a full reply
 * frame into host memory and the descriptor carries its address.  Look
 * up the originating command (where one exists), dispatch on the
 * reply's Function code, then return the reply frame to the free queue
 * and complete the command.  Some paths (events, diag-buffer, waiting
 * passthru/config/diag threads) consume the frame themselves and
 * return early without recycling it here.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range (inside the reply
	 * frame pool and aligned to a frame boundary) we should ignore
	 * this message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	/* Translate the DMA address into a kernel virtual address. */
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/*
			 * A waiting thread owns this command; record the
			 * reply frame for it, mark the command finished
			 * and wake all potential waiters.  The waiter is
			 * responsible for recycling the reply frame.
			 */
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		/* Frames are contiguous, so the index is a simple divide. */
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}
		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 * (Normally the taskq handler would do this.)
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			/* Waiting diag thread recycles the reply frame. */
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* FW commands are completed by their own waiters, not the doneq. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
4923
/*
 * Decode the full reply frame for a SCSI IO that did NOT complete via
 * the fast "success" descriptor.  Translate the MPI2 scsi_status /
 * ioc_status / scsi_state triple into scsi_pkt pkt_reason, pkt_state,
 * pkt_resid and (for CHECK CONDITION) ARQ sense data.  The caller
 * completes or retries the packet afterwards.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	struct buf		*bp;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;

	/* Pick the external ARQ buffer when one was set up for this cmd. */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	/* Pull all relevant fields out of the reply frame up front. */
	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 * Report BUSY so scsi_poll/sd retries the command.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	/* No status and device-not-there: device vanished mid-command. */
	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte carries the SAS response code. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/* Build the auto-request-sense status in the packet. */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* Copy sense, truncated to the caller's sense buffer. */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 * Either way, kick off a dynamic-reconfiguration pass
		 * for this target via the DR taskq.
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/* GOOD scsi_status; the interesting part is ioc_status. */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_BUS_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain on every known target
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/*
			 * retry command (at the head of the wait queue)
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5175
5176 static void
5177 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5178 mptsas_cmd_t *cmd)
5179 {
5180 uint8_t task_type;
5181 uint16_t ioc_status;
5182 uint32_t log_info;
5183 uint16_t dev_handle;
5184 struct scsi_pkt *pkt = CMD2PKT(cmd);
5185
5186 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5187 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5188 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5189 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5190
5191 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5192 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5193 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5194 task_type, ioc_status, log_info, dev_handle);
5195 pkt->pkt_reason = CMD_INCOMPLETE;
5196 return;
5197 }
5198
5199 switch (task_type) {
5200 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5201 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5202 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5203 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5204 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5205 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5206 break;
5207 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5208 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5209 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5210 /*
5211 * Check for invalid DevHandle of 0 in case application
5212 * sends bad command. DevHandle of 0 could cause problems.
5213 */
5214 if (dev_handle == 0) {
5215 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5216 " DevHandle of 0.");
5217 } else {
5218 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5219 task_type);
5220 }
5221 break;
5222 default:
5223 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5224 task_type);
5225 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5226 break;
5227 }
5228 }
5229
/*
 * Body of a done-queue helper thread.  Each helper owns one entry of
 * m_doneq_thread_id and drains that entry's private done queue,
 * completing commands outside the interrupt path.
 *
 * The thread sleeps on its item cv until work arrives (or the ACTIVE
 * flag is cleared), pops one command at a time via
 * mptsas_doneq_thread_rm(), and invokes the packet completion with the
 * item mutex dropped.  On exit it decrements m_doneq_thread_n under
 * m_doneq_mutex and broadcasts so teardown code can wait for all
 * helpers to finish.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list mutex around the completion callback so
		 * the target driver can re-enter the HBA without
		 * deadlocking on it.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Thread is exiting; account for it and wake any waiter. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5261
5262
5263 /*
5264 * mpt interrupt handler.
5265 */
5266 static uint_t
5267 mptsas_intr(caddr_t arg1, caddr_t arg2)
5268 {
5269 mptsas_t *mpt = (void *)arg1;
5270 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5271 uchar_t did_reply = FALSE;
5272
5273 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5274
5275 mutex_enter(&mpt->m_mutex);
5276
5277 /*
5278 * If interrupts are shared by two channels then check whether this
5279 * interrupt is genuinely for this channel by making sure first the
5280 * chip is in high power state.
5281 */
5282 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5283 (mpt->m_power_level != PM_LEVEL_D0)) {
5284 mutex_exit(&mpt->m_mutex);
5285 return (DDI_INTR_UNCLAIMED);
5286 }
5287
5288 /*
5289 * If polling, interrupt was triggered by some shared interrupt because
5290 * IOC interrupts are disabled during polling, so polling routine will
5291 * handle any replies. Considering this, if polling is happening,
5292 * return with interrupt unclaimed.
5293 */
5294 if (mpt->m_polled_intr) {
5295 mutex_exit(&mpt->m_mutex);
5296 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5297 return (DDI_INTR_UNCLAIMED);
5298 }
5299
5300 /*
5301 * Read the istat register.
5302 */
5303 if ((INTPENDING(mpt)) != 0) {
5304 /*
5305 * read fifo until empty.
5306 */
5307 #ifndef __lock_lint
5308 _NOTE(CONSTCOND)
5309 #endif
5310 while (TRUE) {
5311 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5312 DDI_DMA_SYNC_FORCPU);
5313 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5314 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5315
5316 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5317 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5318 ddi_get32(mpt->m_acc_post_queue_hdl,
5319 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5320 break;
5321 }
5322
5323 /*
5324 * The reply is valid, process it according to its
5325 * type. Also, set a flag for updating the reply index
5326 * after they've all been processed.
5327 */
5328 did_reply = TRUE;
5329
5330 mptsas_process_intr(mpt, reply_desc_union);
5331
5332 /*
5333 * Increment post index and roll over if needed.
5334 */
5335 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5336 mpt->m_post_index = 0;
5337 }
5338 }
5339
5340 /*
5341 * Update the global reply index if at least one reply was
5342 * processed.
5343 */
5344 if (did_reply) {
5345 ddi_put32(mpt->m_datap,
5346 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5347 }
5348 } else {
5349 mutex_exit(&mpt->m_mutex);
5350 return (DDI_INTR_UNCLAIMED);
5351 }
5352 NDBG1(("mptsas_intr complete"));
5353
5354 /*
5355 * If no helper threads are created, process the doneq in ISR. If
5356 * helpers are created, use the doneq length as a metric to measure the
5357 * load on the interrupt CPU. If it is long enough, which indicates the
5358 * load is heavy, then we deliver the IO completions to the helpers.
5359 * This measurement has some limitations, although it is simple and
5360 * straightforward and works well for most of the cases at present.
5361 */
5362 if (!mpt->m_doneq_thread_n ||
5363 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5364 mptsas_doneq_empty(mpt);
5365 } else {
5366 mptsas_deliver_doneq_thread(mpt);
5367 }
5368
5369 /*
5370 * If there are queued cmd, start them now.
5371 */
5372 if (mpt->m_waitq != NULL) {
5373 mptsas_restart_waitq(mpt);
5374 }
5375
5376 mutex_exit(&mpt->m_mutex);
5377 return (DDI_INTR_CLAIMED);
5378 }
5379
/*
 * Dispatch one reply descriptor from the post queue by its type
 * (fast-path SCSI IO success vs. full address reply), then clear the
 * descriptor slot back to the all-ones "empty" pattern and sync it back
 * to the device so the slot can be reused.  Called with m_mutex held
 * from mptsas_intr()/polling paths.
 */
static void
mptsas_process_intr(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc_union)
{
	uint8_t reply_type;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * The reply is valid, process it according to its
	 * type.  Also, set a flag for updating the reply index
	 * after they've all been processed.
	 */
	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
	    &reply_desc_union->Default.ReplyFlags);
	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
		mptsas_handle_address_reply(mpt, reply_desc_union);
	} else {
		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}

	/*
	 * Clear the reply descriptor for re-use and increment
	 * index.
	 */
	ddi_put64(mpt->m_acc_post_queue_hdl,
	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
	    0xFFFFFFFFFFFFFFFF);
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
5415
5416 /*
5417 * handle qfull condition
5418 */
5419 static void
5420 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5421 {
5422 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5423
5424 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5425 (ptgt->m_qfull_retries == 0)) {
5426 /*
5427 * We have exhausted the retries on QFULL, or,
5428 * the target driver has indicated that it
5429 * wants to handle QFULL itself by setting
5430 * qfull-retries capability to 0. In either case
5431 * we want the target driver's QFULL handling
5432 * to kick in. We do this by having pkt_reason
5433 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5434 */
5435 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5436 } else {
5437 if (ptgt->m_reset_delay == 0) {
5438 ptgt->m_t_throttle =
5439 max((ptgt->m_t_ncmds - 2), 0);
5440 }
5441
5442 cmd->cmd_pkt_flags |= FLAG_HEAD;
5443 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5444 cmd->cmd_flags |= CFLAG_RETRY;
5445
5446 (void) mptsas_accept_pkt(mpt, cmd);
5447
5448 /*
5449 * when target gives queue full status with no commands
5450 * outstanding (m_t_ncmds == 0), throttle is set to 0
5451 * (HOLD_THROTTLE), and the queue full handling start
5452 * (see psarc/1994/313); if there are commands outstanding,
5453 * throttle is set to (m_t_ncmds - 2)
5454 */
5455 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5456 /*
5457 * By setting throttle to QFULL_THROTTLE, we
5458 * avoid submitting new commands and in
5459 * mptsas_restart_cmd find out slots which need
5460 * their throttles to be cleared.
5461 */
5462 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5463 if (mpt->m_restart_cmd_timeid == 0) {
5464 mpt->m_restart_cmd_timeid =
5465 timeout(mptsas_restart_cmd, mpt,
5466 ptgt->m_qfull_retry_interval);
5467 }
5468 }
5469 }
5470 }
5471
5472 mptsas_phymask_t
5473 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5474 {
5475 mptsas_phymask_t phy_mask = 0;
5476 uint8_t i = 0;
5477
5478 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5479
5480 ASSERT(mutex_owned(&mpt->m_mutex));
5481
5482 /*
5483 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5484 */
5485 if (physport == 0xFF) {
5486 return (0);
5487 }
5488
5489 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5490 if (mpt->m_phy_info[i].attached_devhdl &&
5491 (mpt->m_phy_info[i].phy_mask != 0) &&
5492 (mpt->m_phy_info[i].port_num == physport)) {
5493 phy_mask = mpt->m_phy_info[i].phy_mask;
5494 break;
5495 }
5496 }
5497 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5498 mpt->m_instance, physport, phy_mask));
5499 return (phy_mask);
5500 }
5501
5502 /*
5503 * mpt free device handle after device gone, by use of passthrough
5504 */
5505 static int
5506 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5507 {
5508 Mpi2SasIoUnitControlRequest_t req;
5509 Mpi2SasIoUnitControlReply_t rep;
5510 int ret;
5511
5512 ASSERT(mutex_owned(&mpt->m_mutex));
5513
5514 /*
5515 * Need to compose a SAS IO Unit Control request message
5516 * and call mptsas_do_passthru() function
5517 */
5518 bzero(&req, sizeof (req));
5519 bzero(&rep, sizeof (rep));
5520
5521 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5522 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5523 req.DevHandle = LE_16(devhdl);
5524
5525 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5526 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5527 if (ret != 0) {
5528 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5529 "Control error %d", ret);
5530 return (DDI_FAILURE);
5531 }
5532
5533 /* do passthrough success, check the ioc status */
5534 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5535 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5536 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5537 return (DDI_FAILURE);
5538 }
5539
5540 return (DDI_SUCCESS);
5541 }
5542
5543 static void
5544 mptsas_update_phymask(mptsas_t *mpt)
5545 {
5546 mptsas_phymask_t mask = 0, phy_mask;
5547 char *phy_mask_name;
5548 uint8_t current_port;
5549 int i, j;
5550
5551 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5552
5553 ASSERT(mutex_owned(&mpt->m_mutex));
5554
5555 (void) mptsas_get_sas_io_unit_page(mpt);
5556
5557 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5558
5559 for (i = 0; i < mpt->m_num_phys; i++) {
5560 phy_mask = 0x00;
5561
5562 if (mpt->m_phy_info[i].attached_devhdl == 0)
5563 continue;
5564
5565 bzero(phy_mask_name, sizeof (phy_mask_name));
5566
5567 current_port = mpt->m_phy_info[i].port_num;
5568
5569 if ((mask & (1 << i)) != 0)
5570 continue;
5571
5572 for (j = 0; j < mpt->m_num_phys; j++) {
5573 if (mpt->m_phy_info[j].attached_devhdl &&
5574 (mpt->m_phy_info[j].port_num == current_port)) {
5575 phy_mask |= (1 << j);
5576 }
5577 }
5578 mask = mask | phy_mask;
5579
5580 for (j = 0; j < mpt->m_num_phys; j++) {
5581 if ((phy_mask >> j) & 0x01) {
5582 mpt->m_phy_info[j].phy_mask = phy_mask;
5583 }
5584 }
5585
5586 (void) sprintf(phy_mask_name, "%x", phy_mask);
5587
5588 mutex_exit(&mpt->m_mutex);
5589 /*
5590 * register a iport, if the port has already been existed
5591 * SCSA will do nothing and just return.
5592 */
5593 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5594 mutex_enter(&mpt->m_mutex);
5595 }
5596 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5597 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5598 }
5599
5600 /*
5601 * mptsas_handle_dr is a task handler for DR, the DR action includes:
5602 * 1. Directly attched Device Added/Removed.
5603 * 2. Expander Device Added/Removed.
5604 * 3. Indirectly Attached Device Added/Expander.
5605 * 4. LUNs of a existing device status change.
5606 * 5. RAID volume created/deleted.
5607 * 6. Member of RAID volume is released because of RAID deletion.
5608 * 7. Physical disks are removed because of RAID creation.
5609 */
5610 static void
5611 mptsas_handle_dr(void *args) {
5612 mptsas_topo_change_list_t *topo_node = NULL;
5613 mptsas_topo_change_list_t *save_node = NULL;
5614 mptsas_t *mpt;
5615 dev_info_t *parent = NULL;
5616 mptsas_phymask_t phymask = 0;
5617 char *phy_mask_name;
5618 uint8_t flags = 0, physport = 0xff;
5619 uint8_t port_update = 0;
5620 uint_t event;
5621
5622 topo_node = (mptsas_topo_change_list_t *)args;
5623
5624 mpt = topo_node->mpt;
5625 event = topo_node->event;
5626 flags = topo_node->flags;
5627
5628 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5629
5630 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
5631
5632 switch (event) {
5633 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5634 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5635 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
5636 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
5637 /*
5638 * Direct attached or expander attached device added
5639 * into system or a Phys Disk that is being unhidden.
5640 */
5641 port_update = 1;
5642 }
5643 break;
5644 case MPTSAS_DR_EVENT_RECONFIG_SMP:
5645 /*
5646 * New expander added into system, it must be the head
5647 * of topo_change_list_t
5648 */
5649 port_update = 1;
5650 break;
5651 default:
5652 port_update = 0;
5653 break;
5654 }
5655 /*
5656 * All cases port_update == 1 may cause initiator port form change
5657 */
5658 mutex_enter(&mpt->m_mutex);
5659 if (mpt->m_port_chng && port_update) {
5660 /*
5661 * mpt->m_port_chng flag indicates some PHYs of initiator
5662 * port have changed to online. So when expander added or
5663 * directly attached device online event come, we force to
5664 * update port information by issueing SAS IO Unit Page and
5665 * update PHYMASKs.
5666 */
5667 (void) mptsas_update_phymask(mpt);
5668 mpt->m_port_chng = 0;
5669
5670 }
5671 mutex_exit(&mpt->m_mutex);
5672 while (topo_node) {
5673 phymask = 0;
5674 if (parent == NULL) {
5675 physport = topo_node->un.physport;
5676 event = topo_node->event;
5677 flags = topo_node->flags;
5678 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
5679 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
5680 /*
5681 * For all offline events, phymask is known
5682 */
5683 phymask = topo_node->un.phymask;
5684 goto find_parent;
5685 }
5686 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
5687 goto handle_topo_change;
5688 }
5689 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
5690 phymask = topo_node->un.phymask;
5691 goto find_parent;
5692 }
5693
5694 if ((flags ==
5695 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
5696 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
5697 /*
5698 * There is no any field in IR_CONFIG_CHANGE
5699 * event indicate physport/phynum, let's get
5700 * parent after SAS Device Page0 request.
5701 */
5702 goto handle_topo_change;
5703 }
5704
5705 mutex_enter(&mpt->m_mutex);
5706 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
5707 /*
5708 * If the direct attached device added or a
5709 * phys disk is being unhidden, argument
5710 * physport actually is PHY#, so we have to get
5711 * phymask according PHY#.
5712 */
5713 physport = mpt->m_phy_info[physport].port_num;
5714 }
5715
5716 /*
5717 * Translate physport to phymask so that we can search
5718 * parent dip.
5719 */
5720 phymask = mptsas_physport_to_phymask(mpt,
5721 physport);
5722 mutex_exit(&mpt->m_mutex);
5723
5724 find_parent:
5725 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
5726 /*
5727 * For RAID topology change node, write the iport name
5728 * as v0.
5729 */
5730 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5731 (void) sprintf(phy_mask_name, "v0");
5732 } else {
5733 /*
5734 * phymask can bo 0 if the drive has been
5735 * pulled by the time an add event is
5736 * processed. If phymask is 0, just skip this
5737 * event and continue.
5738 */
5739 if (phymask == 0) {
5740 mutex_enter(&mpt->m_mutex);
5741 save_node = topo_node;
5742 topo_node = topo_node->next;
5743 ASSERT(save_node);
5744 kmem_free(save_node,
5745 sizeof (mptsas_topo_change_list_t));
5746 mutex_exit(&mpt->m_mutex);
5747
5748 parent = NULL;
5749 continue;
5750 }
5751 (void) sprintf(phy_mask_name, "%x", phymask);
5752 }
5753 parent = scsi_hba_iport_find(mpt->m_dip,
5754 phy_mask_name);
5755 if (parent == NULL) {
5756 mptsas_log(mpt, CE_WARN, "Failed to find an "
5757 "iport, should not happen!");
5758 goto out;
5759 }
5760
5761 }
5762 ASSERT(parent);
5763 handle_topo_change:
5764
5765 mutex_enter(&mpt->m_mutex);
5766
5767 mptsas_handle_topo_change(topo_node, parent);
5768 save_node = topo_node;
5769 topo_node = topo_node->next;
5770 ASSERT(save_node);
5771 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
5772 mutex_exit(&mpt->m_mutex);
5773
5774 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5775 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
5776 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
5777 /*
5778 * If direct attached device associated, make sure
5779 * reset the parent before start the next one. But
5780 * all devices associated with expander shares the
5781 * parent. Also, reset parent if this is for RAID.
5782 */
5783 parent = NULL;
5784 }
5785 }
5786 out:
5787 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5788 }
5789
/*
 * Apply one topology change notification to driver state and the
 * devinfo tree.  Called from mptsas_handle_dr() with m_mutex held;
 * the mutex is dropped (and re-acquired) around devinfo-tree
 * operations, which must not be performed under it.
 *
 * Events handled: target online/offline, device-handle removal,
 * expander (SMP) online/offline.  For direct-attached devices the
 * parent iport's SMHBA properties (attached-port, num-phys,
 * virtual-port) are updated to match the new topology.
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = mptsas_search_by_devhdl(
			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK than there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * Phys disk being unhidden: locate its iport by
			 * phymask rather than trusting the passed parent.
			 */
			phymask = ptgt->m_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_sas_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);
				mptsas_smhba_set_phy_props(mpt,
				    ddi_get_name_addr(parent), parent,
				    1, &attached_devhdl);
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
		devhdl = topo_node->devhdl;
		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_sas_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* WWN-based unit address if available, else PHY-based. */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
			    rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (mptsas_set_led_status(mpt, ptgt, 0) != DDI_SUCCESS) {
			NDBG14(("mptsas: clear LED for tgt %x failed",
			    ptgt->m_slot_num));
		}
		if (rval == DDI_SUCCESS) {
			/* Offline worked; drop the target from the table. */
			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
			    ptgt->m_sas_wwn, ptgt->m_phymask);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 to build the new smp entry. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(smptbl, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			mptsas_smp_free(smptbl, psmp->m_sasaddr,
			    psmp->m_phymask);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6222
6223 /*
6224 * Record the event if its type is enabled in mpt instance by ioctl.
6225 */
6226 static void
6227 mptsas_record_event(void *args)
6228 {
6229 m_replyh_arg_t *replyh_arg;
6230 pMpi2EventNotificationReply_t eventreply;
6231 uint32_t event, rfm;
6232 mptsas_t *mpt;
6233 int i, j;
6234 uint16_t event_data_len;
6235 boolean_t sendAEN = FALSE;
6236
6237 replyh_arg = (m_replyh_arg_t *)args;
6238 rfm = replyh_arg->rfm;
6239 mpt = replyh_arg->mpt;
6240
6241 eventreply = (pMpi2EventNotificationReply_t)
6242 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6243 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6244
6245
6246 /*
6247 * Generate a system event to let anyone who cares know that a
6248 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6249 * event mask is set to.
6250 */
6251 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6252 sendAEN = TRUE;
6253 }
6254
6255 /*
6256 * Record the event only if it is not masked. Determine which dword
6257 * and bit of event mask to test.
6258 */
6259 i = (uint8_t)(event / 32);
6260 j = (uint8_t)(event % 32);
6261 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6262 i = mpt->m_event_index;
6263 mpt->m_events[i].Type = event;
6264 mpt->m_events[i].Number = ++mpt->m_event_number;
6265 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6266 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6267 &eventreply->EventDataLength);
6268
6269 if (event_data_len > 0) {
6270 /*
6271 * Limit data to size in m_event entry
6272 */
6273 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6274 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6275 }
6276 for (j = 0; j < event_data_len; j++) {
6277 mpt->m_events[i].Data[j] =
6278 ddi_get32(mpt->m_acc_reply_frame_hdl,
6279 &(eventreply->EventData[j]));
6280 }
6281
6282 /*
6283 * check for index wrap-around
6284 */
6285 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6286 i = 0;
6287 }
6288 mpt->m_event_index = (uint8_t)i;
6289
6290 /*
6291 * Set flag to send the event.
6292 */
6293 sendAEN = TRUE;
6294 }
6295 }
6296
6297 /*
6298 * Generate a system event if flag is set to let anyone who cares know
6299 * that an event has occurred.
6300 */
6301 if (sendAEN) {
6302 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6303 "SAS", NULL, NULL, DDI_NOSLEEP);
6304 }
6305 }
6306
6307 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6308 /*
6309 * handle sync events from ioc in interrupt
6310 * return value:
6311 * DDI_SUCCESS: The event is handled by this func
6312 * DDI_FAILURE: Event is not handled
6313 */
static int
mptsas_handle_event_sync(void *args)
{
	m_replyh_arg_t *replyh_arg;
	pMpi2EventNotificationReply_t eventreply;
	uint32_t event, rfm;
	mptsas_t *mpt;
	uint_t iocstatus;

	replyh_arg = (m_replyh_arg_t *)args;
	/* rfm is the DMA address of the reply frame carrying this event. */
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	/* Interrupt path: the per-instance mutex must already be held. */
	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Translate the reply frame's DMA address into a kernel virtual
	 * address inside the pre-mapped reply frame region, then pull the
	 * event code out with a DDI access (endian-safe) read.
	 */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);

	/*
	 * Log any non-zero IOCStatus along with the IOCLogInfo word.
	 * NOTE(review): the assignment inside the condition is intentional
	 * (test-and-capture idiom); the "!" message prefix suppresses
	 * console output for the log-info-available case.
	 */
	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
	    &eventreply->IOCStatus)) {
		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			mptsas_log(mpt, CE_WARN,
			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		} else {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		}
	}

	/*
	 * figure out what kind of event we got and handle accordingly
	 */
	switch (event) {
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
		uint8_t num_entries, expstatus, phy;
		uint8_t phystatus, physport, state, i;
		uint8_t start_phy_num, link_rate;
		uint16_t dev_handle, reason_code;
		uint16_t enc_handle, expd_handle;
		/* Scratch strings for human-readable NDBG20 trace messages. */
		char string[80], curr[80], prev[80];
		/* Singly-linked list of topology changes handed to the DR taskq. */
		mptsas_topo_change_list_t *topo_head = NULL;
		mptsas_topo_change_list_t *topo_tail = NULL;
		mptsas_topo_change_list_t *topo_node = NULL;
		mptsas_target_t *ptgt;
		mptsas_smp_t *psmp;
		mptsas_hash_table_t *tgttbl, *smptbl;
		uint8_t flags = 0, exp_flag;
		smhba_info_t *pSmhba = NULL;

		NDBG20(("mptsas_handle_event_sync: SAS topology change"));

		tgttbl = &mpt->m_active->m_tgttbl;
		smptbl = &mpt->m_active->m_smptbl;

		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
		    eventreply->EventData;

		/* Decode the fixed portion of the topology change list. */
		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->EnclosureHandle);
		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpanderDevHandle);
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->NumEntries);
		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->StartPhyNum);
		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpStatus);
		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->PhysicalPort);

		string[0] = 0;
		if (expd_handle) {
			/* Event is associated with an expander, not the HBA. */
			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
			switch (expstatus) {
			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
				(void) sprintf(string, " added");
				/*
				 * New expander device added
				 */
				mpt->m_port_chng = 1;
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
				topo_node->un.physport = physport;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "removed");
				/* Only queue an offline if we know this SMP. */
				psmp = mptsas_search_by_devhdl(smptbl,
				    expd_handle);
				if (psmp == NULL)
					break;

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = psmp->m_phymask;
				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
				/* Expander still present; nothing to do. */
				break;
			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "delaying removal");
				break;
			default:
				break;
			}
		} else {
			/* ExpanderDevHandle 0: devices are HBA-attached. */
			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
		}

		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
		    enc_handle, expd_handle, string));
		/* Walk the per-PHY entries of the change list. */
		for (i = 0; i < num_entries; i++) {
			phy = i + start_phy_num;
			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].PhyStatus);
			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
			/*
			 * Filter out processing of Phy Vacant Status unless
			 * the reason code is "Not Responding". Process all
			 * other combinations of Phy Status and Reason Codes.
			 */
			if ((phystatus &
			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
			    (reason_code !=
			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
				continue;
			}
			curr[0] = 0;
			prev[0] = 0;
			string[0] = 0;
			switch (reason_code) {
			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d added", mpt->m_instance, phy,
				    physport, dev_handle));
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				/* Current (negotiated) link rate nibble. */
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at 6.0 "
					    "Gbps");
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}
				/*
				 * New target device added into the system.
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				if (expd_handle == 0) {
					/*
					 * Per MPI 2, if expander dev handle
					 * is 0, it's a directly attached
					 * device. So driver use PHY to decide
					 * which iport is associated
					 */
					physport = phy;
					mpt->m_port_chng = 1;
				}
				topo_node->un.physport = physport;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d removed", mpt->m_instance,
				    phy, physport, dev_handle));
				/*
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				/*
				 * The target device has been removed; look it
				 * up by DevHandle before it is actually taken
				 * offline from the system.
				 */
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    dev_handle);
				/*
				 * If ptgt is NULL here, it means that the
				 * DevHandle is not in the hash table. This is
				 * reasonable sometimes. For example, if a
				 * disk was pulled, then added, then pulled
				 * again, the disk will not have been put into
				 * the hash table because the add event will
				 * have an invalid phymask. BUT, this does not
				 * mean that the DevHandle is invalid. The
				 * controller will still have a valid DevHandle
				 * that must be removed. To do this, use the
				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
				 */
				if (ptgt == NULL) {
					topo_node = kmem_zalloc(
					    sizeof (mptsas_topo_change_list_t),
					    KM_SLEEP);
					topo_node->mpt = mpt;
					topo_node->un.phymask = 0;
					topo_node->event =
					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
					topo_node->devhdl = dev_handle;
					topo_node->flags = flags;
					topo_node->object = NULL;
					if (topo_head == NULL) {
						topo_head = topo_tail =
						    topo_node;
					} else {
						topo_tail->next = topo_node;
						topo_tail = topo_node;
					}
					break;
				}

				/*
				 * Set the DR flag immediately so I/O does not
				 * fail while failover is still in progress.
				 * m_tx_waitq_mutex must be held while setting
				 * m_dr_flag: tran_start checks the flag under
				 * that mutex and returns TRAN_BUSY instead of
				 * queueing new commands on the wait queue.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				/*
				 * NOTE(review): m_phy_info is indexed with i
				 * (the list-entry index) rather than phy
				 * (= i + start_phy_num).  If StartPhyNum can
				 * be non-zero for HBA phys this would update
				 * the wrong phy's smhba_info -- confirm
				 * against the MPI2 spec / other consumers.
				 */
				pSmhba = &mpt->m_phy_info[i].smhba_info;
				pSmhba->negotiated_link_rate = state;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_REMOVE,
					    &mpt->m_phy_info[i].smhba_info);
					mpt->m_phy_info[i].smhba_info.
					    negotiated_link_rate
					    = 0x1;
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_OFFLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					/*
					 * enc_handle 1 with no expander
					 * means a direct-attached device on
					 * the HBA's own enclosure; flag a
					 * port change so iports re-enumerate.
					 */
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at "
					    "6.0 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}

				/* Previous link rate nibble, for the trace. */
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(prev, ", was disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(prev, ", was offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(prev, ", was SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(prev, ", was SMP reset "
					    "in progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(prev, ", was online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(prev, ", was online at "
					    "3.0 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(prev, ", was online at "
					    "6.0 Gbps");
					break;
				default:
					break;
				}
				(void) sprintf(&string[strlen(string)], "link "
				    "changed, ");
				break;
			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
				continue;
			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
				(void) sprintf(&string[strlen(string)],
				    "target not responding, delaying "
				    "removal");
				break;
			}
			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
			    mpt->m_instance, phy, dev_handle, string, curr,
			    prev));
		}
		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
	{
		Mpi2EventDataIrConfigChangeList_t *irChangeList;
		/* Singly-linked list of changes handed to the DR taskq. */
		mptsas_topo_change_list_t *topo_head = NULL;
		mptsas_topo_change_list_t *topo_tail = NULL;
		mptsas_topo_change_list_t *topo_node = NULL;
		mptsas_target_t *ptgt;
		mptsas_hash_table_t *tgttbl;
		uint8_t num_entries, i, reason;
		uint16_t volhandle, diskhandle;

		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
		    eventreply->EventData;
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irChangeList->NumElements);

		tgttbl = &mpt->m_active->m_tgttbl;

		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
		    mpt->m_instance));

		/* Process each RAID configuration change element. */
		for (i = 0; i < num_entries; i++) {
			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].ReasonCode);
			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].VolDevHandle);
			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);

			switch (reason) {
			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			{
				/* New RAID volume: queue a reconfig. */
				NDBG20(("mptsas %d volume added\n",
				    mpt->m_instance));

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);

				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				/* Volumes have no physical port; use 0xff. */
				topo_node->un.physport = 0xff;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			{
				NDBG20(("mptsas %d volume deleted\n",
				    mpt->m_instance));
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    volhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Clear any flags related to volume
				 */
				(void) mptsas_delete_volume(mpt, volhandle);

				/*
				 * Set the DR flag immediately (under
				 * m_tx_waitq_mutex) so I/O to the departing
				 * volume is rejected rather than queued.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			{
				/*
				 * A physical disk was absorbed into (hidden
				 * behind) an IR volume: offline the target.
				 */
				ptgt = mptsas_search_by_devhdl(tgttbl,
				    diskhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Set the DR flag immediately (under
				 * m_tx_waitq_mutex) so I/O to the hidden
				 * disk is rejected rather than queued.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = ptgt->m_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			{
				/*
				 * The physical drive is released by an IR
				 * volume.  We cannot get the physport or
				 * phynum from the event data, so they can
				 * only be obtained later via a SAS Device
				 * Page0 request for the devhdl.
				 */
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = 0;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = NULL;
				mpt->m_port_chng = 1;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			default:
				break;
			}
		}

		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	default:
		/* Not a sync-handled event; caller processes it elsewhere. */
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
6973
6974 /*
6975 * handle events from ioc
6976 */
6977 static void
6978 mptsas_handle_event(void *args)
6979 {
6980 m_replyh_arg_t *replyh_arg;
6981 pMpi2EventNotificationReply_t eventreply;
6982 uint32_t event, iocloginfo, rfm;
6983 uint32_t status;
6984 uint8_t port;
6985 mptsas_t *mpt;
6986 uint_t iocstatus;
6987
6988 replyh_arg = (m_replyh_arg_t *)args;
6989 rfm = replyh_arg->rfm;
6990 mpt = replyh_arg->mpt;
6991
6992 mutex_enter(&mpt->m_mutex);
6993
6994 eventreply = (pMpi2EventNotificationReply_t)
6995 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6996 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6997
6998 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6999 &eventreply->IOCStatus)) {
7000 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7001 mptsas_log(mpt, CE_WARN,
7002 "!mptsas_handle_event: IOCStatus=0x%x, "
7003 "IOCLogInfo=0x%x", iocstatus,
7004 ddi_get32(mpt->m_acc_reply_frame_hdl,
7005 &eventreply->IOCLogInfo));
7006 } else {
7007 mptsas_log(mpt, CE_WARN,
7008 "mptsas_handle_event: IOCStatus=0x%x, "
7009 "IOCLogInfo=0x%x", iocstatus,
7010 ddi_get32(mpt->m_acc_reply_frame_hdl,
7011 &eventreply->IOCLogInfo));
7012 }
7013 }
7014
7015 /*
7016 * figure out what kind of event we got and handle accordingly
7017 */
7018 switch (event) {
7019 case MPI2_EVENT_LOG_ENTRY_ADDED:
7020 break;
7021 case MPI2_EVENT_LOG_DATA:
7022 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7023 &eventreply->IOCLogInfo);
7024 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7025 iocloginfo));
7026 break;
7027 case MPI2_EVENT_STATE_CHANGE:
7028 NDBG20(("mptsas%d state change.", mpt->m_instance));
7029 break;
7030 case MPI2_EVENT_HARD_RESET_RECEIVED:
7031 NDBG20(("mptsas%d event change.", mpt->m_instance));
7032 break;
7033 case MPI2_EVENT_SAS_DISCOVERY:
7034 {
7035 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7036 char string[80];
7037 uint8_t rc;
7038
7039 sasdiscovery =
7040 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7041
7042 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7043 &sasdiscovery->ReasonCode);
7044 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7045 &sasdiscovery->PhysicalPort);
7046 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7047 &sasdiscovery->DiscoveryStatus);
7048
7049 string[0] = 0;
7050 switch (rc) {
7051 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7052 (void) sprintf(string, "STARTING");
7053 break;
7054 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7055 (void) sprintf(string, "COMPLETED");
7056 break;
7057 default:
7058 (void) sprintf(string, "UNKNOWN");
7059 break;
7060 }
7061
7062 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7063 port, status));
7064
7065 break;
7066 }
7067 case MPI2_EVENT_EVENT_CHANGE:
7068 NDBG20(("mptsas%d event change.", mpt->m_instance));
7069 break;
7070 case MPI2_EVENT_TASK_SET_FULL:
7071 {
7072 pMpi2EventDataTaskSetFull_t taskfull;
7073
7074 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7075
7076 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7077 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7078 &taskfull->CurrentDepth)));
7079 break;
7080 }
7081 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7082 {
7083 /*
7084 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7085 * in mptsas_handle_event_sync() of interrupt context
7086 */
7087 break;
7088 }
7089 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7090 {
7091 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7092 uint8_t rc;
7093 char string[80];
7094
7095 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7096 eventreply->EventData;
7097
7098 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7099 &encstatus->ReasonCode);
7100 switch (rc) {
7101 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7102 (void) sprintf(string, "added");
7103 break;
7104 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7105 (void) sprintf(string, ", not responding");
7106 break;
7107 default:
7108 break;
7109 }
7110 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7111 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7112 &encstatus->EnclosureHandle), string));
7113 break;
7114 }
7115
7116 /*
7117 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7118 * mptsas_handle_event_sync,in here just send ack message.
7119 */
7120 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7121 {
7122 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7123 uint8_t rc;
7124 uint16_t devhdl;
7125 uint64_t wwn = 0;
7126 uint32_t wwn_lo, wwn_hi;
7127
7128 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7129 eventreply->EventData;
7130 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7131 &statuschange->ReasonCode);
7132 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7133 (uint32_t *)(void *)&statuschange->SASAddress);
7134 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7135 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7136 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7137 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7138 &statuschange->DevHandle);
7139
7140 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7141 wwn));
7142
7143 switch (rc) {
7144 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7145 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7146 ddi_get8(mpt->m_acc_reply_frame_hdl,
7147 &statuschange->ASC),
7148 ddi_get8(mpt->m_acc_reply_frame_hdl,
7149 &statuschange->ASCQ)));
7150 break;
7151
7152 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7153 NDBG20(("Device not supported"));
7154 break;
7155
7156 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7157 NDBG20(("IOC internally generated the Target Reset "
7158 "for devhdl:%x", devhdl));
7159 break;
7160
7161 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7162 NDBG20(("IOC's internally generated Target Reset "
7163 "completed for devhdl:%x", devhdl));
7164 break;
7165
7166 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7167 NDBG20(("IOC internally generated Abort Task"));
7168 break;
7169
7170 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7171 NDBG20(("IOC's internally generated Abort Task "
7172 "completed"));
7173 break;
7174
7175 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7176 NDBG20(("IOC internally generated Abort Task Set"));
7177 break;
7178
7179 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7180 NDBG20(("IOC internally generated Clear Task Set"));
7181 break;
7182
7183 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7184 NDBG20(("IOC internally generated Query Task"));
7185 break;
7186
7187 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7188 NDBG20(("Device sent an Asynchronous Notification"));
7189 break;
7190
7191 default:
7192 break;
7193 }
7194 break;
7195 }
7196 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7197 {
7198 /*
7199 * IR TOPOLOGY CHANGE LIST Event has already been handled
7200 * in mpt_handle_event_sync() of interrupt context
7201 */
7202 break;
7203 }
7204 case MPI2_EVENT_IR_OPERATION_STATUS:
7205 {
7206 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7207 char reason_str[80];
7208 uint8_t rc, percent;
7209 uint16_t handle;
7210
7211 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7212 eventreply->EventData;
7213 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7214 &irOpStatus->RAIDOperation);
7215 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7216 &irOpStatus->PercentComplete);
7217 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7218 &irOpStatus->VolDevHandle);
7219
7220 switch (rc) {
7221 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7222 (void) sprintf(reason_str, "resync");
7223 break;
7224 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7225 (void) sprintf(reason_str, "online capacity "
7226 "expansion");
7227 break;
7228 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7229 (void) sprintf(reason_str, "consistency check");
7230 break;
7231 default:
7232 (void) sprintf(reason_str, "unknown reason %x",
7233 rc);
7234 }
7235
7236 NDBG20(("mptsas%d raid operational status: (%s)"
7237 "\thandle(0x%04x), percent complete(%d)\n",
7238 mpt->m_instance, reason_str, handle, percent));
7239 break;
7240 }
7241 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7242 {
7243 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7244 uint8_t phy_num;
7245 uint8_t primitive;
7246
7247 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7248 eventreply->EventData;
7249
7250 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7251 &sas_broadcast->PhyNum);
7252 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7253 &sas_broadcast->Primitive);
7254
7255 switch (primitive) {
7256 case MPI2_EVENT_PRIMITIVE_CHANGE:
7257 mptsas_smhba_log_sysevent(mpt,
7258 ESC_SAS_HBA_PORT_BROADCAST,
7259 SAS_PORT_BROADCAST_CHANGE,
7260 &mpt->m_phy_info[phy_num].smhba_info);
7261 break;
7262 case MPI2_EVENT_PRIMITIVE_SES:
7263 mptsas_smhba_log_sysevent(mpt,
7264 ESC_SAS_HBA_PORT_BROADCAST,
7265 SAS_PORT_BROADCAST_SES,
7266 &mpt->m_phy_info[phy_num].smhba_info);
7267 break;
7268 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7269 mptsas_smhba_log_sysevent(mpt,
7270 ESC_SAS_HBA_PORT_BROADCAST,
7271 SAS_PORT_BROADCAST_D01_4,
7272 &mpt->m_phy_info[phy_num].smhba_info);
7273 break;
7274 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7275 mptsas_smhba_log_sysevent(mpt,
7276 ESC_SAS_HBA_PORT_BROADCAST,
7277 SAS_PORT_BROADCAST_D04_7,
7278 &mpt->m_phy_info[phy_num].smhba_info);
7279 break;
7280 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7281 mptsas_smhba_log_sysevent(mpt,
7282 ESC_SAS_HBA_PORT_BROADCAST,
7283 SAS_PORT_BROADCAST_D16_7,
7284 &mpt->m_phy_info[phy_num].smhba_info);
7285 break;
7286 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7287 mptsas_smhba_log_sysevent(mpt,
7288 ESC_SAS_HBA_PORT_BROADCAST,
7289 SAS_PORT_BROADCAST_D29_7,
7290 &mpt->m_phy_info[phy_num].smhba_info);
7291 break;
7292 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7293 mptsas_smhba_log_sysevent(mpt,
7294 ESC_SAS_HBA_PORT_BROADCAST,
7295 SAS_PORT_BROADCAST_D24_0,
7296 &mpt->m_phy_info[phy_num].smhba_info);
7297 break;
7298 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7299 mptsas_smhba_log_sysevent(mpt,
7300 ESC_SAS_HBA_PORT_BROADCAST,
7301 SAS_PORT_BROADCAST_D27_4,
7302 &mpt->m_phy_info[phy_num].smhba_info);
7303 break;
7304 default:
7305 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7306 " %x received",
7307 mpt->m_instance, primitive));
7308 break;
7309 }
7310 NDBG20(("mptsas%d sas broadcast primitive: "
7311 "\tprimitive(0x%04x), phy(%d) complete\n",
7312 mpt->m_instance, primitive, phy_num));
7313 break;
7314 }
7315 case MPI2_EVENT_IR_VOLUME:
7316 {
7317 Mpi2EventDataIrVolume_t *irVolume;
7318 uint16_t devhandle;
7319 uint32_t state;
7320 int config, vol;
7321 mptsas_slots_t *slots = mpt->m_active;
7322 uint8_t found = FALSE;
7323
7324 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7325 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7326 &irVolume->NewValue);
7327 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7328 &irVolume->VolDevHandle);
7329
7330 NDBG20(("EVENT_IR_VOLUME event is received"));
7331
7332 /*
7333 * Get latest RAID info and then find the DevHandle for this
7334 * event in the configuration. If the DevHandle is not found
7335 * just exit the event.
7336 */
7337 (void) mptsas_get_raid_info(mpt);
7338 for (config = 0; (config < slots->m_num_raid_configs) &&
7339 (!found); config++) {
7340 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7341 if (slots->m_raidconfig[config].m_raidvol[vol].
7342 m_raidhandle == devhandle) {
7343 found = TRUE;
7344 break;
7345 }
7346 }
7347 }
7348 if (!found) {
7349 break;
7350 }
7351
7352 switch (irVolume->ReasonCode) {
7353 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7354 {
7355 uint32_t i;
7356 slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7357 state;
7358
7359 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7360 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7361 ", auto-config of hot-swap drives is %s"
7362 ", write caching is %s"
7363 ", hot-spare pool mask is %02x\n",
7364 vol, state &
7365 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7366 ? "disabled" : "enabled",
7367 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7368 ? "controlled by member disks" :
7369 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7370 ? "disabled" :
7371 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7372 ? "enabled" :
7373 "incorrectly set",
7374 (state >> 16) & 0xff);
7375 break;
7376 }
7377 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7378 {
7379 slots->m_raidconfig[config].m_raidvol[vol].m_state =
7380 (uint8_t)state;
7381
7382 mptsas_log(mpt, CE_NOTE,
7383 "Volume %d is now %s\n", vol,
7384 state == MPI2_RAID_VOL_STATE_OPTIMAL
7385 ? "optimal" :
7386 state == MPI2_RAID_VOL_STATE_DEGRADED
7387 ? "degraded" :
7388 state == MPI2_RAID_VOL_STATE_ONLINE
7389 ? "online" :
7390 state == MPI2_RAID_VOL_STATE_INITIALIZING
7391 ? "initializing" :
7392 state == MPI2_RAID_VOL_STATE_FAILED
7393 ? "failed" :
7394 state == MPI2_RAID_VOL_STATE_MISSING
7395 ? "missing" :
7396 "state unknown");
7397 break;
7398 }
7399 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7400 {
7401 slots->m_raidconfig[config].m_raidvol[vol].
7402 m_statusflags = state;
7403
7404 mptsas_log(mpt, CE_NOTE,
7405 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7406 vol,
7407 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7408 ? ", enabled" : ", disabled",
7409 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7410 ? ", quiesced" : "",
7411 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7412 ? ", inactive" : ", active",
7413 state &
7414 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7415 ? ", bad block table is full" : "",
7416 state &
7417 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7418 ? ", resync in progress" : "",
7419 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7420 ? ", background initialization in progress" : "",
7421 state &
7422 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7423 ? ", capacity expansion in progress" : "",
7424 state &
7425 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7426 ? ", consistency check in progress" : "",
7427 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7428 ? ", data scrub in progress" : "");
7429 break;
7430 }
7431 default:
7432 break;
7433 }
7434 break;
7435 }
7436 case MPI2_EVENT_IR_PHYSICAL_DISK:
7437 {
7438 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7439 uint16_t devhandle, enchandle, slot;
7440 uint32_t status, state;
7441 uint8_t physdisknum, reason;
7442
7443 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7444 eventreply->EventData;
7445 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7446 &irPhysDisk->PhysDiskNum);
7447 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7448 &irPhysDisk->PhysDiskDevHandle);
7449 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7450 &irPhysDisk->EnclosureHandle);
7451 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7452 &irPhysDisk->Slot);
7453 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7454 &irPhysDisk->NewValue);
7455 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7456 &irPhysDisk->ReasonCode);
7457
7458 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7459
7460 switch (reason) {
7461 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7462 mptsas_log(mpt, CE_NOTE,
7463 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7464 "for enclosure with handle 0x%x is now in hot "
7465 "spare pool %d",
7466 physdisknum, devhandle, slot, enchandle,
7467 (state >> 16) & 0xff);
7468 break;
7469
7470 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7471 status = state;
7472 mptsas_log(mpt, CE_NOTE,
7473 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7474 "for enclosure with handle 0x%x is now "
7475 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7476 enchandle,
7477 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7478 ? ", inactive" : ", active",
7479 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7480 ? ", out of sync" : "",
7481 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7482 ? ", quiesced" : "",
7483 status &
7484 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7485 ? ", write cache enabled" : "",
7486 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7487 ? ", capacity expansion target" : "");
7488 break;
7489
7490 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7491 mptsas_log(mpt, CE_NOTE,
7492 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7493 "for enclosure with handle 0x%x is now %s\n",
7494 physdisknum, devhandle, slot, enchandle,
7495 state == MPI2_RAID_PD_STATE_OPTIMAL
7496 ? "optimal" :
7497 state == MPI2_RAID_PD_STATE_REBUILDING
7498 ? "rebuilding" :
7499 state == MPI2_RAID_PD_STATE_DEGRADED
7500 ? "degraded" :
7501 state == MPI2_RAID_PD_STATE_HOT_SPARE
7502 ? "a hot spare" :
7503 state == MPI2_RAID_PD_STATE_ONLINE
7504 ? "online" :
7505 state == MPI2_RAID_PD_STATE_OFFLINE
7506 ? "offline" :
7507 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7508 ? "not compatible" :
7509 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7510 ? "not configured" :
7511 "state unknown");
7512 break;
7513 }
7514 break;
7515 }
7516 default:
7517 NDBG20(("mptsas%d: unknown event %x received",
7518 mpt->m_instance, event));
7519 break;
7520 }
7521
7522 /*
7523 * Return the reply frame to the free queue.
7524 */
7525 ddi_put32(mpt->m_acc_free_queue_hdl,
7526 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7527 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7528 DDI_DMA_SYNC_FORDEV);
7529 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7530 mpt->m_free_index = 0;
7531 }
7532 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7533 mpt->m_free_index);
7534 mutex_exit(&mpt->m_mutex);
7535 }
7536
7537 /*
7538 * invoked from timeout() to restart qfull cmds with throttle == 0
7539 */
7540 static void
7541 mptsas_restart_cmd(void *arg)
7542 {
7543 mptsas_t *mpt = arg;
7544 mptsas_target_t *ptgt = NULL;
7545
7546 mutex_enter(&mpt->m_mutex);
7547
7548 mpt->m_restart_cmd_timeid = 0;
7549
7550 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7551 MPTSAS_HASH_FIRST);
7552 while (ptgt != NULL) {
7553 if (ptgt->m_reset_delay == 0) {
7554 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7555 mptsas_set_throttle(mpt, ptgt,
7556 MAX_THROTTLE);
7557 }
7558 }
7559
7560 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7561 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7562 }
7563 mptsas_restart_hba(mpt);
7564 mutex_exit(&mpt->m_mutex);
7565 }
7566
7567 void
7568 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7569 {
7570 int slot;
7571 mptsas_slots_t *slots = mpt->m_active;
7572 int t;
7573 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7574
7575 ASSERT(cmd != NULL);
7576 ASSERT(cmd->cmd_queued == FALSE);
7577
7578 /*
7579 * Task Management cmds are removed in their own routines. Also,
7580 * we don't want to modify timeout based on TM cmds.
7581 */
7582 if (cmd->cmd_flags & CFLAG_TM_CMD) {
7583 return;
7584 }
7585
7586 t = Tgt(cmd);
7587 slot = cmd->cmd_slot;
7588
7589 /*
7590 * remove the cmd.
7591 */
7592 if (cmd == slots->m_slot[slot]) {
7593 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7594 slots->m_slot[slot] = NULL;
7595 mpt->m_ncmds--;
7596
7597 /*
7598 * only decrement per target ncmds if command
7599 * has a target associated with it.
7600 */
7601 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7602 ptgt->m_t_ncmds--;
7603 /*
7604 * reset throttle if we just ran an untagged command
7605 * to a tagged target
7606 */
7607 if ((ptgt->m_t_ncmds == 0) &&
7608 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7609 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7610 }
7611 }
7612
7613 }
7614
7615 /*
7616 * This is all we need to do for ioc commands.
7617 */
7618 if (cmd->cmd_flags & CFLAG_CMDIOC) {
7619 mptsas_return_to_pool(mpt, cmd);
7620 return;
7621 }
7622
7623 /*
7624 * Figure out what to set tag Q timeout for...
7625 *
7626 * Optimize: If we have duplicate's of same timeout
7627 * we're using, then we'll use it again until we run
7628 * out of duplicates. This should be the normal case
7629 * for block and raw I/O.
7630 * If no duplicates, we have to scan through tag que and
7631 * find the longest timeout value and use it. This is
7632 * going to take a while...
7633 * Add 1 to m_n_slots to account for TM request.
7634 */
7635 if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7636 if (--(ptgt->m_dups) == 0) {
7637 if (ptgt->m_t_ncmds) {
7638 mptsas_cmd_t *ssp;
7639 uint_t n = 0;
7640 ushort_t nslots = (slots->m_n_slots + 1);
7641 ushort_t i;
7642 /*
7643 * This crude check assumes we don't do
7644 * this too often which seems reasonable
7645 * for block and raw I/O.
7646 */
7647 for (i = 0; i < nslots; i++) {
7648 ssp = slots->m_slot[i];
7649 if (ssp && (Tgt(ssp) == t) &&
7650 (ssp->cmd_pkt->pkt_time > n)) {
7651 n = ssp->cmd_pkt->pkt_time;
7652 ptgt->m_dups = 1;
7653 } else if (ssp && (Tgt(ssp) == t) &&
7654 (ssp->cmd_pkt->pkt_time == n)) {
7655 ptgt->m_dups++;
7656 }
7657 }
7658 ptgt->m_timebase = n;
7659 } else {
7660 ptgt->m_dups = 0;
7661 ptgt->m_timebase = 0;
7662 }
7663 }
7664 }
7665 ptgt->m_timeout = ptgt->m_timebase;
7666
7667 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7668 }
7669
7670 /*
7671 * accept all cmds on the tx_waitq if any and then
7672 * start a fresh request from the top of the device queue.
7673 *
7674 * since there are always cmds queued on the tx_waitq, and rare cmds on
7675 * the instance waitq, so this function should not be invoked in the ISR,
7676 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7677 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7678 */
7679 static void
7680 mptsas_restart_hba(mptsas_t *mpt)
7681 {
7682 ASSERT(mutex_owned(&mpt->m_mutex));
7683
7684 mutex_enter(&mpt->m_tx_waitq_mutex);
7685 if (mpt->m_tx_waitq) {
7686 mptsas_accept_tx_waitq(mpt);
7687 }
7688 mutex_exit(&mpt->m_tx_waitq_mutex);
7689 mptsas_restart_waitq(mpt);
7690 }
7691
7692 /*
7693 * start a fresh request from the top of the device queue
7694 */
7695 static void
7696 mptsas_restart_waitq(mptsas_t *mpt)
7697 {
7698 mptsas_cmd_t *cmd, *next_cmd;
7699 mptsas_target_t *ptgt = NULL;
7700
7701 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7702
7703 ASSERT(mutex_owned(&mpt->m_mutex));
7704
7705 /*
7706 * If there is a reset delay, don't start any cmds. Otherwise, start
7707 * as many cmds as possible.
7708 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7709 * commands is m_max_requests - 2.
7710 */
7711 cmd = mpt->m_waitq;
7712
7713 while (cmd != NULL) {
7714 next_cmd = cmd->cmd_linkp;
7715 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7716 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7717 /*
7718 * passthru command get slot need
7719 * set CFLAG_PREPARED.
7720 */
7721 cmd->cmd_flags |= CFLAG_PREPARED;
7722 mptsas_waitq_delete(mpt, cmd);
7723 mptsas_start_passthru(mpt, cmd);
7724 }
7725 cmd = next_cmd;
7726 continue;
7727 }
7728 if (cmd->cmd_flags & CFLAG_CONFIG) {
7729 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7730 /*
7731 * Send the config page request and delete it
7732 * from the waitq.
7733 */
7734 cmd->cmd_flags |= CFLAG_PREPARED;
7735 mptsas_waitq_delete(mpt, cmd);
7736 mptsas_start_config_page_access(mpt, cmd);
7737 }
7738 cmd = next_cmd;
7739 continue;
7740 }
7741 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7742 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7743 /*
7744 * Send the FW Diag request and delete if from
7745 * the waitq.
7746 */
7747 cmd->cmd_flags |= CFLAG_PREPARED;
7748 mptsas_waitq_delete(mpt, cmd);
7749 mptsas_start_diag(mpt, cmd);
7750 }
7751 cmd = next_cmd;
7752 continue;
7753 }
7754
7755 ptgt = cmd->cmd_tgt_addr;
7756 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7757 (ptgt->m_t_ncmds == 0)) {
7758 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7759 }
7760 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7761 (ptgt && (ptgt->m_reset_delay == 0)) &&
7762 (ptgt && (ptgt->m_t_ncmds <
7763 ptgt->m_t_throttle))) {
7764 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7765 mptsas_waitq_delete(mpt, cmd);
7766 (void) mptsas_start_cmd(mpt, cmd);
7767 }
7768 }
7769 cmd = next_cmd;
7770 }
7771 }
7772 /*
7773 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7774 * Accept all those queued cmds before new cmd is accept so that the
7775 * cmds are sent in order.
7776 */
7777 static void
7778 mptsas_accept_tx_waitq(mptsas_t *mpt)
7779 {
7780 mptsas_cmd_t *cmd;
7781
7782 ASSERT(mutex_owned(&mpt->m_mutex));
7783 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7784
7785 /*
7786 * A Bus Reset could occur at any time and flush the tx_waitq,
7787 * so we cannot count on the tx_waitq to contain even one cmd.
7788 * And when the m_tx_waitq_mutex is released and run
7789 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7790 */
7791 cmd = mpt->m_tx_waitq;
7792 for (;;) {
7793 if ((cmd = mpt->m_tx_waitq) == NULL) {
7794 mpt->m_tx_draining = 0;
7795 break;
7796 }
7797 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7798 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7799 }
7800 cmd->cmd_linkp = NULL;
7801 mutex_exit(&mpt->m_tx_waitq_mutex);
7802 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7803 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7804 "to accept cmd on queue\n");
7805 mutex_enter(&mpt->m_tx_waitq_mutex);
7806 }
7807 }
7808
7809
7810 /*
7811 * mpt tag type lookup
7812 */
7813 static char mptsas_tag_lookup[] =
7814 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7815
/*
 * Build and post a SCSI IO request frame for cmd (which already owns a
 * request slot / SMID) and start its timeout accounting.  Returns
 * DDI_SUCCESS when the request was posted, DDI_FAILURE when the cmd was
 * requeued (untagged-drain case) or an FMA handle check failed.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * Set SMID and increment index.  Rollover to 1 instead of 0 if index
	 * is at the max.  0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Give the slot back and requeue at the head. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/*
		 * Untagged cmd: clamp the throttle to 1 so nothing else is
		 * issued to this target while it is outstanding (RQSENSE is
		 * exempt, per the SCSI-2 allowance above).
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Locate this SMID's request frame and build the IO request in it. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	/* SGLOffset0 is expressed in 32-bit words, hence the divide by 4. */
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	/* Copy the CDB into the request frame. */
	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data phase: a single zero-length simple SGE. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* Flush the frame to memory before telling the IOC about it. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	/*
	 * Maintain the duplicate-timeout accounting that
	 * mptsas_remove_cmd() unwinds:  n == 0 means another cmd with the
	 * current timebase; n > 0 establishes a new, larger timebase.
	 */
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif

	/* FMA: verify the DMA/access handles used above are still good. */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7997
7998 /*
7999 * Select a helper thread to handle current doneq
8000 */
8001 static void
8002 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8003 {
8004 uint64_t t, i;
8005 uint32_t min = 0xffffffff;
8006 mptsas_doneq_thread_list_t *item;
8007
8008 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8009 item = &mpt->m_doneq_thread_id[i];
8010 /*
8011 * If the completed command on help thread[i] less than
8012 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8013 * pick a thread which has least completed command.
8014 */
8015
8016 mutex_enter(&item->mutex);
8017 if (item->len < mpt->m_doneq_thread_threshold) {
8018 t = i;
8019 mutex_exit(&item->mutex);
8020 break;
8021 }
8022 if (item->len < min) {
8023 min = item->len;
8024 t = i;
8025 }
8026 mutex_exit(&item->mutex);
8027 }
8028 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8029 mptsas_doneq_mv(mpt, t);
8030 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8031 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8032 }
8033
8034 /*
8035 * move the current global doneq to the doneq of thead[t]
8036 */
8037 static void
8038 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8039 {
8040 mptsas_cmd_t *cmd;
8041 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8042
8043 ASSERT(mutex_owned(&item->mutex));
8044 while ((cmd = mpt->m_doneq) != NULL) {
8045 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8046 mpt->m_donetail = &mpt->m_doneq;
8047 }
8048 cmd->cmd_linkp = NULL;
8049 *item->donetail = cmd;
8050 item->donetail = &cmd->cmd_linkp;
8051 mpt->m_doneq_len--;
8052 item->len++;
8053 }
8054 }
8055
/*
 * FMA sweep run at command completion: verify every access and DMA
 * handle the command may have touched.  Any failure marks the pkt with
 * CMD_TRAN_ERR and reports a DDI_SERVICE_UNAFFECTED impact; the checks
 * are grouped so each group produces at most one service-impact report.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/*
		 * Only the config-space handle gets its error state
		 * cleared here; the other handles do not.
		 */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Instance-wide DMA handles (request/reply/queues/handshake). */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-command handles are optional; check only if present. */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_ext_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8126
8127 /*
8128 * These routines manipulate the queue of commands that
8129 * are waiting for their completion routines to be called.
8130 * The queue is usually in FIFO order but on an MP system
8131 * it's possible for the completion routines to get out
8132 * of order. If that's a problem you need to add a global
8133 * mutex around the code that calls the completion routine
8134 * in the interrupt handler.
8135 */
8136 static void
8137 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8138 {
8139 struct scsi_pkt *pkt = CMD2PKT(cmd);
8140
8141 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8142
8143 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8144 cmd->cmd_linkp = NULL;
8145 cmd->cmd_flags |= CFLAG_FINISHED;
8146 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8147
8148 mptsas_fma_check(mpt, cmd);
8149
8150 /*
8151 * only add scsi pkts that have completion routines to
8152 * the doneq. no intr cmds do not have callbacks.
8153 */
8154 if (pkt && (pkt->pkt_comp)) {
8155 *mpt->m_donetail = cmd;
8156 mpt->m_donetail = &cmd->cmd_linkp;
8157 mpt->m_doneq_len++;
8158 }
8159 }
8160
8161 static mptsas_cmd_t *
8162 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8163 {
8164 mptsas_cmd_t *cmd;
8165 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8166
8167 /* pop one off the done queue */
8168 if ((cmd = item->doneq) != NULL) {
8169 /* if the queue is now empty fix the tail pointer */
8170 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8171 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8172 item->donetail = &item->doneq;
8173 }
8174 cmd->cmd_linkp = NULL;
8175 item->len--;
8176 }
8177 return (cmd);
8178 }
8179
8180 static void
8181 mptsas_doneq_empty(mptsas_t *mpt)
8182 {
8183 if (mpt->m_doneq && !mpt->m_in_callback) {
8184 mptsas_cmd_t *cmd, *next;
8185 struct scsi_pkt *pkt;
8186
8187 mpt->m_in_callback = 1;
8188 cmd = mpt->m_doneq;
8189 mpt->m_doneq = NULL;
8190 mpt->m_donetail = &mpt->m_doneq;
8191 mpt->m_doneq_len = 0;
8192
8193 mutex_exit(&mpt->m_mutex);
8194 /*
8195 * run the completion routines of all the
8196 * completed commands
8197 */
8198 while (cmd != NULL) {
8199 next = cmd->cmd_linkp;
8200 cmd->cmd_linkp = NULL;
8201 /* run this command's completion routine */
8202 cmd->cmd_flags |= CFLAG_COMPLETED;
8203 pkt = CMD2PKT(cmd);
8204 mptsas_pkt_comp(pkt, cmd);
8205 cmd = next;
8206 }
8207 mutex_enter(&mpt->m_mutex);
8208 mpt->m_in_callback = 0;
8209 }
8210 }
8211
8212 /*
8213 * These routines manipulate the target's queue of pending requests
8214 */
8215 void
8216 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8217 {
8218 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8219 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8220 cmd->cmd_queued = TRUE;
8221 if (ptgt)
8222 ptgt->m_t_nwait++;
8223 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8224 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8225 mpt->m_waitqtail = &cmd->cmd_linkp;
8226 }
8227 mpt->m_waitq = cmd;
8228 } else {
8229 cmd->cmd_linkp = NULL;
8230 *(mpt->m_waitqtail) = cmd;
8231 mpt->m_waitqtail = &cmd->cmd_linkp;
8232 }
8233 }
8234
8235 static mptsas_cmd_t *
8236 mptsas_waitq_rm(mptsas_t *mpt)
8237 {
8238 mptsas_cmd_t *cmd;
8239 mptsas_target_t *ptgt;
8240 NDBG7(("mptsas_waitq_rm"));
8241
8242 MPTSAS_WAITQ_RM(mpt, cmd);
8243
8244 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8245 if (cmd) {
8246 ptgt = cmd->cmd_tgt_addr;
8247 if (ptgt) {
8248 ptgt->m_t_nwait--;
8249 ASSERT(ptgt->m_t_nwait >= 0);
8250 }
8251 }
8252 return (cmd);
8253 }
8254
8255 /*
8256 * remove specified cmd from the middle of the wait queue.
8257 */
8258 static void
8259 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8260 {
8261 mptsas_cmd_t *prevp = mpt->m_waitq;
8262 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8263
8264 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8265 (void *)mpt, (void *)cmd));
8266 if (ptgt) {
8267 ptgt->m_t_nwait--;
8268 ASSERT(ptgt->m_t_nwait >= 0);
8269 }
8270
8271 if (prevp == cmd) {
8272 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8273 mpt->m_waitqtail = &mpt->m_waitq;
8274
8275 cmd->cmd_linkp = NULL;
8276 cmd->cmd_queued = FALSE;
8277 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8278 (void *)mpt, (void *)cmd));
8279 return;
8280 }
8281
8282 while (prevp != NULL) {
8283 if (prevp->cmd_linkp == cmd) {
8284 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8285 mpt->m_waitqtail = &prevp->cmd_linkp;
8286
8287 cmd->cmd_linkp = NULL;
8288 cmd->cmd_queued = FALSE;
8289 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8290 (void *)mpt, (void *)cmd));
8291 return;
8292 }
8293 prevp = prevp->cmd_linkp;
8294 }
8295 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8296 }
8297
8298 static mptsas_cmd_t *
8299 mptsas_tx_waitq_rm(mptsas_t *mpt)
8300 {
8301 mptsas_cmd_t *cmd;
8302 NDBG7(("mptsas_tx_waitq_rm"));
8303
8304 MPTSAS_TX_WAITQ_RM(mpt, cmd);
8305
8306 NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8307
8308 return (cmd);
8309 }
8310
8311 /*
8312 * remove specified cmd from the middle of the tx_waitq.
8313 */
8314 static void
8315 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8316 {
8317 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8318
8319 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8320 (void *)mpt, (void *)cmd));
8321
8322 if (prevp == cmd) {
8323 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8324 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8325
8326 cmd->cmd_linkp = NULL;
8327 cmd->cmd_queued = FALSE;
8328 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8329 (void *)mpt, (void *)cmd));
8330 return;
8331 }
8332
8333 while (prevp != NULL) {
8334 if (prevp->cmd_linkp == cmd) {
8335 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8336 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8337
8338 cmd->cmd_linkp = NULL;
8339 cmd->cmd_queued = FALSE;
8340 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8341 (void *)mpt, (void *)cmd));
8342 return;
8343 }
8344 prevp = prevp->cmd_linkp;
8345 }
8346 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8347 }
8348
8349 /*
8350 * device and bus reset handling
8351 *
8352 * Notes:
8353 * - RESET_ALL: reset the controller
8354 * - RESET_TARGET: reset the target specified in scsi_address
8355 */
/*
 * tran_reset(9E) entry point.  Returns TRUE on success, FALSE on
 * failure.  NOTE(review): 'level' (RESET_ALL/RESET_TARGET) is only
 * logged here; a target-level reset is issued either way -- confirm
 * this is intentional.
 */
static int
mptsas_scsi_reset(struct scsi_address *ap, int level)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	mptsas_target_t		*ptgt = NULL;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
	ptgt = tgt_private->t_private;
	/* No target behind this address: nothing to reset. */
	if (ptgt == NULL) {
		return (FALSE);
	}
	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
	    level));

	mutex_enter(&mpt->m_mutex);
	/*
	 * if we are not in panic set up a reset delay for this target;
	 * in panic context, timeouts cannot fire, so busy-wait for the
	 * configured reset delay instead.
	 */
	if (!ddi_in_panic()) {
		mptsas_setup_bus_reset_delay(mpt);
	} else {
		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
	}
	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
	mutex_exit(&mpt->m_mutex);

	/*
	 * The transport layer expects to only see TRUE and
	 * FALSE. Therefore, we will adjust the return value
	 * if mptsas_do_scsi_reset returns FAILED.
	 */
	if (rval == FAILED)
		rval = FALSE;
	return (rval);
}
8393
8394 static int
8395 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8396 {
8397 int rval = FALSE;
8398 uint8_t config, disk;
8399 mptsas_slots_t *slots = mpt->m_active;
8400
8401 ASSERT(mutex_owned(&mpt->m_mutex));
8402
8403 if (mptsas_debug_resets) {
8404 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8405 devhdl);
8406 }
8407
8408 /*
8409 * Issue a Target Reset message to the target specified but not to a
8410 * disk making up a raid volume. Just look through the RAID config
8411 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8412 * list, then don't reset this target.
8413 */
8414 for (config = 0; config < slots->m_num_raid_configs; config++) {
8415 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8416 if (devhdl == slots->m_raidconfig[config].
8417 m_physdisk_devhdl[disk]) {
8418 return (TRUE);
8419 }
8420 }
8421 }
8422
8423 rval = mptsas_ioc_task_management(mpt,
8424 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8425
8426 mptsas_doneq_empty(mpt);
8427 return (rval);
8428 }
8429
/*
 * tran_reset_notify(9E) entry point: register or cancel a caller's
 * reset notification callback by delegating to the common
 * scsi_hba_reset_notify_setup(9F) helper, which maintains the
 * registrant list on mpt->m_reset_notify_listf under mpt->m_mutex.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
8441
8442 static int
8443 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8444 {
8445 dev_info_t *lun_dip = NULL;
8446
8447 ASSERT(sd != NULL);
8448 ASSERT(name != NULL);
8449 lun_dip = sd->sd_dev;
8450 ASSERT(lun_dip != NULL);
8451
8452 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8453 return (1);
8454 } else {
8455 return (0);
8456 }
8457 }
8458
/*
 * tran_get_bus_addr(9E) entry point: the bus-address string is the same
 * unit-address form produced by mptsas_get_name(), so just delegate.
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8464
8465 void
8466 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8467 {
8468
8469 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8470
8471 /*
8472 * if the bus is draining/quiesced, no changes to the throttles
8473 * are allowed. Not allowing change of throttles during draining
8474 * limits error recovery but will reduce draining time
8475 *
8476 * all throttles should have been set to HOLD_THROTTLE
8477 */
8478 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8479 return;
8480 }
8481
8482 if (what == HOLD_THROTTLE) {
8483 ptgt->m_t_throttle = HOLD_THROTTLE;
8484 } else if (ptgt->m_reset_delay == 0) {
8485 ptgt->m_t_throttle = what;
8486 }
8487 }
8488
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target.  For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 *
	 * reason/stat are re-initialized on every slot because the
	 * ABRT_TASK_SET case below overwrites them before falling
	 * through to the LOGICAL_UNIT_RESET handling.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* Target reset: match on target only. */
			if (Tgt(cmd) == target) {
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			/*
			 * Abort task set: same target/lun matching as LU
			 * reset, but completed as CMD_ABORTED instead of
			 * CMD_RESET.
			 */
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		/* Sweep the waitq, completing this target's commands. */
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		/*
		 * Sweep the tx_waitq.  m_tx_waitq_mutex is dropped around
		 * the completion calls and reacquired to continue the walk;
		 * next_cmd was captured before the drop so the walk survives
		 * the command's removal from the list.
		 */
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		/* Same sweeps as above, but matching on target AND lun. */
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
8624
/*
 * Clean up hba state, abort all outstanding command and commands in waitq
 * reset timeout of all targets.
 *
 * Every command found in the active slots, the waitq, and the tx_waitq is
 * completed with CMD_RESET/STAT_BUS_RESET; synchronous internal commands
 * (passthrough, config, FW diag) instead have their waiters woken.
 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				/*
				 * CFLAG_FINISHED plus the broadcasts wake
				 * any thread blocked waiting for one of
				 * these synchronous commands to complete.
				 */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/* Synchronous internal command: wake its waiter. */
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq.  m_tx_waitq_mutex is dropped around each
	 * completion and reacquired before removing the next command.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);
}
8708
8709 /*
8710 * set pkt_reason and OR in pkt_statistics flag
8711 */
8712 static void
8713 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8714 uint_t stat)
8715 {
8716 #ifndef __lock_lint
8717 _NOTE(ARGUNUSED(mpt))
8718 #endif
8719
8720 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8721 (void *)cmd, reason, stat));
8722
8723 if (cmd) {
8724 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8725 cmd->cmd_pkt->pkt_reason = reason;
8726 }
8727 cmd->cmd_pkt->pkt_statistics |= stat;
8728 }
8729 }
8730
8731 static void
8732 mptsas_start_watch_reset_delay()
8733 {
8734 NDBG22(("mptsas_start_watch_reset_delay"));
8735
8736 mutex_enter(&mptsas_global_mutex);
8737 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8738 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8739 drv_usectohz((clock_t)
8740 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8741 ASSERT(mptsas_reset_watch != NULL);
8742 }
8743 mutex_exit(&mptsas_global_mutex);
8744 }
8745
8746 static void
8747 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8748 {
8749 mptsas_target_t *ptgt = NULL;
8750
8751 NDBG22(("mptsas_setup_bus_reset_delay"));
8752 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8753 MPTSAS_HASH_FIRST);
8754 while (ptgt != NULL) {
8755 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8756 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8757
8758 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8759 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8760 }
8761
8762 mptsas_start_watch_reset_delay();
8763 }
8764
8765 /*
8766 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8767 * mpt instance for active reset delays
8768 */
8769 static void
8770 mptsas_watch_reset_delay(void *arg)
8771 {
8772 #ifndef __lock_lint
8773 _NOTE(ARGUNUSED(arg))
8774 #endif
8775
8776 mptsas_t *mpt;
8777 int not_done = 0;
8778
8779 NDBG22(("mptsas_watch_reset_delay"));
8780
8781 mutex_enter(&mptsas_global_mutex);
8782 mptsas_reset_watch = 0;
8783 mutex_exit(&mptsas_global_mutex);
8784 rw_enter(&mptsas_global_rwlock, RW_READER);
8785 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
8786 if (mpt->m_tran == 0) {
8787 continue;
8788 }
8789 mutex_enter(&mpt->m_mutex);
8790 not_done += mptsas_watch_reset_delay_subr(mpt);
8791 mutex_exit(&mpt->m_mutex);
8792 }
8793 rw_exit(&mptsas_global_rwlock);
8794
8795 if (not_done) {
8796 mptsas_start_watch_reset_delay();
8797 }
8798 }
8799
8800 static int
8801 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
8802 {
8803 int done = 0;
8804 int restart = 0;
8805 mptsas_target_t *ptgt = NULL;
8806
8807 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
8808
8809 ASSERT(mutex_owned(&mpt->m_mutex));
8810
8811 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8812 MPTSAS_HASH_FIRST);
8813 while (ptgt != NULL) {
8814 if (ptgt->m_reset_delay != 0) {
8815 ptgt->m_reset_delay -=
8816 MPTSAS_WATCH_RESET_DELAY_TICK;
8817 if (ptgt->m_reset_delay <= 0) {
8818 ptgt->m_reset_delay = 0;
8819 mptsas_set_throttle(mpt, ptgt,
8820 MAX_THROTTLE);
8821 restart++;
8822 } else {
8823 done = -1;
8824 }
8825 }
8826
8827 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8828 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8829 }
8830
8831 if (restart > 0) {
8832 mptsas_restart_hba(mpt);
8833 }
8834 return (done);
8835 }
8836
8837 #ifdef MPTSAS_TEST
8838 static void
8839 mptsas_test_reset(mptsas_t *mpt, int target)
8840 {
8841 mptsas_target_t *ptgt = NULL;
8842
8843 if (mptsas_rtest == target) {
8844 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8845 mptsas_rtest = -1;
8846 }
8847 if (mptsas_rtest == -1) {
8848 NDBG22(("mptsas_test_reset success"));
8849 }
8850 }
8851 }
8852 #endif
8853
8854 /*
8855 * abort handling:
8856 *
8857 * Notes:
8858 * - if pkt is not NULL, abort just that command
8859 * - if pkt is NULL, abort all outstanding commands for target
8860 */
8861 static int
8862 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
8863 {
8864 mptsas_t *mpt = ADDR2MPT(ap);
8865 int rval;
8866 mptsas_tgt_private_t *tgt_private;
8867 int target, lun;
8868
8869 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
8870 tran_tgt_private;
8871 ASSERT(tgt_private != NULL);
8872 target = tgt_private->t_private->m_devhdl;
8873 lun = tgt_private->t_lun;
8874
8875 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
8876
8877 mutex_enter(&mpt->m_mutex);
8878 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
8879 mutex_exit(&mpt->m_mutex);
8880 return (rval);
8881 }
8882
/*
 * Abort one packet, or the whole task set for a target/lun.
 *
 * Returns TRUE on success, FALSE on failure (the transport layer only
 * understands TRUE/FALSE, so FAILED from the IOC task-management call is
 * mapped to FALSE).  Must be called with m_mutex held.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		/*
		 * If the command is still queued (never started on the
		 * hardware), it can be completed right here without
		 * involving the firmware.
		 */
		if (sp->cmd_queued) {
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Push anything queued on the done queue out to the callbacks. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8962
8963 /*
8964 * capability handling:
8965 * (*tran_getcap). Get the capability named, and return its value.
8966 */
8967 static int
8968 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
8969 {
8970 mptsas_t *mpt = ADDR2MPT(ap);
8971 int ckey;
8972 int rval = FALSE;
8973
8974 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
8975 ap->a_target, cap, tgtonly));
8976
8977 mutex_enter(&mpt->m_mutex);
8978
8979 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
8980 mutex_exit(&mpt->m_mutex);
8981 return (UNDEFINED);
8982 }
8983
8984 switch (ckey) {
8985 case SCSI_CAP_DMA_MAX:
8986 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
8987 break;
8988 case SCSI_CAP_ARQ:
8989 rval = TRUE;
8990 break;
8991 case SCSI_CAP_MSG_OUT:
8992 case SCSI_CAP_PARITY:
8993 case SCSI_CAP_UNTAGGED_QING:
8994 rval = TRUE;
8995 break;
8996 case SCSI_CAP_TAGGED_QING:
8997 rval = TRUE;
8998 break;
8999 case SCSI_CAP_RESET_NOTIFICATION:
9000 rval = TRUE;
9001 break;
9002 case SCSI_CAP_LINKED_CMDS:
9003 rval = FALSE;
9004 break;
9005 case SCSI_CAP_QFULL_RETRIES:
9006 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9007 tran_tgt_private))->t_private->m_qfull_retries;
9008 break;
9009 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9010 rval = drv_hztousec(((mptsas_tgt_private_t *)
9011 (ap->a_hba_tran->tran_tgt_private))->
9012 t_private->m_qfull_retry_interval) / 1000;
9013 break;
9014 case SCSI_CAP_CDB_LEN:
9015 rval = CDB_GROUP4;
9016 break;
9017 case SCSI_CAP_INTERCONNECT_TYPE:
9018 rval = INTERCONNECT_SAS;
9019 break;
9020 case SCSI_CAP_TRAN_LAYER_RETRIES:
9021 if (mpt->m_ioc_capabilities &
9022 MPI2_IOCFACTS_CAPABILITY_TLR)
9023 rval = TRUE;
9024 else
9025 rval = FALSE;
9026 break;
9027 default:
9028 rval = UNDEFINED;
9029 break;
9030 }
9031
9032 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9033
9034 mutex_exit(&mpt->m_mutex);
9035 return (rval);
9036 }
9037
9038 /*
9039 * (*tran_setcap). Set the capability named to the value given.
9040 */
9041 static int
9042 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9043 {
9044 mptsas_t *mpt = ADDR2MPT(ap);
9045 int ckey;
9046 int rval = FALSE;
9047
9048 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9049 ap->a_target, cap, value, tgtonly));
9050
9051 if (!tgtonly) {
9052 return (rval);
9053 }
9054
9055 mutex_enter(&mpt->m_mutex);
9056
9057 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9058 mutex_exit(&mpt->m_mutex);
9059 return (UNDEFINED);
9060 }
9061
9062 switch (ckey) {
9063 case SCSI_CAP_DMA_MAX:
9064 case SCSI_CAP_MSG_OUT:
9065 case SCSI_CAP_PARITY:
9066 case SCSI_CAP_INITIATOR_ID:
9067 case SCSI_CAP_LINKED_CMDS:
9068 case SCSI_CAP_UNTAGGED_QING:
9069 case SCSI_CAP_RESET_NOTIFICATION:
9070 /*
9071 * None of these are settable via
9072 * the capability interface.
9073 */
9074 break;
9075 case SCSI_CAP_ARQ:
9076 /*
9077 * We cannot turn off arq so return false if asked to
9078 */
9079 if (value) {
9080 rval = TRUE;
9081 } else {
9082 rval = FALSE;
9083 }
9084 break;
9085 case SCSI_CAP_TAGGED_QING:
9086 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9087 (ap->a_hba_tran->tran_tgt_private))->t_private,
9088 MAX_THROTTLE);
9089 rval = TRUE;
9090 break;
9091 case SCSI_CAP_QFULL_RETRIES:
9092 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9093 t_private->m_qfull_retries = (uchar_t)value;
9094 rval = TRUE;
9095 break;
9096 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9097 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9098 t_private->m_qfull_retry_interval =
9099 drv_usectohz(value * 1000);
9100 rval = TRUE;
9101 break;
9102 default:
9103 rval = UNDEFINED;
9104 break;
9105 }
9106 mutex_exit(&mpt->m_mutex);
9107 return (rval);
9108 }
9109
9110 /*
9111 * Utility routine for mptsas_ifsetcap/ifgetcap
9112 */
9113 /*ARGSUSED*/
9114 static int
9115 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9116 {
9117 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9118
9119 if (!cap)
9120 return (FALSE);
9121
9122 *cidxp = scsi_hba_lookup_capstr(cap);
9123 return (TRUE);
9124 }
9125
9126 static int
9127 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9128 {
9129 mptsas_slots_t *old_active = mpt->m_active;
9130 mptsas_slots_t *new_active;
9131 size_t size;
9132 int rval = -1, i;
9133
9134 /*
9135 * if there are active commands, then we cannot
9136 * change size of active slots array.
9137 */
9138 ASSERT(mpt->m_ncmds == 0);
9139
9140 size = MPTSAS_SLOTS_SIZE(mpt);
9141 new_active = kmem_zalloc(size, flag);
9142 if (new_active == NULL) {
9143 NDBG1(("new active alloc failed"));
9144 return (rval);
9145 }
9146 /*
9147 * Since SMID 0 is reserved and the TM slot is reserved, the
9148 * number of slots that can be used at any one time is
9149 * m_max_requests - 2.
9150 */
9151 new_active->m_n_slots = (mpt->m_max_requests - 2);
9152 new_active->m_size = size;
9153 new_active->m_tags = 1;
9154 if (old_active) {
9155 new_active->m_tgttbl = old_active->m_tgttbl;
9156 new_active->m_smptbl = old_active->m_smptbl;
9157 new_active->m_num_raid_configs =
9158 old_active->m_num_raid_configs;
9159 for (i = 0; i < new_active->m_num_raid_configs; i++) {
9160 new_active->m_raidconfig[i] =
9161 old_active->m_raidconfig[i];
9162 }
9163 mptsas_free_active_slots(mpt);
9164 }
9165 mpt->m_active = new_active;
9166 rval = 0;
9167
9168 return (rval);
9169 }
9170
9171 static void
9172 mptsas_free_active_slots(mptsas_t *mpt)
9173 {
9174 mptsas_slots_t *active = mpt->m_active;
9175 size_t size;
9176
9177 if (active == NULL)
9178 return;
9179 size = active->m_size;
9180 kmem_free(active, size);
9181 mpt->m_active = NULL;
9182 }
9183
9184 /*
9185 * Error logging, printing, and debug print routines.
9186 */
9187 static char *mptsas_label = "mpt_sas";
9188
/*PRINTFLIKE3*/
/*
 * Driver-wide logging: format the message into the shared
 * mptsas_log_buf (serialized by mptsas_log_mutex) and hand it to
 * scsi_log(9F).  CE_CONT messages get a trailing newline appended.
 *
 * NOTE(review): vsprintf does not bound the write into mptsas_log_buf;
 * callers must keep formatted messages shorter than the buffer.  The
 * buffer's declaration is elsewhere in this file — consider vsnprintf
 * with its size.
 */
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list	ap;

	/* With no instance, log against the system (NULL dip). */
	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	if (level == CE_CONT) {
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9216
9217 #ifdef MPTSAS_DEBUG
/*PRINTFLIKE1*/
/*
 * Debug-build printf: formats into the shared mptsas_log_buf under
 * mptsas_log_mutex and emits via prom_printf() or scsi_log(9F)
 * depending on the PROM_PRINTF build option.
 *
 * NOTE(review): like mptsas_log(), the vsprintf here is unbounded;
 * debug messages must fit in mptsas_log_buf.
 */
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
9238 #endif
9239
9240 /*
9241 * timeout handling
9242 */
/*
 * Periodic watchdog, driven by timeout(9F).  For every attached mpt
 * instance it: skips powered-down devices, resets the IOC if the
 * firmware reports a FAULT state in the doorbell, and runs the
 * per-instance timeout scan (mptsas_watchsubr).  Re-arms itself while
 * timeouts remain enabled.
 */
static void
mptsas_watch(void *arg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(arg))
#endif

	mptsas_t	*mpt;
	uint32_t	doorbell;

	NDBG30(("mptsas_watch"));

	rw_enter(&mptsas_global_rwlock, RW_READER);
	for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {

		mutex_enter(&mpt->m_mutex);

		/* Skip device if not powered on */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			if (mpt->m_power_level == PM_LEVEL_D0) {
				/* Keep PM from suspending us mid-scan. */
				(void) pm_busy_component(mpt->m_dip, 0);
				mpt->m_busy = 1;
			} else {
				mutex_exit(&mpt->m_mutex);
				continue;
			}
		}

		/*
		 * Check if controller is in a FAULT state. If so, reset it.
		 */
		doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			/* The low doorbell bits carry the fault code. */
			doorbell &= MPI2_DOORBELL_DATA_MASK;
			mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
			    "code: %04x", doorbell);
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "Reset failed"
				    "after fault was detected");
			}
		}

		/*
		 * For now, always call mptsas_watchsubr.
		 */
		mptsas_watchsubr(mpt);

		if (mpt->m_options & MPTSAS_OPT_PM) {
			mpt->m_busy = 0;
			(void) pm_idle_component(mpt->m_dip, 0);
		}

		mutex_exit(&mpt->m_mutex);
	}
	rw_exit(&mptsas_global_rwlock);

	/* Re-arm for the next tick unless timeouts were disabled. */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_timeouts_enabled)
		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
	mutex_exit(&mptsas_global_mutex);
}
9305
/*
 * Per-instance watchdog scan, called from mptsas_watch() with m_mutex
 * held.  Ages the timeout of every active command and every target:
 * stuck regular commands drain their target's throttle; expired
 * passthrough/config/FW-diag commands have their waiters woken with
 * CFLAG_TIMEOUT; targets that recovered from queue-full get their
 * throttle reopened; targets whose accumulated timeout expires trigger
 * mptsas_cmd_timeout() recovery.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			/* Regular (non-internal) commands only. */
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			/*
			 * NOTE(review): while m_timebase is still within one
			 * watchdog tick it is incremented and the target is
			 * skipped — appears to be a warm-up period before
			 * timeout accounting starts; confirm intent.
			 */
			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			if (ptgt->m_timeout < 0) {
				/* Timed out: start target reset recovery. */
				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
9406
9407 /*
9408 * timeout recovery
9409 */
9410 static void
9411 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9412 {
9413
9414 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9415 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9416 "Target %d", devhdl);
9417
9418 /*
9419 * If the current target is not the target passed in,
9420 * try to reset that target.
9421 */
9422 NDBG29(("mptsas_cmd_timeout: device reset"));
9423 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9424 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9425 "recovery failed!", devhdl);
9426 }
9427 }
9428
9429 /*
9430 * Device / Hotplug control
9431 */
9432 static int
9433 mptsas_scsi_quiesce(dev_info_t *dip)
9434 {
9435 mptsas_t *mpt;
9436 scsi_hba_tran_t *tran;
9437
9438 tran = ddi_get_driver_private(dip);
9439 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9440 return (-1);
9441
9442 return (mptsas_quiesce_bus(mpt));
9443 }
9444
9445 static int
9446 mptsas_scsi_unquiesce(dev_info_t *dip)
9447 {
9448 mptsas_t *mpt;
9449 scsi_hba_tran_t *tran;
9450
9451 tran = ddi_get_driver_private(dip);
9452 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9453 return (-1);
9454
9455 return (mptsas_unquiesce_bus(mpt));
9456 }
9457
/*
 * Quiesce the bus: hold every target's throttle and, if commands are
 * still outstanding, wait (interruptibly) for the drain to finish.
 * mptsas_ncmds_checkdrain() re-arms itself until m_ncmds reaches 0 and
 * then signals m_cv.  Returns 0 once quiesced, -1 if the wait was
 * interrupted by a signal (in which case throttles are restored and
 * I/O restarted).
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Reopen the throttles and restart queued I/O. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel any checkdrain timeout still pending;
			 * m_mutex must be dropped before untimeout() since
			 * the handler takes the same lock.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9518
9519 static int
9520 mptsas_unquiesce_bus(mptsas_t *mpt)
9521 {
9522 mptsas_target_t *ptgt = NULL;
9523
9524 NDBG28(("mptsas_unquiesce_bus"));
9525 mutex_enter(&mpt->m_mutex);
9526 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9527 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9528 MPTSAS_HASH_FIRST);
9529 while (ptgt != NULL) {
9530 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9531
9532 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9533 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9534 }
9535 mptsas_restart_hba(mpt);
9536 mutex_exit(&mpt->m_mutex);
9537 return (0);
9538 }
9539
/*
 * timeout(9F) handler armed by mptsas_quiesce_bus(): while the bus is
 * draining, signal the quiescing thread once m_ncmds reaches zero;
 * otherwise re-hold every throttle (a bus reset may have reopened them)
 * and re-arm itself for another interval.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		/* This firing consumes the pending timeout id. */
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
9573
9574 /*ARGSUSED*/
9575 static void
9576 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9577 {
9578 int i;
9579 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9580 char buf[128];
9581
9582 buf[0] = '\0';
9583 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9584 Tgt(cmd), Lun(cmd)));
9585 (void) sprintf(&buf[0], "\tcdb=[");
9586 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9587 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9588 }
9589 (void) sprintf(&buf[strlen(buf)], " ]");
9590 NDBG25(("?%s\n", buf));
9591 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9592 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9593 cmd->cmd_pkt->pkt_state));
9594 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
9595 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
9596 }
9597
9598 static void
9599 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9600 {
9601 caddr_t memp;
9602 pMPI2RequestHeader_t request_hdrp;
9603 struct scsi_pkt *pkt = cmd->cmd_pkt;
9604 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
9605 uint32_t request_size, data_size, dataout_size;
9606 uint32_t direction;
9607 ddi_dma_cookie_t data_cookie;
9608 ddi_dma_cookie_t dataout_cookie;
9609 uint32_t request_desc_low, request_desc_high = 0;
9610 uint32_t i, sense_bufp;
9611 uint8_t desc_type;
9612 uint8_t *request, function;
9613 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
9614 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
9615
9616 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9617
9618 request = pt->request;
9619 direction = pt->direction;
9620 request_size = pt->request_size;
9621 data_size = pt->data_size;
9622 dataout_size = pt->dataout_size;
9623 data_cookie = pt->data_cookie;
9624 dataout_cookie = pt->dataout_cookie;
9625
9626 /*
9627 * Store the passthrough message in memory location
9628 * corresponding to our slot number
9629 */
9630 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9631 request_hdrp = (pMPI2RequestHeader_t)memp;
9632 bzero(memp, mpt->m_req_frame_size);
9633
9634 for (i = 0; i < request_size; i++) {
9635 bcopy(request + i, memp + i, 1);
9636 }
9637
9638 if (data_size || dataout_size) {
9639 pMpi2SGESimple64_t sgep;
9640 uint32_t sge_flags;
9641
9642 sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9643 request_size);
9644 if (dataout_size) {
9645
9646 sge_flags = dataout_size |
9647 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9648 MPI2_SGE_FLAGS_END_OF_BUFFER |
9649 MPI2_SGE_FLAGS_HOST_TO_IOC |
9650 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9651 MPI2_SGE_FLAGS_SHIFT);
9652 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9653 ddi_put32(acc_hdl, &sgep->Address.Low,
9654 (uint32_t)(dataout_cookie.dmac_laddress &
9655 0xffffffffull));
9656 ddi_put32(acc_hdl, &sgep->Address.High,
9657 (uint32_t)(dataout_cookie.dmac_laddress
9658 >> 32));
9659 sgep++;
9660 }
9661 sge_flags = data_size;
9662 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9663 MPI2_SGE_FLAGS_LAST_ELEMENT |
9664 MPI2_SGE_FLAGS_END_OF_BUFFER |
9665 MPI2_SGE_FLAGS_END_OF_LIST |
9666 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9667 MPI2_SGE_FLAGS_SHIFT);
9668 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9669 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9670 MPI2_SGE_FLAGS_SHIFT);
9671 } else {
9672 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9673 MPI2_SGE_FLAGS_SHIFT);
9674 }
9675 ddi_put32(acc_hdl, &sgep->FlagsLength,
9676 sge_flags);
9677 ddi_put32(acc_hdl, &sgep->Address.Low,
9678 (uint32_t)(data_cookie.dmac_laddress &
9679 0xffffffffull));
9680 ddi_put32(acc_hdl, &sgep->Address.High,
9681 (uint32_t)(data_cookie.dmac_laddress >> 32));
9682 }
9683
9684 function = request_hdrp->Function;
9685 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9686 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9687 pMpi2SCSIIORequest_t scsi_io_req;
9688
9689 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9690 /*
9691 * Put SGE for data and data_out buffer at the end of
9692 * scsi_io_request message header.(64 bytes in total)
9693 * Following above SGEs, the residual space will be
9694 * used by sense data.
9695 */
9696 ddi_put8(acc_hdl,
9697 &scsi_io_req->SenseBufferLength,
9698 (uint8_t)(request_size - 64));
9699
9700 sense_bufp = mpt->m_req_frame_dma_addr +
9701 (mpt->m_req_frame_size * cmd->cmd_slot);
9702 sense_bufp += 64;
9703 ddi_put32(acc_hdl,
9704 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9705
9706 /*
9707 * Set SGLOffset0 value
9708 */
9709 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9710 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9711
9712 /*
9713 * Setup descriptor info. RAID passthrough must use the
9714 * default request descriptor which is already set, so if this
9715 * is a SCSI IO request, change the descriptor to SCSI IO.
9716 */
9717 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9718 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9719 request_desc_high = (ddi_get16(acc_hdl,
9720 &scsi_io_req->DevHandle) << 16);
9721 }
9722 }
9723
9724 /*
9725 * We must wait till the message has been completed before
9726 * beginning the next message so we wait for this one to
9727 * finish.
9728 */
9729 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9730 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9731 cmd->cmd_rfm = NULL;
9732 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9733 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9734 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9735 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9736 }
9737 }
9738
9739
9740
9741 static int
9742 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
9743 uint8_t *data, uint32_t request_size, uint32_t reply_size,
9744 uint32_t data_size, uint32_t direction, uint8_t *dataout,
9745 uint32_t dataout_size, short timeout, int mode)
9746 {
9747 mptsas_pt_request_t pt;
9748 mptsas_dma_alloc_state_t data_dma_state;
9749 mptsas_dma_alloc_state_t dataout_dma_state;
9750 caddr_t memp;
9751 mptsas_cmd_t *cmd = NULL;
9752 struct scsi_pkt *pkt;
9753 uint32_t reply_len = 0, sense_len = 0;
9754 pMPI2RequestHeader_t request_hdrp;
9755 pMPI2RequestHeader_t request_msg;
9756 pMPI2DefaultReply_t reply_msg;
9757 Mpi2SCSIIOReply_t rep_msg;
9758 int i, status = 0, pt_flags = 0, rv = 0;
9759 int rvalue;
9760 uint8_t function;
9761
9762 ASSERT(mutex_owned(&mpt->m_mutex));
9763
9764 reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
9765 bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
9766 request_msg = kmem_zalloc(request_size, KM_SLEEP);
9767
9768 mutex_exit(&mpt->m_mutex);
9769 /*
9770 * copy in the request buffer since it could be used by
9771 * another thread when the pt request into waitq
9772 */
9773 if (ddi_copyin(request, request_msg, request_size, mode)) {
9774 mutex_enter(&mpt->m_mutex);
9775 status = EFAULT;
9776 mptsas_log(mpt, CE_WARN, "failed to copy request data");
9777 goto out;
9778 }
9779 mutex_enter(&mpt->m_mutex);
9780
9781 function = request_msg->Function;
9782 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
9783 pMpi2SCSITaskManagementRequest_t task;
9784 task = (pMpi2SCSITaskManagementRequest_t)request_msg;
9785 mptsas_setup_bus_reset_delay(mpt);
9786 rv = mptsas_ioc_task_management(mpt, task->TaskType,
9787 task->DevHandle, (int)task->LUN[1], reply, reply_size,
9788 mode);
9789
9790 if (rv != TRUE) {
9791 status = EIO;
9792 mptsas_log(mpt, CE_WARN, "task management failed");
9793 }
9794 goto out;
9795 }
9796
9797 if (data_size != 0) {
9798 data_dma_state.size = data_size;
9799 if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
9800 status = ENOMEM;
9801 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9802 "resource");
9803 goto out;
9804 }
9805 pt_flags |= MPTSAS_DATA_ALLOCATED;
9806 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9807 mutex_exit(&mpt->m_mutex);
9808 for (i = 0; i < data_size; i++) {
9809 if (ddi_copyin(data + i, (uint8_t *)
9810 data_dma_state.memp + i, 1, mode)) {
9811 mutex_enter(&mpt->m_mutex);
9812 status = EFAULT;
9813 mptsas_log(mpt, CE_WARN, "failed to "
9814 "copy read data");
9815 goto out;
9816 }
9817 }
9818 mutex_enter(&mpt->m_mutex);
9819 }
9820 }
9821
9822 if (dataout_size != 0) {
9823 dataout_dma_state.size = dataout_size;
9824 if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
9825 status = ENOMEM;
9826 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9827 "resource");
9828 goto out;
9829 }
9830 pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
9831 mutex_exit(&mpt->m_mutex);
9832 for (i = 0; i < dataout_size; i++) {
9833 if (ddi_copyin(dataout + i, (uint8_t *)
9834 dataout_dma_state.memp + i, 1, mode)) {
9835 mutex_enter(&mpt->m_mutex);
9836 mptsas_log(mpt, CE_WARN, "failed to copy out"
9837 " data");
9838 status = EFAULT;
9839 goto out;
9840 }
9841 }
9842 mutex_enter(&mpt->m_mutex);
9843 }
9844
9845 if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
9846 status = EAGAIN;
9847 mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
9848 goto out;
9849 }
9850 pt_flags |= MPTSAS_REQUEST_POOL_CMD;
9851
9852 bzero((caddr_t)cmd, sizeof (*cmd));
9853 bzero((caddr_t)pkt, scsi_pkt_size());
9854 bzero((caddr_t)&pt, sizeof (pt));
9855
9856 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
9857
9858 pt.request = (uint8_t *)request_msg;
9859 pt.direction = direction;
9860 pt.request_size = request_size;
9861 pt.data_size = data_size;
9862 pt.dataout_size = dataout_size;
9863 pt.data_cookie = data_dma_state.cookie;
9864 pt.dataout_cookie = dataout_dma_state.cookie;
9865
9866 /*
9867 * Form a blank cmd/pkt to store the acknowledgement message
9868 */
9869 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
9870 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
9871 pkt->pkt_ha_private = (opaque_t)&pt;
9872 pkt->pkt_flags = FLAG_HEAD;
9873 pkt->pkt_time = timeout;
9874 cmd->cmd_pkt = pkt;
9875 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
9876
9877 /*
9878 * Save the command in a slot
9879 */
9880 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
9881 /*
9882 * Once passthru command get slot, set cmd_flags
9883 * CFLAG_PREPARED.
9884 */
9885 cmd->cmd_flags |= CFLAG_PREPARED;
9886 mptsas_start_passthru(mpt, cmd);
9887 } else {
9888 mptsas_waitq_add(mpt, cmd);
9889 }
9890
9891 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
9892 cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
9893 }
9894
9895 if (cmd->cmd_flags & CFLAG_PREPARED) {
9896 memp = mpt->m_req_frame + (mpt->m_req_frame_size *
9897 cmd->cmd_slot);
9898 request_hdrp = (pMPI2RequestHeader_t)memp;
9899 }
9900
9901 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
9902 status = ETIMEDOUT;
9903 mptsas_log(mpt, CE_WARN, "passthrough command timeout");
9904 pt_flags |= MPTSAS_CMD_TIMEOUT;
9905 goto out;
9906 }
9907
9908 if (cmd->cmd_rfm) {
9909 /*
9910 * cmd_rfm is zero means the command reply is a CONTEXT
9911 * reply and no PCI Write to post the free reply SMFA
9912 * because no reply message frame is used.
9913 * cmd_rfm is non-zero means the reply is a ADDRESS
9914 * reply and reply message frame is used.
9915 */
9916 pt_flags |= MPTSAS_ADDRESS_REPLY;
9917 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
9918 DDI_DMA_SYNC_FORCPU);
9919 reply_msg = (pMPI2DefaultReply_t)
9920 (mpt->m_reply_frame + (cmd->cmd_rfm -
9921 mpt->m_reply_frame_dma_addr));
9922 }
9923
9924 mptsas_fma_check(mpt, cmd);
9925 if (pkt->pkt_reason == CMD_TRAN_ERR) {
9926 status = EAGAIN;
9927 mptsas_log(mpt, CE_WARN, "passthru fma error");
9928 goto out;
9929 }
9930 if (pkt->pkt_reason == CMD_RESET) {
9931 status = EAGAIN;
9932 mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
9933 goto out;
9934 }
9935
9936 if (pkt->pkt_reason == CMD_INCOMPLETE) {
9937 status = EIO;
9938 mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
9939 goto out;
9940 }
9941
9942 mutex_exit(&mpt->m_mutex);
9943 if (cmd->cmd_flags & CFLAG_PREPARED) {
9944 function = request_hdrp->Function;
9945 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9946 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9947 reply_len = sizeof (MPI2_SCSI_IO_REPLY);
9948 sense_len = reply_size - reply_len;
9949 } else {
9950 reply_len = reply_size;
9951 sense_len = 0;
9952 }
9953
9954 for (i = 0; i < reply_len; i++) {
9955 if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
9956 mode)) {
9957 mutex_enter(&mpt->m_mutex);
9958 status = EFAULT;
9959 mptsas_log(mpt, CE_WARN, "failed to copy out "
9960 "reply data");
9961 goto out;
9962 }
9963 }
9964 for (i = 0; i < sense_len; i++) {
9965 if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
9966 reply + reply_len + i, 1, mode)) {
9967 mutex_enter(&mpt->m_mutex);
9968 status = EFAULT;
9969 mptsas_log(mpt, CE_WARN, "failed to copy out "
9970 "sense data");
9971 goto out;
9972 }
9973 }
9974 }
9975
9976 if (data_size) {
9977 if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9978 (void) ddi_dma_sync(data_dma_state.handle, 0, 0,
9979 DDI_DMA_SYNC_FORCPU);
9980 for (i = 0; i < data_size; i++) {
9981 if (ddi_copyout((uint8_t *)(
9982 data_dma_state.memp + i), data + i, 1,
9983 mode)) {
9984 mutex_enter(&mpt->m_mutex);
9985 status = EFAULT;
9986 mptsas_log(mpt, CE_WARN, "failed to "
9987 "copy out the reply data");
9988 goto out;
9989 }
9990 }
9991 }
9992 }
9993 mutex_enter(&mpt->m_mutex);
9994 out:
9995 /*
9996 * Put the reply frame back on the free queue, increment the free
9997 * index, and write the new index to the free index register. But only
9998 * if this reply is an ADDRESS reply.
9999 */
10000 if (pt_flags & MPTSAS_ADDRESS_REPLY) {
10001 ddi_put32(mpt->m_acc_free_queue_hdl,
10002 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10003 cmd->cmd_rfm);
10004 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10005 DDI_DMA_SYNC_FORDEV);
10006 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10007 mpt->m_free_index = 0;
10008 }
10009 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10010 mpt->m_free_index);
10011 }
10012 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10013 mptsas_remove_cmd(mpt, cmd);
10014 pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10015 }
10016 if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
10017 mptsas_return_to_pool(mpt, cmd);
10018 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
10019 if (mptsas_check_dma_handle(data_dma_state.handle) !=
10020 DDI_SUCCESS) {
10021 ddi_fm_service_impact(mpt->m_dip,
10022 DDI_SERVICE_UNAFFECTED);
10023 status = EFAULT;
10024 }
10025 mptsas_dma_free(&data_dma_state);
10026 }
10027 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
10028 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
10029 DDI_SUCCESS) {
10030 ddi_fm_service_impact(mpt->m_dip,
10031 DDI_SERVICE_UNAFFECTED);
10032 status = EFAULT;
10033 }
10034 mptsas_dma_free(&dataout_dma_state);
10035 }
10036 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
10037 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10038 mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
10039 }
10040 }
10041 if (request_msg)
10042 kmem_free(request_msg, request_size);
10043
10044 return (status);
10045 }
10046
10047 static int
10048 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10049 {
10050 /*
10051 * If timeout is 0, set timeout to default of 60 seconds.
10052 */
10053 if (data->Timeout == 0) {
10054 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10055 }
10056
10057 if (((data->DataSize == 0) &&
10058 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10059 ((data->DataSize != 0) &&
10060 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10061 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10062 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10063 (data->DataOutSize != 0))))) {
10064 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10065 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10066 } else {
10067 data->DataOutSize = 0;
10068 }
10069 /*
10070 * Send passthru request messages
10071 */
10072 return (mptsas_do_passthru(mpt,
10073 (uint8_t *)((uintptr_t)data->PtrRequest),
10074 (uint8_t *)((uintptr_t)data->PtrReply),
10075 (uint8_t *)((uintptr_t)data->PtrData),
10076 data->RequestSize, data->ReplySize,
10077 data->DataSize, data->DataDirection,
10078 (uint8_t *)((uintptr_t)data->PtrDataOut),
10079 data->DataOutSize, data->Timeout, mode));
10080 } else {
10081 return (EINVAL);
10082 }
10083 }
10084
10085 static uint8_t
10086 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10087 {
10088 uint8_t index;
10089
10090 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10091 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10092 return (index);
10093 }
10094 }
10095
10096 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10097 }
10098
/*
 * Build and post an FW diagnostic request (buffer POST or RELEASE) for a
 * command that already owns a request slot.  The operation to perform and
 * the target buffer come from the mptsas_diag_request_t hung off
 * pkt->pkt_ha_private; completion is reported asynchronously through the
 * normal reply path.  Called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* Build the POST request in this slot's request frame. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* ProductSpecific is copied one 32-bit word at a time. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* Anything other than POST is treated as a RELEASE. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* Default descriptor: slot number in the upper 16 bits. */
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10170
10171 static int
10172 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10173 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10174 {
10175 mptsas_diag_request_t diag;
10176 int status, slot_num, post_flags = 0;
10177 mptsas_cmd_t *cmd = NULL;
10178 struct scsi_pkt *pkt;
10179 pMpi2DiagBufferPostReply_t reply;
10180 uint16_t iocstatus;
10181 uint32_t iocloginfo, transfer_length;
10182
10183 /*
10184 * If buffer is not enabled, just leave.
10185 */
10186 *return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
10187 if (!pBuffer->enabled) {
10188 status = DDI_FAILURE;
10189 goto out;
10190 }
10191
10192 /*
10193 * Clear some flags initially.
10194 */
10195 pBuffer->force_release = FALSE;
10196 pBuffer->valid_data = FALSE;
10197 pBuffer->owned_by_firmware = FALSE;
10198
10199 /*
10200 * Get a cmd buffer from the cmd buffer pool
10201 */
10202 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10203 status = DDI_FAILURE;
10204 mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
10205 goto out;
10206 }
10207 post_flags |= MPTSAS_REQUEST_POOL_CMD;
10208
10209 bzero((caddr_t)cmd, sizeof (*cmd));
10210 bzero((caddr_t)pkt, scsi_pkt_size());
10211
10212 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10213
10214 diag.pBuffer = pBuffer;
10215 diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
10216
10217 /*
10218 * Form a blank cmd/pkt to store the acknowledgement message
10219 */
10220 pkt->pkt_ha_private = (opaque_t)&diag;
10221 pkt->pkt_flags = FLAG_HEAD;
10222 pkt->pkt_time = 60;
10223 cmd->cmd_pkt = pkt;
10224 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10225
10226 /*
10227 * Save the command in a slot
10228 */
10229 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10230 /*
10231 * Once passthru command get slot, set cmd_flags
10232 * CFLAG_PREPARED.
10233 */
10234 cmd->cmd_flags |= CFLAG_PREPARED;
10235 mptsas_start_diag(mpt, cmd);
10236 } else {
10237 mptsas_waitq_add(mpt, cmd);
10238 }
10239
10240 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10241 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10242 }
10243
10244 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10245 status = DDI_FAILURE;
10246 mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
10247 goto out;
10248 }
10249
10250 /*
10251 * cmd_rfm points to the reply message if a reply was given. Check the
10252 * IOCStatus to make sure everything went OK with the FW diag request
10253 * and set buffer flags.
10254 */
10255 if (cmd->cmd_rfm) {
10256 post_flags |= MPTSAS_ADDRESS_REPLY;
10257 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10258 DDI_DMA_SYNC_FORCPU);
10259 reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
10260 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10261
10262 /*
10263 * Get the reply message data
10264 */
10265 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10266 &reply->IOCStatus);
10267 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10268 &reply->IOCLogInfo);
10269 transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
10270 &reply->TransferLength);
10271
10272 /*
10273 * If post failed quit.
10274 */
10275 if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
10276 status = DDI_FAILURE;
10277 NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
10278 "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
10279 iocloginfo, transfer_length));
10280 goto out;
10281 }
10282
10283 /*
10284 * Post was successful.
10285 */
10286 pBuffer->valid_data = TRUE;
10287 pBuffer->owned_by_firmware = TRUE;
10288 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10289 status = DDI_SUCCESS;
10290 }
10291
10292 out:
10293 /*
10294 * Put the reply frame back on the free queue, increment the free
10295 * index, and write the new index to the free index register. But only
10296 * if this reply is an ADDRESS reply.
10297 */
10298 if (post_flags & MPTSAS_ADDRESS_REPLY) {
10299 ddi_put32(mpt->m_acc_free_queue_hdl,
10300 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10301 cmd->cmd_rfm);
10302 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10303 DDI_DMA_SYNC_FORDEV);
10304 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10305 mpt->m_free_index = 0;
10306 }
10307 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10308 mpt->m_free_index);
10309 }
10310 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10311 mptsas_remove_cmd(mpt, cmd);
10312 post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10313 }
10314 if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
10315 mptsas_return_to_pool(mpt, cmd);
10316 }
10317
10318 return (status);
10319 }
10320
10321 static int
10322 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
10323 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
10324 uint32_t diag_type)
10325 {
10326 mptsas_diag_request_t diag;
10327 int status, slot_num, rel_flags = 0;
10328 mptsas_cmd_t *cmd = NULL;
10329 struct scsi_pkt *pkt;
10330 pMpi2DiagReleaseReply_t reply;
10331 uint16_t iocstatus;
10332 uint32_t iocloginfo;
10333
10334 /*
10335 * If buffer is not enabled, just leave.
10336 */
10337 *return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
10338 if (!pBuffer->enabled) {
10339 mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
10340 "by the IOC");
10341 status = DDI_FAILURE;
10342 goto out;
10343 }
10344
10345 /*
10346 * Clear some flags initially.
10347 */
10348 pBuffer->force_release = FALSE;
10349 pBuffer->valid_data = FALSE;
10350 pBuffer->owned_by_firmware = FALSE;
10351
10352 /*
10353 * Get a cmd buffer from the cmd buffer pool
10354 */
10355 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10356 status = DDI_FAILURE;
10357 mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
10358 "Diag");
10359 goto out;
10360 }
10361 rel_flags |= MPTSAS_REQUEST_POOL_CMD;
10362
10363 bzero((caddr_t)cmd, sizeof (*cmd));
10364 bzero((caddr_t)pkt, scsi_pkt_size());
10365
10366 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10367
10368 diag.pBuffer = pBuffer;
10369 diag.function = MPI2_FUNCTION_DIAG_RELEASE;
10370
10371 /*
10372 * Form a blank cmd/pkt to store the acknowledgement message
10373 */
10374 pkt->pkt_ha_private = (opaque_t)&diag;
10375 pkt->pkt_flags = FLAG_HEAD;
10376 pkt->pkt_time = 60;
10377 cmd->cmd_pkt = pkt;
10378 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10379
10380 /*
10381 * Save the command in a slot
10382 */
10383 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10384 /*
10385 * Once passthru command get slot, set cmd_flags
10386 * CFLAG_PREPARED.
10387 */
10388 cmd->cmd_flags |= CFLAG_PREPARED;
10389 mptsas_start_diag(mpt, cmd);
10390 } else {
10391 mptsas_waitq_add(mpt, cmd);
10392 }
10393
10394 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10395 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10396 }
10397
10398 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10399 status = DDI_FAILURE;
10400 mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
10401 goto out;
10402 }
10403
10404 /*
10405 * cmd_rfm points to the reply message if a reply was given. Check the
10406 * IOCStatus to make sure everything went OK with the FW diag request
10407 * and set buffer flags.
10408 */
10409 if (cmd->cmd_rfm) {
10410 rel_flags |= MPTSAS_ADDRESS_REPLY;
10411 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10412 DDI_DMA_SYNC_FORCPU);
10413 reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
10414 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10415
10416 /*
10417 * Get the reply message data
10418 */
10419 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10420 &reply->IOCStatus);
10421 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10422 &reply->IOCLogInfo);
10423
10424 /*
10425 * If release failed quit.
10426 */
10427 if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
10428 pBuffer->owned_by_firmware) {
10429 status = DDI_FAILURE;
10430 NDBG13(("release FW Diag Buffer failed: "
10431 "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
10432 iocloginfo));
10433 goto out;
10434 }
10435
10436 /*
10437 * Release was successful.
10438 */
10439 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10440 status = DDI_SUCCESS;
10441
10442 /*
10443 * If this was for an UNREGISTER diag type command, clear the
10444 * unique ID.
10445 */
10446 if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
10447 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10448 }
10449 }
10450
10451 out:
10452 /*
10453 * Put the reply frame back on the free queue, increment the free
10454 * index, and write the new index to the free index register. But only
10455 * if this reply is an ADDRESS reply.
10456 */
10457 if (rel_flags & MPTSAS_ADDRESS_REPLY) {
10458 ddi_put32(mpt->m_acc_free_queue_hdl,
10459 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10460 cmd->cmd_rfm);
10461 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10462 DDI_DMA_SYNC_FORDEV);
10463 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10464 mpt->m_free_index = 0;
10465 }
10466 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10467 mpt->m_free_index);
10468 }
10469 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10470 mptsas_remove_cmd(mpt, cmd);
10471 rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10472 }
10473 if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
10474 mptsas_return_to_pool(mpt, cmd);
10475 }
10476
10477 return (status);
10478 }
10479
/*
 * Register an FW diagnostic buffer on behalf of an ioctl caller: validate
 * the buffer type and unique ID, allocate a contiguous DMA buffer of the
 * requested size, and post it to firmware.  On failure *return_code holds
 * an MPTSAS_FW_DIAG_ERROR_* code and the DMA buffer is freed.  Returns
 * DDI_SUCCESS/DDI_FAILURE.  Called with m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should not be found.  If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	/* One buffer slot per type: the type indexes the list directly. */
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 *
	 * NOTE(review): this early-success path returns without setting
	 * *return_code (still MPTSAS_FW_DIAG_ERROR_POST_FAILED-agnostic) —
	 * confirm the caller ignores *return_code when DDI_SUCCESS is
	 * returned.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled.  The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	/* ProductSpecific words are only meaningful for TRACE buffers. */
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
10586
10587 static int
10588 mptsas_diag_unregister(mptsas_t *mpt,
10589 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10590 {
10591 mptsas_fw_diagnostic_buffer_t *pBuffer;
10592 uint8_t i;
10593 uint32_t unique_id;
10594 int status;
10595
10596 ASSERT(mutex_owned(&mpt->m_mutex));
10597
10598 unique_id = diag_unregister->UniqueId;
10599
10600 /*
10601 * Get the current buffer and look up the unique ID. The unique ID
10602 * should be there.
10603 */
10604 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10605 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10606 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10607 return (DDI_FAILURE);
10608 }
10609
10610 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10611
10612 /*
10613 * Try to release the buffer from FW before freeing it. If release
10614 * fails, don't free the DMA buffer in case FW tries to access it
10615 * later. If buffer is not owned by firmware, can't release it.
10616 */
10617 if (!pBuffer->owned_by_firmware) {
10618 status = DDI_SUCCESS;
10619 } else {
10620 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10621 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10622 }
10623
10624 /*
10625 * At this point, return the current status no matter what happens with
10626 * the DMA buffer.
10627 */
10628 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10629 if (status == DDI_SUCCESS) {
10630 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10631 DDI_SUCCESS) {
10632 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10633 "in mptsas_diag_unregister.");
10634 ddi_fm_service_impact(mpt->m_dip,
10635 DDI_SERVICE_UNAFFECTED);
10636 }
10637 mptsas_dma_free(&pBuffer->buffer_data);
10638 }
10639
10640 return (status);
10641 }
10642
10643 static int
10644 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10645 uint32_t *return_code)
10646 {
10647 mptsas_fw_diagnostic_buffer_t *pBuffer;
10648 uint8_t i;
10649 uint32_t unique_id;
10650
10651 ASSERT(mutex_owned(&mpt->m_mutex));
10652
10653 unique_id = diag_query->UniqueId;
10654
10655 /*
10656 * If ID is valid, query on ID.
10657 * If ID is invalid, query on buffer type.
10658 */
10659 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10660 i = diag_query->BufferType;
10661 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10662 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10663 return (DDI_FAILURE);
10664 }
10665 } else {
10666 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10667 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10668 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10669 return (DDI_FAILURE);
10670 }
10671 }
10672
10673 /*
10674 * Fill query structure with the diag buffer info.
10675 */
10676 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10677 diag_query->BufferType = pBuffer->buffer_type;
10678 diag_query->ExtendedType = pBuffer->extended_type;
10679 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10680 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10681 i++) {
10682 diag_query->ProductSpecific[i] =
10683 pBuffer->product_specific[i];
10684 }
10685 }
10686 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10687 diag_query->DriverAddedBufferSize = 0;
10688 diag_query->UniqueId = pBuffer->unique_id;
10689 diag_query->ApplicationFlags = 0;
10690 diag_query->DiagnosticFlags = 0;
10691
10692 /*
10693 * Set/Clear application flags
10694 */
10695 if (pBuffer->immediate) {
10696 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10697 } else {
10698 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10699 }
10700 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10701 diag_query->ApplicationFlags |=
10702 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10703 } else {
10704 diag_query->ApplicationFlags &=
10705 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10706 }
10707 if (pBuffer->owned_by_firmware) {
10708 diag_query->ApplicationFlags |=
10709 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10710 } else {
10711 diag_query->ApplicationFlags &=
10712 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10713 }
10714
10715 return (DDI_SUCCESS);
10716 }
10717
10718 static int
10719 mptsas_diag_read_buffer(mptsas_t *mpt,
10720 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10721 uint32_t *return_code, int ioctl_mode)
10722 {
10723 mptsas_fw_diagnostic_buffer_t *pBuffer;
10724 uint8_t i, *pData;
10725 uint32_t unique_id, byte;
10726 int status;
10727
10728 ASSERT(mutex_owned(&mpt->m_mutex));
10729
10730 unique_id = diag_read_buffer->UniqueId;
10731
10732 /*
10733 * Get the current buffer and look up the unique ID. The unique ID
10734 * should be there.
10735 */
10736 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10737 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10738 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10739 return (DDI_FAILURE);
10740 }
10741
10742 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10743
10744 /*
10745 * Make sure requested read is within limits
10746 */
10747 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10748 pBuffer->buffer_data.size) {
10749 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10750 return (DDI_FAILURE);
10751 }
10752
10753 /*
10754 * Copy the requested data from DMA to the diag_read_buffer. The DMA
10755 * buffer that was allocated is one contiguous buffer.
10756 */
10757 pData = (uint8_t *)(pBuffer->buffer_data.memp +
10758 diag_read_buffer->StartingOffset);
10759 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10760 DDI_DMA_SYNC_FORCPU);
10761 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10762 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10763 != 0) {
10764 return (DDI_FAILURE);
10765 }
10766 }
10767 diag_read_buffer->Status = 0;
10768
10769 /*
10770 * Set or clear the Force Release flag.
10771 */
10772 if (pBuffer->force_release) {
10773 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10774 } else {
10775 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10776 }
10777
10778 /*
10779 * If buffer is to be reregistered, make sure it's not already owned by
10780 * firmware first.
10781 */
10782 status = DDI_SUCCESS;
10783 if (!pBuffer->owned_by_firmware) {
10784 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10785 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10786 return_code);
10787 }
10788 }
10789
10790 return (status);
10791 }
10792
10793 static int
10794 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10795 uint32_t *return_code)
10796 {
10797 mptsas_fw_diagnostic_buffer_t *pBuffer;
10798 uint8_t i;
10799 uint32_t unique_id;
10800 int status;
10801
10802 ASSERT(mutex_owned(&mpt->m_mutex));
10803
10804 unique_id = diag_release->UniqueId;
10805
10806 /*
10807 * Get the current buffer and look up the unique ID. The unique ID
10808 * should be there.
10809 */
10810 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10811 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10812 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10813 return (DDI_FAILURE);
10814 }
10815
10816 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10817
10818 /*
10819 * If buffer is not owned by firmware, it's already been released.
10820 */
10821 if (!pBuffer->owned_by_firmware) {
10822 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10823 return (DDI_FAILURE);
10824 }
10825
10826 /*
10827 * Release the buffer.
10828 */
10829 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10830 MPTSAS_FW_DIAG_TYPE_RELEASE);
10831 return (status);
10832 }
10833
10834 static int
10835 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10836 uint32_t length, uint32_t *return_code, int ioctl_mode)
10837 {
10838 mptsas_fw_diag_register_t diag_register;
10839 mptsas_fw_diag_unregister_t diag_unregister;
10840 mptsas_fw_diag_query_t diag_query;
10841 mptsas_diag_read_buffer_t diag_read_buffer;
10842 mptsas_fw_diag_release_t diag_release;
10843 int status = DDI_SUCCESS;
10844 uint32_t original_return_code, read_buf_len;
10845
10846 ASSERT(mutex_owned(&mpt->m_mutex));
10847
10848 original_return_code = *return_code;
10849 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10850
10851 switch (action) {
10852 case MPTSAS_FW_DIAG_TYPE_REGISTER:
10853 if (!length) {
10854 *return_code =
10855 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10856 status = DDI_FAILURE;
10857 break;
10858 }
10859 if (ddi_copyin(diag_action, &diag_register,
10860 sizeof (diag_register), ioctl_mode) != 0) {
10861 return (DDI_FAILURE);
10862 }
10863 status = mptsas_diag_register(mpt, &diag_register,
10864 return_code);
10865 break;
10866
10867 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10868 if (length < sizeof (diag_unregister)) {
10869 *return_code =
10870 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10871 status = DDI_FAILURE;
10872 break;
10873 }
10874 if (ddi_copyin(diag_action, &diag_unregister,
10875 sizeof (diag_unregister), ioctl_mode) != 0) {
10876 return (DDI_FAILURE);
10877 }
10878 status = mptsas_diag_unregister(mpt, &diag_unregister,
10879 return_code);
10880 break;
10881
10882 case MPTSAS_FW_DIAG_TYPE_QUERY:
10883 if (length < sizeof (diag_query)) {
10884 *return_code =
10885 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10886 status = DDI_FAILURE;
10887 break;
10888 }
10889 if (ddi_copyin(diag_action, &diag_query,
10890 sizeof (diag_query), ioctl_mode) != 0) {
10891 return (DDI_FAILURE);
10892 }
10893 status = mptsas_diag_query(mpt, &diag_query,
10894 return_code);
10895 if (status == DDI_SUCCESS) {
10896 if (ddi_copyout(&diag_query, diag_action,
10897 sizeof (diag_query), ioctl_mode) != 0) {
10898 return (DDI_FAILURE);
10899 }
10900 }
10901 break;
10902
10903 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10904 if (ddi_copyin(diag_action, &diag_read_buffer,
10905 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10906 return (DDI_FAILURE);
10907 }
10908 read_buf_len = sizeof (diag_read_buffer) -
10909 sizeof (diag_read_buffer.DataBuffer) +
10910 diag_read_buffer.BytesToRead;
10911 if (length < read_buf_len) {
10912 *return_code =
10913 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10914 status = DDI_FAILURE;
10915 break;
10916 }
10917 status = mptsas_diag_read_buffer(mpt,
10918 &diag_read_buffer, diag_action +
10919 sizeof (diag_read_buffer) - 4, return_code,
10920 ioctl_mode);
10921 if (status == DDI_SUCCESS) {
10922 if (ddi_copyout(&diag_read_buffer, diag_action,
10923 sizeof (diag_read_buffer) - 4, ioctl_mode)
10924 != 0) {
10925 return (DDI_FAILURE);
10926 }
10927 }
10928 break;
10929
10930 case MPTSAS_FW_DIAG_TYPE_RELEASE:
10931 if (length < sizeof (diag_release)) {
10932 *return_code =
10933 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10934 status = DDI_FAILURE;
10935 break;
10936 }
10937 if (ddi_copyin(diag_action, &diag_release,
10938 sizeof (diag_release), ioctl_mode) != 0) {
10939 return (DDI_FAILURE);
10940 }
10941 status = mptsas_diag_release(mpt, &diag_release,
10942 return_code);
10943 break;
10944
10945 default:
10946 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10947 status = DDI_FAILURE;
10948 break;
10949 }
10950
10951 if ((status == DDI_FAILURE) &&
10952 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
10953 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
10954 status = DDI_SUCCESS;
10955 }
10956
10957 return (status);
10958 }
10959
10960 static int
10961 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
10962 {
10963 int status;
10964 mptsas_diag_action_t driver_data;
10965
10966 ASSERT(mutex_owned(&mpt->m_mutex));
10967
10968 /*
10969 * Copy the user data to a driver data buffer.
10970 */
10971 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
10972 mode) == 0) {
10973 /*
10974 * Send diag action request if Action is valid
10975 */
10976 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
10977 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
10978 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
10979 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
10980 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
10981 status = mptsas_do_diag_action(mpt, driver_data.Action,
10982 (void *)(uintptr_t)driver_data.PtrDiagAction,
10983 driver_data.Length, &driver_data.ReturnCode,
10984 mode);
10985 if (status == DDI_SUCCESS) {
10986 if (ddi_copyout(&driver_data.ReturnCode,
10987 &user_data->ReturnCode,
10988 sizeof (user_data->ReturnCode), mode)
10989 != 0) {
10990 status = EFAULT;
10991 } else {
10992 status = 0;
10993 }
10994 } else {
10995 status = EIO;
10996 }
10997 } else {
10998 status = EINVAL;
10999 }
11000 } else {
11001 status = EFAULT;
11002 }
11003
11004 return (status);
11005 }
11006
11007 /*
11008 * This routine handles the "event query" ioctl.
11009 */
11010 static int
11011 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11012 int *rval)
11013 {
11014 int status;
11015 mptsas_event_query_t driverdata;
11016 uint8_t i;
11017
11018 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11019
11020 mutex_enter(&mpt->m_mutex);
11021 for (i = 0; i < 4; i++) {
11022 driverdata.Types[i] = mpt->m_event_mask[i];
11023 }
11024 mutex_exit(&mpt->m_mutex);
11025
11026 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11027 status = EFAULT;
11028 } else {
11029 *rval = MPTIOCTL_STATUS_GOOD;
11030 status = 0;
11031 }
11032
11033 return (status);
11034 }
11035
11036 /*
11037 * This routine handles the "event enable" ioctl.
11038 */
11039 static int
11040 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11041 int *rval)
11042 {
11043 int status;
11044 mptsas_event_enable_t driverdata;
11045 uint8_t i;
11046
11047 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11048 mutex_enter(&mpt->m_mutex);
11049 for (i = 0; i < 4; i++) {
11050 mpt->m_event_mask[i] = driverdata.Types[i];
11051 }
11052 mutex_exit(&mpt->m_mutex);
11053
11054 *rval = MPTIOCTL_STATUS_GOOD;
11055 status = 0;
11056 } else {
11057 status = EFAULT;
11058 }
11059 return (status);
11060 }
11061
11062 /*
11063 * This routine handles the "event report" ioctl.
11064 */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_report_t	driverdata;

	mutex_enter(&mpt->m_mutex);

	/* Read only the Size field first so we know how much the user wants. */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			/* User buffer is large enough: copy the whole log. */
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * If the user buffer was larger than the log,
				 * write back the actual size so the caller
				 * knows how much of it is valid.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			/* Buffer too small: report via *rval, not errno. */
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11109
11110 static void
11111 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11112 {
11113 int *reg_data;
11114 uint_t reglen;
11115
11116 /*
11117 * Lookup the 'reg' property and extract the other data
11118 */
11119 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11120 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11121 DDI_PROP_SUCCESS) {
11122 /*
11123 * Extract the PCI data from the 'reg' property first DWORD.
11124 * The entry looks like the following:
11125 * First DWORD:
11126 * Bits 0 - 7 8-bit Register number
11127 * Bits 8 - 10 3-bit Function number
11128 * Bits 11 - 15 5-bit Device number
11129 * Bits 16 - 23 8-bit Bus number
11130 * Bits 24 - 25 2-bit Address Space type identifier
11131 *
11132 */
11133 adapter_data->PciInformation.u.bits.BusNumber =
11134 (reg_data[0] & 0x00FF0000) >> 16;
11135 adapter_data->PciInformation.u.bits.DeviceNumber =
11136 (reg_data[0] & 0x0000F800) >> 11;
11137 adapter_data->PciInformation.u.bits.FunctionNumber =
11138 (reg_data[0] & 0x00000700) >> 8;
11139 ddi_prop_free((void *)reg_data);
11140 } else {
11141 /*
11142 * If we can't determine the PCI data then we fill in FF's for
11143 * the data to indicate this.
11144 */
11145 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11146 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11147 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11148 }
11149
11150 /*
11151 * Saved in the mpt->m_fwversion
11152 */
11153 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11154 }
11155
11156 static void
11157 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11158 {
11159 char *driver_verstr = MPTSAS_MOD_STRING;
11160
11161 mptsas_lookup_pci_data(mpt, adapter_data);
11162 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11163 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11164 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11165 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11166 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11167 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11168 adapter_data->BiosVersion = 0;
11169 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11170 }
11171
11172 static void
11173 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11174 {
11175 int *reg_data, i;
11176 uint_t reglen;
11177
11178 /*
11179 * Lookup the 'reg' property and extract the other data
11180 */
11181 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11182 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11183 DDI_PROP_SUCCESS) {
11184 /*
11185 * Extract the PCI data from the 'reg' property first DWORD.
11186 * The entry looks like the following:
11187 * First DWORD:
11188 * Bits 8 - 10 3-bit Function number
11189 * Bits 11 - 15 5-bit Device number
11190 * Bits 16 - 23 8-bit Bus number
11191 */
11192 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11193 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11194 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11195 ddi_prop_free((void *)reg_data);
11196 } else {
11197 /*
11198 * If we can't determine the PCI info then we fill in FF's for
11199 * the data to indicate this.
11200 */
11201 pci_info->BusNumber = 0xFFFFFFFF;
11202 pci_info->DeviceNumber = 0xFF;
11203 pci_info->FunctionNumber = 0xFF;
11204 }
11205
11206 /*
11207 * Now get the interrupt vector and the pci header. The vector can
11208 * only be 0 right now. The header is the first 256 bytes of config
11209 * space.
11210 */
11211 pci_info->InterruptVector = 0;
11212 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11213 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11214 i);
11215 }
11216 }
11217
/*
 * REG_ACCESS ioctl: read or write a 32-bit chip register on behalf of a
 * privileged user.  Only memory-space access is supported; I/O-space
 * commands are rejected.
 *
 * NOTE(review): driverdata.RegOffset comes from userland and is added to
 * the register base as a uint32_t index (i.e. an offset in DWORDs, not
 * bytes — confirm against the mptsas_reg_access_t definition) without any
 * range check against the mapped register window.  An out-of-range offset
 * would access past the mapping; callers are root-only, but verify whether
 * a bounds check should be added here.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int			status = 0;
	mptsas_reg_access_t	driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
			/*
			 * IO access is not supported.
			 */
			case REG_IO_READ:
			case REG_IO_WRITE:
				mptsas_log(mpt, CE_WARN, "IO access is not "
				    "supported.  Use memory access.");
				status = EINVAL;
				break;

			case REG_MEM_READ:
				/* Read the register and copy it back out. */
				driverdata.RegData = ddi_get32(mpt->m_datap,
				    (uint32_t *)(void *)mpt->m_reg +
				    driverdata.RegOffset);
				if (ddi_copyout(&driverdata.RegData,
				    &data->RegData,
				    sizeof (driverdata.RegData), mode) != 0) {
					mptsas_log(mpt, CE_WARN, "Register "
					    "Read Failed");
					status = EFAULT;
				}
				break;

			case REG_MEM_WRITE:
				/* Write the user-supplied value directly. */
				ddi_put32(mpt->m_datap,
				    (uint32_t *)(void *)mpt->m_reg +
				    driverdata.RegOffset,
				    driverdata.RegData);
				break;

			default:
				status = EINVAL;
				break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11268
11269 static int
11270 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11271 int *rval)
11272 {
11273 int status = 0;
11274 mptsas_t *mpt;
11275 mptsas_update_flash_t flashdata;
11276 mptsas_pass_thru_t passthru_data;
11277 mptsas_adapter_data_t adapter_data;
11278 mptsas_pci_info_t pci_info;
11279 int copylen;
11280
11281 int iport_flag = 0;
11282 dev_info_t *dip = NULL;
11283 mptsas_phymask_t phymask = 0;
11284 struct devctl_iocdata *dcp = NULL;
11285 uint32_t slotstatus = 0;
11286 char *addr = NULL;
11287 mptsas_target_t *ptgt = NULL;
11288
11289 *rval = MPTIOCTL_STATUS_GOOD;
11290 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11291 return (EPERM);
11292 }
11293
11294 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11295 if (mpt == NULL) {
11296 /*
11297 * Called from iport node, get the states
11298 */
11299 iport_flag = 1;
11300 dip = mptsas_get_dip_from_dev(dev, &phymask);
11301 if (dip == NULL) {
11302 return (ENXIO);
11303 }
11304 mpt = DIP2MPT(dip);
11305 }
11306 /* Make sure power level is D0 before accessing registers */
11307 mutex_enter(&mpt->m_mutex);
11308 if (mpt->m_options & MPTSAS_OPT_PM) {
11309 (void) pm_busy_component(mpt->m_dip, 0);
11310 if (mpt->m_power_level != PM_LEVEL_D0) {
11311 mutex_exit(&mpt->m_mutex);
11312 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11313 DDI_SUCCESS) {
11314 mptsas_log(mpt, CE_WARN,
11315 "mptsas%d: mptsas_ioctl: Raise power "
11316 "request failed.", mpt->m_instance);
11317 (void) pm_idle_component(mpt->m_dip, 0);
11318 return (ENXIO);
11319 }
11320 } else {
11321 mutex_exit(&mpt->m_mutex);
11322 }
11323 } else {
11324 mutex_exit(&mpt->m_mutex);
11325 }
11326
11327 if (iport_flag) {
11328 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
11329 if (status != 0) {
11330 goto out;
11331 }
11332 /*
11333 * The following code control the OK2RM LED, it doesn't affect
11334 * the ioctl return status.
11335 */
11336 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
11337 (cmd == DEVCTL_DEVICE_OFFLINE)) {
11338 if (ndi_dc_allochdl((void *)data, &dcp) !=
11339 NDI_SUCCESS) {
11340 goto out;
11341 }
11342 addr = ndi_dc_getaddr(dcp);
11343 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
11344 if (ptgt == NULL) {
11345 NDBG14(("mptsas_ioctl led control: tgt %s not "
11346 "found", addr));
11347 ndi_dc_freehdl(dcp);
11348 goto out;
11349 }
11350 mutex_enter(&mpt->m_mutex);
11351 if (cmd == DEVCTL_DEVICE_ONLINE) {
11352 ptgt->m_tgt_unconfigured = 0;
11353 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
11354 ptgt->m_tgt_unconfigured = 1;
11355 }
11356 slotstatus = 0;
11357 #ifdef MPTSAS_GET_LED
11358 /*
11359 * The get led status can't get a valid/reasonable
11360 * state, so ignore the get led status, and write the
11361 * required value directly
11362 */
11363 if (mptsas_get_led_status(mpt, ptgt, &slotstatus) !=
11364 DDI_SUCCESS) {
11365 NDBG14(("mptsas_ioctl: get LED for tgt %s "
11366 "failed %x", addr, slotstatus));
11367 slotstatus = 0;
11368 }
11369 NDBG14(("mptsas_ioctl: LED status %x for %s",
11370 slotstatus, addr));
11371 #endif
11372 if (cmd == DEVCTL_DEVICE_OFFLINE) {
11373 slotstatus |=
11374 MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
11375 } else {
11376 slotstatus &=
11377 ~MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
11378 }
11379 if (mptsas_set_led_status(mpt, ptgt, slotstatus) !=
11380 DDI_SUCCESS) {
11381 NDBG14(("mptsas_ioctl: set LED for tgt %s "
11382 "failed %x", addr, slotstatus));
11383 }
11384 mutex_exit(&mpt->m_mutex);
11385 ndi_dc_freehdl(dcp);
11386 }
11387 goto out;
11388 }
11389 switch (cmd) {
11390 case MPTIOCTL_UPDATE_FLASH:
11391 if (ddi_copyin((void *)data, &flashdata,
11392 sizeof (struct mptsas_update_flash), mode)) {
11393 status = EFAULT;
11394 break;
11395 }
11396
11397 mutex_enter(&mpt->m_mutex);
11398 if (mptsas_update_flash(mpt,
11399 (caddr_t)(long)flashdata.PtrBuffer,
11400 flashdata.ImageSize, flashdata.ImageType, mode)) {
11401 status = EFAULT;
11402 }
11403
11404 /*
11405 * Reset the chip to start using the new
11406 * firmware. Reset if failed also.
11407 */
11408 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11409 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
11410 status = EFAULT;
11411 }
11412 mutex_exit(&mpt->m_mutex);
11413 break;
11414 case MPTIOCTL_PASS_THRU:
11415 /*
11416 * The user has requested to pass through a command to
11417 * be executed by the MPT firmware. Call our routine
11418 * which does this. Only allow one passthru IOCTL at
11419 * one time. Other threads will block on
11420 * m_passthru_mutex, which is of adaptive variant.
11421 */
11422 if (ddi_copyin((void *)data, &passthru_data,
11423 sizeof (mptsas_pass_thru_t), mode)) {
11424 status = EFAULT;
11425 break;
11426 }
11427 mutex_enter(&mpt->m_passthru_mutex);
11428 mutex_enter(&mpt->m_mutex);
11429 status = mptsas_pass_thru(mpt, &passthru_data, mode);
11430 mutex_exit(&mpt->m_mutex);
11431 mutex_exit(&mpt->m_passthru_mutex);
11432
11433 break;
11434 case MPTIOCTL_GET_ADAPTER_DATA:
11435 /*
11436 * The user has requested to read adapter data. Call
11437 * our routine which does this.
11438 */
11439 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
11440 if (ddi_copyin((void *)data, (void *)&adapter_data,
11441 sizeof (mptsas_adapter_data_t), mode)) {
11442 status = EFAULT;
11443 break;
11444 }
11445 if (adapter_data.StructureLength >=
11446 sizeof (mptsas_adapter_data_t)) {
11447 adapter_data.StructureLength = (uint32_t)
11448 sizeof (mptsas_adapter_data_t);
11449 copylen = sizeof (mptsas_adapter_data_t);
11450 mutex_enter(&mpt->m_mutex);
11451 mptsas_read_adapter_data(mpt, &adapter_data);
11452 mutex_exit(&mpt->m_mutex);
11453 } else {
11454 adapter_data.StructureLength = (uint32_t)
11455 sizeof (mptsas_adapter_data_t);
11456 copylen = sizeof (adapter_data.StructureLength);
11457 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11458 }
11459 if (ddi_copyout((void *)(&adapter_data), (void *)data,
11460 copylen, mode) != 0) {
11461 status = EFAULT;
11462 }
11463 break;
11464 case MPTIOCTL_GET_PCI_INFO:
11465 /*
11466 * The user has requested to read pci info. Call
11467 * our routine which does this.
11468 */
11469 bzero(&pci_info, sizeof (mptsas_pci_info_t));
11470 mutex_enter(&mpt->m_mutex);
11471 mptsas_read_pci_info(mpt, &pci_info);
11472 mutex_exit(&mpt->m_mutex);
11473 if (ddi_copyout((void *)(&pci_info), (void *)data,
11474 sizeof (mptsas_pci_info_t), mode) != 0) {
11475 status = EFAULT;
11476 }
11477 break;
11478 case MPTIOCTL_RESET_ADAPTER:
11479 mutex_enter(&mpt->m_mutex);
11480 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11481 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11482 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
11483 "failed");
11484 status = EFAULT;
11485 }
11486 mutex_exit(&mpt->m_mutex);
11487 break;
11488 case MPTIOCTL_DIAG_ACTION:
11489 /*
11490 * The user has done a diag buffer action. Call our
11491 * routine which does this. Only allow one diag action
11492 * at one time.
11493 */
11494 mutex_enter(&mpt->m_mutex);
11495 if (mpt->m_diag_action_in_progress) {
11496 mutex_exit(&mpt->m_mutex);
11497 status = EBUSY;
11498 goto out;
11499 }
11500 mpt->m_diag_action_in_progress = 1;
11501 status = mptsas_diag_action(mpt,
11502 (mptsas_diag_action_t *)data, mode);
11503 mpt->m_diag_action_in_progress = 0;
11504 mutex_exit(&mpt->m_mutex);
11505 break;
11506 case MPTIOCTL_EVENT_QUERY:
11507 /*
11508 * The user has done an event query. Call our routine
11509 * which does this.
11510 */
11511 status = mptsas_event_query(mpt,
11512 (mptsas_event_query_t *)data, mode, rval);
11513 break;
11514 case MPTIOCTL_EVENT_ENABLE:
11515 /*
11516 * The user has done an event enable. Call our routine
11517 * which does this.
11518 */
11519 status = mptsas_event_enable(mpt,
11520 (mptsas_event_enable_t *)data, mode, rval);
11521 break;
11522 case MPTIOCTL_EVENT_REPORT:
11523 /*
11524 * The user has done an event report. Call our routine
11525 * which does this.
11526 */
11527 status = mptsas_event_report(mpt,
11528 (mptsas_event_report_t *)data, mode, rval);
11529 break;
11530 case MPTIOCTL_REG_ACCESS:
11531 /*
11532 * The user has requested register access. Call our
11533 * routine which does this.
11534 */
11535 status = mptsas_reg_access(mpt,
11536 (mptsas_reg_access_t *)data, mode);
11537 break;
11538 default:
11539 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
11540 rval);
11541 break;
11542 }
11543
11544 out:
11545 return (status);
11546 }
11547
/*
 * Reset and reinitialize the IOC, then resume I/O.  Called with m_mutex
 * held (asserted below).  The sequence is order-critical: hold all target
 * throttles, disable interrupts, flush every outstanding/waiting command,
 * re-init the chip, then re-enable interrupts and restore throttles before
 * restarting the HBA queues.  m_in_reset marks the window during which
 * arriving I/Os must be set up for retry rather than failed.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int		rval = DDI_SUCCESS;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	/* A failed re-init means the device is gone; tell the FMA framework. */
	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
11634
/*
 * Bring the IOC from reset to operational state.  'first_time' is passed to
 * the reset logic; on a message-unit reset (MUR) of an already-initialized
 * chip the queue/frame allocations are kept and setup skips to 'mur:'.
 * Returns DDI_SUCCESS or DDI_FAILURE.  The ordering below (reset, facts,
 * allocations, IOC init, free/post queue priming, port enable, event
 * enable, FM handle checks) follows the MPI2 initialization sequence.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/* A successful MUR on a re-init keeps existing buffers. */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* FM: validate every DMA handle used during initialization. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
11812
11813 static int
11814 mptsas_get_pci_cap(mptsas_t *mpt)
11815 {
11816 ushort_t caps_ptr, cap, cap_count;
11817
11818 if (mpt->m_config_handle == NULL)
11819 return (FALSE);
11820 /*
11821 * Check if capabilities list is supported and if so,
11822 * get initial capabilities pointer and clear bits 0,1.
11823 */
11824 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
11825 & PCI_STAT_CAP) {
11826 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
11827 PCI_CONF_CAP_PTR), 4);
11828 } else {
11829 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
11830 }
11831
11832 /*
11833 * Walk capabilities if supported.
11834 */
11835 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
11836
11837 /*
11838 * Check that we haven't exceeded the maximum number of
11839 * capabilities and that the pointer is in a valid range.
11840 */
11841 if (++cap_count > 48) {
11842 mptsas_log(mpt, CE_WARN,
11843 "too many device capabilities.\n");
11844 break;
11845 }
11846 if (caps_ptr < 64) {
11847 mptsas_log(mpt, CE_WARN,
11848 "capabilities pointer 0x%x out of range.\n",
11849 caps_ptr);
11850 break;
11851 }
11852
11853 /*
11854 * Get next capability and check that it is valid.
11855 * For now, we only support power management.
11856 */
11857 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
11858 switch (cap) {
11859 case PCI_CAP_ID_PM:
11860 mptsas_log(mpt, CE_NOTE,
11861 "?mptsas%d supports power management.\n",
11862 mpt->m_instance);
11863 mpt->m_options |= MPTSAS_OPT_PM;
11864
11865 /* Save PMCSR offset */
11866 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
11867 break;
11868 /*
11869 * The following capabilities are valid. Any others
11870 * will cause a message to be logged.
11871 */
11872 case PCI_CAP_ID_VPD:
11873 case PCI_CAP_ID_MSI:
11874 case PCI_CAP_ID_PCIX:
11875 case PCI_CAP_ID_PCI_E:
11876 case PCI_CAP_ID_MSI_X:
11877 break;
11878 default:
11879 mptsas_log(mpt, CE_NOTE,
11880 "?mptsas%d unrecognized capability "
11881 "0x%x.\n", mpt->m_instance, cap);
11882 break;
11883 }
11884
11885 /*
11886 * Get next capabilities pointer and clear bits 0,1.
11887 */
11888 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
11889 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
11890 }
11891 return (TRUE);
11892 }
11893
11894 static int
11895 mptsas_init_pm(mptsas_t *mpt)
11896 {
11897 char pmc_name[16];
11898 char *pmc[] = {
11899 NULL,
11900 "0=Off (PCI D3 State)",
11901 "3=On (PCI D0 State)",
11902 NULL
11903 };
11904 uint16_t pmcsr_stat;
11905
11906 if (mptsas_get_pci_cap(mpt) == FALSE) {
11907 return (DDI_FAILURE);
11908 }
11909 /*
11910 * If PCI's capability does not support PM, then don't need
11911 * to registe the pm-components
11912 */
11913 if (!(mpt->m_options & MPTSAS_OPT_PM))
11914 return (DDI_SUCCESS);
11915 /*
11916 * If power management is supported by this chip, create
11917 * pm-components property for the power management framework
11918 */
11919 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
11920 pmc[0] = pmc_name;
11921 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
11922 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
11923 mpt->m_options &= ~MPTSAS_OPT_PM;
11924 mptsas_log(mpt, CE_WARN,
11925 "mptsas%d: pm-component property creation failed.",
11926 mpt->m_instance);
11927 return (DDI_FAILURE);
11928 }
11929
11930 /*
11931 * Power on device.
11932 */
11933 (void) pm_busy_component(mpt->m_dip, 0);
11934 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
11935 mpt->m_pmcsr_offset);
11936 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
11937 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
11938 mpt->m_instance);
11939 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
11940 PCI_PMCSR_D0);
11941 }
11942 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
11943 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
11944 return (DDI_FAILURE);
11945 }
11946 mpt->m_power_level = PM_LEVEL_D0;
11947 /*
11948 * Set pm idle delay.
11949 */
11950 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
11951 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
11952
11953 return (DDI_SUCCESS);
11954 }
11955
11956 static int
11957 mptsas_register_intrs(mptsas_t *mpt)
11958 {
11959 dev_info_t *dip;
11960 int intr_types;
11961
11962 dip = mpt->m_dip;
11963
11964 /* Get supported interrupt types */
11965 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
11966 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
11967 "failed\n");
11968 return (FALSE);
11969 }
11970
11971 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
11972
11973 /*
11974 * Try MSI, but fall back to FIXED
11975 */
11976 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
11977 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
11978 NDBG0(("Using MSI interrupt type"));
11979 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
11980 return (TRUE);
11981 }
11982 }
11983 if (intr_types & DDI_INTR_TYPE_FIXED) {
11984 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
11985 NDBG0(("Using FIXED interrupt type"));
11986 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
11987 return (TRUE);
11988 } else {
11989 NDBG0(("FIXED interrupt registration failed"));
11990 return (FALSE);
11991 }
11992 }
11993
11994 return (FALSE);
11995 }
11996
/*
 * Tear down whatever interrupt registration mptsas_register_intrs() set up.
 * Thin wrapper kept as the symmetric counterpart of the register routine.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12002
12003 /*
12004 * mptsas_add_intrs:
12005 *
12006 * Register FIXED or MSI interrupts.
12007 */
12008 static int
12009 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12010 {
12011 dev_info_t *dip = mpt->m_dip;
12012 int avail, actual, count = 0;
12013 int i, flag, ret;
12014
12015 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12016
12017 /* Get number of interrupts */
12018 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12019 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12020 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12021 "ret %d count %d\n", ret, count);
12022
12023 return (DDI_FAILURE);
12024 }
12025
12026 /* Get number of available interrupts */
12027 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12028 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12029 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12030 "ret %d avail %d\n", ret, avail);
12031
12032 return (DDI_FAILURE);
12033 }
12034
12035 if (avail < count) {
12036 mptsas_log(mpt, CE_CONT, "!ddi_intr_get_nvail returned %d, "
12037 "navail() returned %d", count, avail);
12038 }
12039
12040 /* Mpt only have one interrupt routine */
12041 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12042 count = 1;
12043 }
12044
12045 /* Allocate an array of interrupt handles */
12046 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12047 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12048
12049 flag = DDI_INTR_ALLOC_NORMAL;
12050
12051 /* call ddi_intr_alloc() */
12052 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12053 count, &actual, flag);
12054
12055 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12056 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12057 ret);
12058 kmem_free(mpt->m_htable, mpt->m_intr_size);
12059 return (DDI_FAILURE);
12060 }
12061
12062 /* use interrupt count returned or abort? */
12063 if (actual < count) {
12064 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12065 count, actual);
12066 }
12067
12068 mpt->m_intr_cnt = actual;
12069
12070 /*
12071 * Get priority for first msi, assume remaining are all the same
12072 */
12073 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12074 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12075 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12076
12077 /* Free already allocated intr */
12078 for (i = 0; i < actual; i++) {
12079 (void) ddi_intr_free(mpt->m_htable[i]);
12080 }
12081
12082 kmem_free(mpt->m_htable, mpt->m_intr_size);
12083 return (DDI_FAILURE);
12084 }
12085
12086 /* Test for high level mutex */
12087 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12088 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12089 "Hi level interrupt not supported\n");
12090
12091 /* Free already allocated intr */
12092 for (i = 0; i < actual; i++) {
12093 (void) ddi_intr_free(mpt->m_htable[i]);
12094 }
12095
12096 kmem_free(mpt->m_htable, mpt->m_intr_size);
12097 return (DDI_FAILURE);
12098 }
12099
12100 /* Call ddi_intr_add_handler() */
12101 for (i = 0; i < actual; i++) {
12102 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12103 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12104 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12105 "failed %d\n", ret);
12106
12107 /* Free already allocated intr */
12108 for (i = 0; i < actual; i++) {
12109 (void) ddi_intr_free(mpt->m_htable[i]);
12110 }
12111
12112 kmem_free(mpt->m_htable, mpt->m_intr_size);
12113 return (DDI_FAILURE);
12114 }
12115 }
12116
12117 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12118 != DDI_SUCCESS) {
12119 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12120
12121 /* Free already allocated intr */
12122 for (i = 0; i < actual; i++) {
12123 (void) ddi_intr_free(mpt->m_htable[i]);
12124 }
12125
12126 kmem_free(mpt->m_htable, mpt->m_intr_size);
12127 return (DDI_FAILURE);
12128 }
12129
12130 /*
12131 * Enable interrupts
12132 */
12133 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12134 /* Call ddi_intr_block_enable() for MSI interrupts */
12135 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12136 } else {
12137 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12138 for (i = 0; i < mpt->m_intr_cnt; i++) {
12139 (void) ddi_intr_enable(mpt->m_htable[i]);
12140 }
12141 }
12142 return (DDI_SUCCESS);
12143 }
12144
12145 /*
12146 * mptsas_rem_intrs:
12147 *
12148 * Unregister FIXED or MSI interrupts
12149 */
12150 static void
12151 mptsas_rem_intrs(mptsas_t *mpt)
12152 {
12153 int i;
12154
12155 NDBG6(("mptsas_rem_intrs"));
12156
12157 /* Disable all interrupts */
12158 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12159 /* Call ddi_intr_block_disable() */
12160 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12161 } else {
12162 for (i = 0; i < mpt->m_intr_cnt; i++) {
12163 (void) ddi_intr_disable(mpt->m_htable[i]);
12164 }
12165 }
12166
12167 /* Call ddi_intr_remove_handler() */
12168 for (i = 0; i < mpt->m_intr_cnt; i++) {
12169 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12170 (void) ddi_intr_free(mpt->m_htable[i]);
12171 }
12172
12173 kmem_free(mpt->m_htable, mpt->m_intr_size);
12174 }
12175
12176 /*
12177 * The IO fault service error handling callback function
12178 */
12179 /*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * Registered via ddi_fm_handler_register() in mptsas_fm_init().
	 * As the driver can always deal with an error in any dma or
	 * access handle, we can just post the PCI ereport and return the
	 * fme_status value unchanged.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
12190
12191 /*
12192 * mptsas_fm_init - initialize fma capabilities and register with IO
12193 * fault services.
12194 */
12195 static void
12196 mptsas_fm_init(mptsas_t *mpt)
12197 {
12198 /*
12199 * Need to change iblock to priority for new MSI intr
12200 */
12201 ddi_iblock_cookie_t fm_ibc;
12202
12203 /* Only register with IO Fault Services if we have some capability */
12204 if (mpt->m_fm_capabilities) {
12205 /* Adjust access and dma attributes for FMA */
12206 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12207 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12208 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12209
12210 /*
12211 * Register capabilities with IO Fault Services.
12212 * mpt->m_fm_capabilities will be updated to indicate
12213 * capabilities actually supported (not requested.)
12214 */
12215 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12216
12217 /*
12218 * Initialize pci ereport capabilities if ereport
12219 * capable (should always be.)
12220 */
12221 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12222 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12223 pci_ereport_setup(mpt->m_dip);
12224 }
12225
12226 /*
12227 * Register error callback if error callback capable.
12228 */
12229 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12230 ddi_fm_handler_register(mpt->m_dip,
12231 mptsas_fm_error_cb, (void *) mpt);
12232 }
12233 }
12234 }
12235
12236 /*
12237 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12238 * fault services.
12239 *
12240 */
12241 static void
12242 mptsas_fm_fini(mptsas_t *mpt)
12243 {
12244 /* Only unregister FMA capabilities if registered */
12245 if (mpt->m_fm_capabilities) {
12246
12247 /*
12248 * Un-register error callback if error callback capable.
12249 */
12250
12251 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12252 ddi_fm_handler_unregister(mpt->m_dip);
12253 }
12254
12255 /*
12256 * Release any resources allocated by pci_ereport_setup()
12257 */
12258
12259 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12260 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12261 pci_ereport_teardown(mpt->m_dip);
12262 }
12263
12264 /* Unregister from IO Fault Services */
12265 ddi_fm_fini(mpt->m_dip);
12266
12267 /* Adjust access and dma attributes for FMA */
12268 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12269 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12270 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12271
12272 }
12273 }
12274
/*
 * Query the FM framework for the error status of an access handle.
 * Returns DDI_FAILURE for a NULL handle, otherwise the fme_status
 * reported by ddi_fm_acc_err_get().
 */
int
mptsas_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}
12285
/*
 * Query the FM framework for the error status of a DMA handle.
 * Returns DDI_FAILURE for a NULL handle, otherwise the fme_status
 * reported by ddi_fm_dma_err_get().
 */
int
mptsas_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}
12296
12297 void
12298 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12299 {
12300 uint64_t ena;
12301 char buf[FM_MAX_CLASS];
12302
12303 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12304 ena = fm_ena_generate(0, FM_ENA_FMT1);
12305 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12306 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12307 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12308 }
12309 }
12310
12311 static int
12312 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12313 uint16_t *dev_handle, mptsas_target_t **pptgt)
12314 {
12315 int rval;
12316 uint32_t dev_info;
12317 uint64_t sas_wwn;
12318 mptsas_phymask_t phymask;
12319 uint8_t physport, phynum, config, disk;
12320 mptsas_slots_t *slots = mpt->m_active;
12321 uint64_t devicename;
12322 uint16_t pdev_hdl;
12323 mptsas_target_t *tmp_tgt = NULL;
12324 uint16_t bay_num, enclosure;
12325
12326 ASSERT(*pptgt == NULL);
12327
12328 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12329 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12330 &bay_num, &enclosure);
12331 if (rval != DDI_SUCCESS) {
12332 rval = DEV_INFO_FAIL_PAGE0;
12333 return (rval);
12334 }
12335
12336 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12337 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12338 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12339 rval = DEV_INFO_WRONG_DEVICE_TYPE;
12340 return (rval);
12341 }
12342
12343 /*
12344 * Check if the dev handle is for a Phys Disk. If so, set return value
12345 * and exit. Don't add Phys Disks to hash.
12346 */
12347 for (config = 0; config < slots->m_num_raid_configs; config++) {
12348 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12349 if (*dev_handle == slots->m_raidconfig[config].
12350 m_physdisk_devhdl[disk]) {
12351 rval = DEV_INFO_PHYS_DISK;
12352 return (rval);
12353 }
12354 }
12355 }
12356
12357 /*
12358 * Get SATA Device Name from SAS device page0 for
12359 * sata device, if device name doesn't exist, set m_sas_wwn to
12360 * 0 for direct attached SATA. For the device behind the expander
12361 * we still can use STP address assigned by expander.
12362 */
12363 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12364 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12365 mutex_exit(&mpt->m_mutex);
12366 /* alloc a tmp_tgt to send the cmd */
12367 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12368 KM_SLEEP);
12369 tmp_tgt->m_devhdl = *dev_handle;
12370 tmp_tgt->m_deviceinfo = dev_info;
12371 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12372 tmp_tgt->m_qfull_retry_interval =
12373 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12374 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12375 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12376 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12377 mutex_enter(&mpt->m_mutex);
12378 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12379 sas_wwn = devicename;
12380 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12381 sas_wwn = 0;
12382 }
12383 }
12384
12385 phymask = mptsas_physport_to_phymask(mpt, physport);
12386 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12387 dev_info, phymask, phynum);
12388 if (*pptgt == NULL) {
12389 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
12390 "structure!");
12391 rval = DEV_INFO_FAIL_ALLOC;
12392 return (rval);
12393 }
12394 (*pptgt)->m_enclosure = enclosure;
12395 (*pptgt)->m_slot_num = bay_num;
12396 return (DEV_INFO_SUCCESS);
12397 }
12398
12399 uint64_t
12400 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
12401 {
12402 uint64_t sata_guid = 0, *pwwn = NULL;
12403 int target = ptgt->m_devhdl;
12404 uchar_t *inq83 = NULL;
12405 int inq83_len = 0xFF;
12406 uchar_t *dblk = NULL;
12407 int inq83_retry = 3;
12408 int rval = DDI_FAILURE;
12409
12410 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
12411
12412 inq83_retry:
12413 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
12414 inq83_len, NULL, 1);
12415 if (rval != DDI_SUCCESS) {
12416 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
12417 "0x83 for target:%x, lun:%x failed!", target, lun);
12418 goto out;
12419 }
12420 /* According to SAT2, the first descriptor is logic unit name */
12421 dblk = &inq83[4];
12422 if ((dblk[1] & 0x30) != 0) {
12423 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
12424 goto out;
12425 }
12426 pwwn = (uint64_t *)(void *)(&dblk[4]);
12427 if ((dblk[4] & 0xf0) == 0x50) {
12428 sata_guid = BE_64(*pwwn);
12429 goto out;
12430 } else if (dblk[4] == 'A') {
12431 NDBG20(("SATA drive has no NAA format GUID."));
12432 goto out;
12433 } else {
12434 /* The data is not ready, wait and retry */
12435 inq83_retry--;
12436 if (inq83_retry <= 0) {
12437 goto out;
12438 }
12439 NDBG20(("The GUID is not ready, retry..."));
12440 delay(1 * drv_usectohz(1000000));
12441 goto inq83_retry;
12442 }
12443 out:
12444 kmem_free(inq83, inq83_len);
12445 return (sata_guid);
12446 }
12447
12448 static int
12449 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12450 unsigned char *buf, int len, int *reallen, uchar_t evpd)
12451 {
12452 uchar_t cdb[CDB_GROUP0];
12453 struct scsi_address ap;
12454 struct buf *data_bp = NULL;
12455 int resid = 0;
12456 int ret = DDI_FAILURE;
12457
12458 ASSERT(len <= 0xffff);
12459
12460 ap.a_target = MPTSAS_INVALID_DEVHDL;
12461 ap.a_lun = (uchar_t)(lun);
12462 ap.a_hba_tran = mpt->m_tran;
12463
12464 data_bp = scsi_alloc_consistent_buf(&ap,
12465 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12466 if (data_bp == NULL) {
12467 return (ret);
12468 }
12469 bzero(cdb, CDB_GROUP0);
12470 cdb[0] = SCMD_INQUIRY;
12471 cdb[1] = evpd;
12472 cdb[2] = page;
12473 cdb[3] = (len & 0xff00) >> 8;
12474 cdb[4] = (len & 0x00ff);
12475 cdb[5] = 0;
12476
12477 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12478 &resid);
12479 if (ret == DDI_SUCCESS) {
12480 if (reallen) {
12481 *reallen = len - resid;
12482 }
12483 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12484 }
12485 if (data_bp) {
12486 scsi_free_consistent_buf(data_bp);
12487 }
12488 return (ret);
12489 }
12490
12491 static int
12492 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
12493 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
12494 int *resid)
12495 {
12496 struct scsi_pkt *pktp = NULL;
12497 scsi_hba_tran_t *tran_clone = NULL;
12498 mptsas_tgt_private_t *tgt_private = NULL;
12499 int ret = DDI_FAILURE;
12500
12501 /*
12502 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
12503 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
12504 * to simulate the cmds from sd
12505 */
12506 tran_clone = kmem_alloc(
12507 sizeof (scsi_hba_tran_t), KM_SLEEP);
12508 if (tran_clone == NULL) {
12509 goto out;
12510 }
12511 bcopy((caddr_t)mpt->m_tran,
12512 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
12513 tgt_private = kmem_alloc(
12514 sizeof (mptsas_tgt_private_t), KM_SLEEP);
12515 if (tgt_private == NULL) {
12516 goto out;
12517 }
12518 tgt_private->t_lun = ap->a_lun;
12519 tgt_private->t_private = ptgt;
12520 tran_clone->tran_tgt_private = tgt_private;
12521 ap->a_hba_tran = tran_clone;
12522
12523 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
12524 data_bp, cdblen, sizeof (struct scsi_arq_status),
12525 0, PKT_CONSISTENT, NULL, NULL);
12526 if (pktp == NULL) {
12527 goto out;
12528 }
12529 bcopy(cdb, pktp->pkt_cdbp, cdblen);
12530 pktp->pkt_flags = FLAG_NOPARITY;
12531 if (scsi_poll(pktp) < 0) {
12532 goto out;
12533 }
12534 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
12535 goto out;
12536 }
12537 if (resid != NULL) {
12538 *resid = pktp->pkt_resid;
12539 }
12540
12541 ret = DDI_SUCCESS;
12542 out:
12543 if (pktp) {
12544 scsi_destroy_pkt(pktp);
12545 }
12546 if (tran_clone) {
12547 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
12548 }
12549 if (tgt_private) {
12550 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
12551 }
12552 return (ret);
12553 }
12554 static int
12555 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
12556 {
12557 char *cp = NULL;
12558 char *ptr = NULL;
12559 size_t s = 0;
12560 char *wwid_str = NULL;
12561 char *lun_str = NULL;
12562 long lunnum;
12563 long phyid = -1;
12564 int rc = DDI_FAILURE;
12565
12566 ptr = name;
12567 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
12568 ptr++;
12569 if ((cp = strchr(ptr, ',')) == NULL) {
12570 return (DDI_FAILURE);
12571 }
12572
12573 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12574 s = (uintptr_t)cp - (uintptr_t)ptr;
12575
12576 bcopy(ptr, wwid_str, s);
12577 wwid_str[s] = '\0';
12578
12579 ptr = ++cp;
12580
12581 if ((cp = strchr(ptr, '\0')) == NULL) {
12582 goto out;
12583 }
12584 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12585 s = (uintptr_t)cp - (uintptr_t)ptr;
12586
12587 bcopy(ptr, lun_str, s);
12588 lun_str[s] = '\0';
12589
12590 if (name[0] == 'p') {
12591 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
12592 } else {
12593 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
12594 }
12595 if (rc != DDI_SUCCESS)
12596 goto out;
12597
12598 if (phyid != -1) {
12599 ASSERT(phyid < MPTSAS_MAX_PHYS);
12600 *phy = (uint8_t)phyid;
12601 }
12602 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
12603 if (rc != 0)
12604 goto out;
12605
12606 *lun = (int)lunnum;
12607 rc = DDI_SUCCESS;
12608 out:
12609 if (wwid_str)
12610 kmem_free(wwid_str, SCSI_MAXNAMELEN);
12611 if (lun_str)
12612 kmem_free(lun_str, SCSI_MAXNAMELEN);
12613
12614 return (rc);
12615 }
12616
12617 /*
12618 * mptsas_parse_smp_name() is to parse sas wwn string
12619 * which format is "wWWN"
12620 */
12621 static int
12622 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12623 {
12624 char *ptr = name;
12625
12626 if (*ptr != 'w') {
12627 return (DDI_FAILURE);
12628 }
12629
12630 ptr++;
12631 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12632 return (DDI_FAILURE);
12633 }
12634 return (DDI_SUCCESS);
12635 }
12636
/*
 * bus_config(9E) entry point for the mptsas iport nexus.
 *
 * For BUS_CONFIG_ONE the unit address in arg is parsed and exactly one
 * child ("smp@w...", "...@w<WWN>,<LUN>" or "...@p<PHY>,<LUN>") is
 * configured; for BUS_CONFIG_DRIVER/ALL every known target is configured.
 * The vHCI and iport nodes are held across the operation.  Returns an
 * NDI_* status (NDI_FAILURE on parse errors or DDI_FAILURE for a
 * non-iport node).
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;		/* normalized name, if one is built */
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	int bconfig = TRUE;		/* run ndi_busop_bus_config() at end */

	/* Only iport nodes (those with a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			/* Unit address is neither smp@ nor w/p form. */
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the framework finish the configuration unless skipped above. */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
12754
12755 static int
12756 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12757 mptsas_target_t *ptgt)
12758 {
12759 int rval = DDI_FAILURE;
12760 struct scsi_inquiry *sd_inq = NULL;
12761 mptsas_t *mpt = DIP2MPT(pdip);
12762
12763 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12764
12765 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12766 SUN_INQSIZE, 0, (uchar_t)0);
12767
12768 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12769 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12770 } else {
12771 rval = DDI_FAILURE;
12772 }
12773
12774 kmem_free(sd_inq, SUN_INQSIZE);
12775 return (rval);
12776 }
12777
12778 static int
12779 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
12780 dev_info_t **lundip)
12781 {
12782 int rval;
12783 mptsas_t *mpt = DIP2MPT(pdip);
12784 int phymask;
12785 mptsas_target_t *ptgt = NULL;
12786
12787 /*
12788 * Get the physical port associated to the iport
12789 */
12790 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12791 "phymask", 0);
12792
12793 ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
12794 if (ptgt == NULL) {
12795 /*
12796 * didn't match any device by searching
12797 */
12798 return (DDI_FAILURE);
12799 }
12800 /*
12801 * If the LUN already exists and the status is online,
12802 * we just return the pointer to dev_info_t directly.
12803 * For the mdi_pathinfo node, we'll handle it in
12804 * mptsas_create_virt_lun()
12805 * TODO should be also in mptsas_handle_dr
12806 */
12807
12808 *lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
12809 if (*lundip != NULL) {
12810 /*
12811 * TODO Another senario is, we hotplug the same disk
12812 * on the same slot, the devhdl changed, is this
12813 * possible?
12814 * tgt_private->t_private != ptgt
12815 */
12816 if (sasaddr != ptgt->m_sas_wwn) {
12817 /*
12818 * The device has changed although the devhdl is the
12819 * same (Enclosure mapping mode, change drive on the
12820 * same slot)
12821 */
12822 return (DDI_FAILURE);
12823 }
12824 return (DDI_SUCCESS);
12825 }
12826
12827 if (phymask == 0) {
12828 /*
12829 * Configure IR volume
12830 */
12831 rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
12832 return (rval);
12833 }
12834 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12835
12836 return (rval);
12837 }
12838
12839 static int
12840 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
12841 dev_info_t **lundip)
12842 {
12843 int rval;
12844 mptsas_t *mpt = DIP2MPT(pdip);
12845 int phymask;
12846 mptsas_target_t *ptgt = NULL;
12847
12848 /*
12849 * Get the physical port associated to the iport
12850 */
12851 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12852 "phymask", 0);
12853
12854 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
12855 if (ptgt == NULL) {
12856 /*
12857 * didn't match any device by searching
12858 */
12859 return (DDI_FAILURE);
12860 }
12861
12862 /*
12863 * If the LUN already exists and the status is online,
12864 * we just return the pointer to dev_info_t directly.
12865 * For the mdi_pathinfo node, we'll handle it in
12866 * mptsas_create_virt_lun().
12867 */
12868
12869 *lundip = mptsas_find_child_phy(pdip, phy);
12870 if (*lundip != NULL) {
12871 return (DDI_SUCCESS);
12872 }
12873
12874 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12875
12876 return (rval);
12877 }
12878
12879 static int
12880 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
12881 uint8_t *lun_addr_type)
12882 {
12883 uint32_t lun_idx = 0;
12884
12885 ASSERT(lun_num != NULL);
12886 ASSERT(lun_addr_type != NULL);
12887
12888 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
12889 /* determine report luns addressing type */
12890 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
12891 /*
12892 * Vendors in the field have been found to be concatenating
12893 * bus/target/lun to equal the complete lun value instead
12894 * of switching to flat space addressing
12895 */
12896 /* 00b - peripheral device addressing method */
12897 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
12898 /* FALLTHRU */
12899 /* 10b - logical unit addressing method */
12900 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
12901 /* FALLTHRU */
12902 /* 01b - flat space addressing method */
12903 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
12904 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
12905 *lun_addr_type = (buf[lun_idx] &
12906 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
12907 *lun_num = (buf[lun_idx] & 0x3F) << 8;
12908 *lun_num |= buf[lun_idx + 1];
12909 return (DDI_SUCCESS);
12910 default:
12911 return (DDI_FAILURE);
12912 }
12913 }
12914
12915 static int
12916 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
12917 {
12918 struct buf *repluns_bp = NULL;
12919 struct scsi_address ap;
12920 uchar_t cdb[CDB_GROUP5];
12921 int ret = DDI_FAILURE;
12922 int retry = 0;
12923 int lun_list_len = 0;
12924 uint16_t lun_num = 0;
12925 uint8_t lun_addr_type = 0;
12926 uint32_t lun_cnt = 0;
12927 uint32_t lun_total = 0;
12928 dev_info_t *cdip = NULL;
12929 uint16_t *saved_repluns = NULL;
12930 char *buffer = NULL;
12931 int buf_len = 128;
12932 mptsas_t *mpt = DIP2MPT(pdip);
12933 uint64_t sas_wwn = 0;
12934 uint8_t phy = 0xFF;
12935 uint32_t dev_info = 0;
12936
12937 mutex_enter(&mpt->m_mutex);
12938 sas_wwn = ptgt->m_sas_wwn;
12939 phy = ptgt->m_phynum;
12940 dev_info = ptgt->m_deviceinfo;
12941 mutex_exit(&mpt->m_mutex);
12942
12943 if (sas_wwn == 0) {
12944 /*
12945 * It's a SATA without Device Name
12946 * So don't try multi-LUNs
12947 */
12948 if (mptsas_find_child_phy(pdip, phy)) {
12949 return (DDI_SUCCESS);
12950 } else {
12951 /*
12952 * need configure and create node
12953 */
12954 return (DDI_FAILURE);
12955 }
12956 }
12957
12958 /*
12959 * WWN (SAS address or Device Name exist)
12960 */
12961 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12962 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12963 /*
12964 * SATA device with Device Name
12965 * So don't try multi-LUNs
12966 */
12967 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
12968 return (DDI_SUCCESS);
12969 } else {
12970 return (DDI_FAILURE);
12971 }
12972 }
12973
12974 do {
12975 ap.a_target = MPTSAS_INVALID_DEVHDL;
12976 ap.a_lun = 0;
12977 ap.a_hba_tran = mpt->m_tran;
12978 repluns_bp = scsi_alloc_consistent_buf(&ap,
12979 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
12980 if (repluns_bp == NULL) {
12981 retry++;
12982 continue;
12983 }
12984 bzero(cdb, CDB_GROUP5);
12985 cdb[0] = SCMD_REPORT_LUNS;
12986 cdb[6] = (buf_len & 0xff000000) >> 24;
12987 cdb[7] = (buf_len & 0x00ff0000) >> 16;
12988 cdb[8] = (buf_len & 0x0000ff00) >> 8;
12989 cdb[9] = (buf_len & 0x000000ff);
12990
12991 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
12992 repluns_bp, NULL);
12993 if (ret != DDI_SUCCESS) {
12994 scsi_free_consistent_buf(repluns_bp);
12995 retry++;
12996 continue;
12997 }
12998 lun_list_len = BE_32(*(int *)((void *)(
12999 repluns_bp->b_un.b_addr)));
13000 if (buf_len >= lun_list_len + 8) {
13001 ret = DDI_SUCCESS;
13002 break;
13003 }
13004 scsi_free_consistent_buf(repluns_bp);
13005 buf_len = lun_list_len + 8;
13006
13007 } while (retry < 3);
13008
13009 if (ret != DDI_SUCCESS)
13010 return (ret);
13011 buffer = (char *)repluns_bp->b_un.b_addr;
13012 /*
13013 * find out the number of luns returned by the SCSI ReportLun call
13014 * and allocate buffer space
13015 */
13016 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13017 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13018 if (saved_repluns == NULL) {
13019 scsi_free_consistent_buf(repluns_bp);
13020 return (DDI_FAILURE);
13021 }
13022 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13023 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13024 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13025 continue;
13026 }
13027 saved_repluns[lun_cnt] = lun_num;
13028 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13029 ret = DDI_SUCCESS;
13030 else
13031 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13032 ptgt);
13033 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13034 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13035 MPTSAS_DEV_GONE);
13036 }
13037 }
13038 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13039 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13040 scsi_free_consistent_buf(repluns_bp);
13041 return (DDI_SUCCESS);
13042 }
13043
13044 static int
13045 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13046 {
13047 int rval = DDI_FAILURE;
13048 struct scsi_inquiry *sd_inq = NULL;
13049 mptsas_t *mpt = DIP2MPT(pdip);
13050 mptsas_target_t *ptgt = NULL;
13051
13052 mutex_enter(&mpt->m_mutex);
13053 ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13054 mutex_exit(&mpt->m_mutex);
13055 if (ptgt == NULL) {
13056 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13057 "not found.", target);
13058 return (rval);
13059 }
13060
13061 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13062 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13063 SUN_INQSIZE, 0, (uchar_t)0);
13064
13065 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13066 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13067 0);
13068 } else {
13069 rval = DDI_FAILURE;
13070 }
13071
13072 kmem_free(sd_inq, SUN_INQSIZE);
13073 return (rval);
13074 }
13075
13076 /*
13077 * configure all RAID volumes for virtual iport
13078 */
13079 static void
13080 mptsas_config_all_viport(dev_info_t *pdip)
13081 {
13082 mptsas_t *mpt = DIP2MPT(pdip);
13083 int config, vol;
13084 int target;
13085 dev_info_t *lundip = NULL;
13086 mptsas_slots_t *slots = mpt->m_active;
13087
13088 /*
13089 * Get latest RAID info and search for any Volume DevHandles. If any
13090 * are found, configure the volume.
13091 */
13092 mutex_enter(&mpt->m_mutex);
13093 for (config = 0; config < slots->m_num_raid_configs; config++) {
13094 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13095 if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13096 == 1) {
13097 target = slots->m_raidconfig[config].
13098 m_raidvol[vol].m_raidhandle;
13099 mutex_exit(&mpt->m_mutex);
13100 (void) mptsas_config_raid(pdip, target,
13101 &lundip);
13102 mutex_enter(&mpt->m_mutex);
13103 }
13104 }
13105 }
13106 mutex_exit(&mpt->m_mutex);
13107 }
13108
/*
 * Offline child devinfo nodes and mdi pathinfo nodes that belong to
 * ptgt (matched by SAS WWN) but whose LUN numbers no longer appear in
 * the latest REPORT LUNS response.
 *
 * pdip    - iport parent whose children/paths are scanned
 * repluns - LUN numbers from the most recent REPORT LUNS command
 * lun_cnt - number of entries in repluns
 * ptgt    - the target being reconciled
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain child devinfo nodes.  Fetch the next sibling
	 * before examining the current child, because the current child
	 * may be offlined and removed below.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* smp nodes are not luns; skip them */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* only children belonging to this target's WWN matter */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi pathinfo nodes (mpxio paths), using the same
	 * fetch-next-before-use pattern as above.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
13205
13206 void
13207 mptsas_update_hashtab(struct mptsas *mpt)
13208 {
13209 uint32_t page_address;
13210 int rval = 0;
13211 uint16_t dev_handle;
13212 mptsas_target_t *ptgt = NULL;
13213 mptsas_smp_t smp_node;
13214
13215 /*
13216 * Get latest RAID info.
13217 */
13218 (void) mptsas_get_raid_info(mpt);
13219
13220 dev_handle = mpt->m_smp_devhdl;
13221 for (; mpt->m_done_traverse_smp == 0; ) {
13222 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
13223 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
13224 if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
13225 != DDI_SUCCESS) {
13226 break;
13227 }
13228 mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
13229 (void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
13230 }
13231
13232 /*
13233 * Config target devices
13234 */
13235 dev_handle = mpt->m_dev_handle;
13236
13237 /*
13238 * Do loop to get sas device page 0 by GetNextHandle till the
13239 * the last handle. If the sas device is a SATA/SSP target,
13240 * we try to config it.
13241 */
13242 for (; mpt->m_done_traverse_dev == 0; ) {
13243 ptgt = NULL;
13244 page_address =
13245 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
13246 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13247 (uint32_t)dev_handle;
13248 rval = mptsas_get_target_device_info(mpt, page_address,
13249 &dev_handle, &ptgt);
13250 if ((rval == DEV_INFO_FAIL_PAGE0) ||
13251 (rval == DEV_INFO_FAIL_ALLOC)) {
13252 break;
13253 }
13254
13255 mpt->m_dev_handle = dev_handle;
13256 }
13257
13258 }
13259
13260 void
13261 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
13262 {
13263 mptsas_hash_data_t *data;
13264 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13265 while (data != NULL) {
13266 data->devhdl = MPTSAS_INVALID_DEVHDL;
13267 data->device_info = 0;
13268 /*
13269 * For tgttbl, clear dr_flag.
13270 */
13271 data->dr_flag = MPTSAS_DR_INACTIVE;
13272 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13273 }
13274 }
13275
/*
 * Rebuild the driver's view of the topology after a hard reset:
 * refresh the port/phymask mapping, invalidate all cached target and
 * smp entries, then re-traverse the firmware's device/expander pages
 * to repopulate the hash tables.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalidate the existing entries; the traversal below will
	 * refresh them.
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* force mptsas_update_hashtab() to walk both lists from scratch */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
13297
/*
 * Configure every device visible through the given iport.  For the
 * virtual iport (phymask property == 0) this enumerates RAID volumes;
 * for a physical iport it onlines all smp nodes and configures all
 * targets whose phymask matches the iport's.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* make sure the device/expander tables are fully populated */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online each smp node on this physical port.
	 * NOTE(review): the mutex is dropped around mptsas_online_smp()
	 * while the hash-traverse cursor stays live — this assumes the
	 * table is not mutated concurrently; confirm against the
	 * driver's locking model.
	 */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* configure each target on this physical port (same caveat) */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
13357
13358 static int
13359 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13360 {
13361 int rval = DDI_FAILURE;
13362 dev_info_t *tdip;
13363
13364 rval = mptsas_config_luns(pdip, ptgt);
13365 if (rval != DDI_SUCCESS) {
13366 /*
13367 * The return value means the SCMD_REPORT_LUNS
13368 * did not execute successfully. The target maybe
13369 * doesn't support such command.
13370 */
13371 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13372 }
13373 return (rval);
13374 }
13375
/*
 * Offline all child nodes and paths whose unit address begins with
 * `name' (the "w<wwn>" or "p<phy>" portion before the ",<lun>").
 *
 * Return failure if not all the children/paths are freed.
 * If there is any path under the HBA, the return value will always be
 * failure because we do not call mdi_pi_free for the path.
 */
static int
mptsas_offline_target(dev_info_t *pdip, char *name)
{
	dev_info_t	*child = NULL, *prechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	int		tmp_rval, rval = DDI_SUCCESS;
	char		*addr, *cp;
	size_t		s;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * Pass 1: child devinfo nodes.  The next sibling is fetched
	 * before the current child may be removed by the offline call.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		addr = ddi_get_name_addr(child);
		prechild = child;
		child = ddi_get_next_sibling(child);

		if (addr == NULL) {
			continue;
		}
		/* the unit address must contain a ",<lun>" suffix */
		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		/* length of the target portion before the comma */
		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
		    NDI_DEVI_REMOVE);
		if (tmp_rval != DDI_SUCCESS) {
			rval = DDI_FAILURE;
			/*
			 * Could not offline the node now; mark it so the
			 * stale node can be recognized later.
			 */
			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
			    prechild, MPTSAS_DEV_GONE) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "unable to create property for "
				    "SAS %s (MPTSAS_DEV_GONE)", addr);
			}
		}
	}

	/*
	 * Pass 2: mdi pathinfo nodes.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		addr = MDI_PI(pip)->pi_addr;
		savepip = pip;
		pip = mdi_get_next_client_path(pdip, pip);
		if (addr == NULL) {
			continue;
		}

		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		(void) mptsas_offline_lun(pdip, NULL, savepip,
		    NDI_DEVI_REMOVE);
		/*
		 * driver will not invoke mdi_pi_free, so path will not
		 * be freed forever, return DDI_FAILURE.
		 */
		rval = DDI_FAILURE;
	}
	return (rval);
}
13453
13454 static int
13455 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13456 mdi_pathinfo_t *rpip, uint_t flags)
13457 {
13458 int rval = DDI_FAILURE;
13459 char *devname;
13460 dev_info_t *cdip, *parent;
13461
13462 if (rpip != NULL) {
13463 parent = scsi_vhci_dip;
13464 cdip = mdi_pi_get_client(rpip);
13465 } else if (rdip != NULL) {
13466 parent = pdip;
13467 cdip = rdip;
13468 } else {
13469 return (DDI_FAILURE);
13470 }
13471
13472 /*
13473 * Make sure node is attached otherwise
13474 * it won't have related cache nodes to
13475 * clean up. i_ddi_devi_attached is
13476 * similiar to i_ddi_node_state(cdip) >=
13477 * DS_ATTACHED.
13478 */
13479 if (i_ddi_devi_attached(cdip)) {
13480
13481 /* Get full devname */
13482 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13483 (void) ddi_deviname(cdip, devname);
13484 /* Clean cache */
13485 (void) devfs_clean(parent, devname + 1,
13486 DV_CLEAN_FORCE);
13487 kmem_free(devname, MAXNAMELEN + 1);
13488 }
13489 if (rpip != NULL) {
13490 if (MDI_PI_IS_OFFLINE(rpip)) {
13491 rval = DDI_SUCCESS;
13492 } else {
13493 rval = mdi_pi_offline(rpip, 0);
13494 }
13495 } else {
13496 rval = ndi_devi_offline(cdip, flags);
13497 }
13498
13499 return (rval);
13500 }
13501
13502 static dev_info_t *
13503 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13504 {
13505 dev_info_t *child = NULL;
13506 char *smp_wwn = NULL;
13507
13508 child = ddi_get_child(parent);
13509 while (child) {
13510 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13511 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13512 != DDI_SUCCESS) {
13513 child = ddi_get_next_sibling(child);
13514 continue;
13515 }
13516
13517 if (strcmp(smp_wwn, str_wwn) == 0) {
13518 ddi_prop_free(smp_wwn);
13519 break;
13520 }
13521 child = ddi_get_next_sibling(child);
13522 ddi_prop_free(smp_wwn);
13523 }
13524 return (child);
13525 }
13526
13527 static int
13528 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13529 {
13530 int rval = DDI_FAILURE;
13531 char *devname;
13532 char wwn_str[MPTSAS_WWN_STRLEN];
13533 dev_info_t *cdip;
13534
13535 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
13536
13537 cdip = mptsas_find_smp_child(pdip, wwn_str);
13538
13539 if (cdip == NULL)
13540 return (DDI_SUCCESS);
13541
13542 /*
13543 * Make sure node is attached otherwise
13544 * it won't have related cache nodes to
13545 * clean up. i_ddi_devi_attached is
13546 * similiar to i_ddi_node_state(cdip) >=
13547 * DS_ATTACHED.
13548 */
13549 if (i_ddi_devi_attached(cdip)) {
13550
13551 /* Get full devname */
13552 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13553 (void) ddi_deviname(cdip, devname);
13554 /* Clean cache */
13555 (void) devfs_clean(pdip, devname + 1,
13556 DV_CLEAN_FORCE);
13557 kmem_free(devname, MAXNAMELEN + 1);
13558 }
13559
13560 rval = ndi_devi_offline(cdip, flags);
13561
13562 return (rval);
13563 }
13564
13565 static dev_info_t *
13566 mptsas_find_child(dev_info_t *pdip, char *name)
13567 {
13568 dev_info_t *child = NULL;
13569 char *rname = NULL;
13570 int rval = DDI_FAILURE;
13571
13572 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13573
13574 child = ddi_get_child(pdip);
13575 while (child) {
13576 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13577 if (rval != DDI_SUCCESS) {
13578 child = ddi_get_next_sibling(child);
13579 bzero(rname, SCSI_MAXNAMELEN);
13580 continue;
13581 }
13582
13583 if (strcmp(rname, name) == 0) {
13584 break;
13585 }
13586 child = ddi_get_next_sibling(child);
13587 bzero(rname, SCSI_MAXNAMELEN);
13588 }
13589
13590 kmem_free(rname, SCSI_MAXNAMELEN);
13591
13592 return (child);
13593 }
13594
13595
13596 static dev_info_t *
13597 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13598 {
13599 dev_info_t *child = NULL;
13600 char *name = NULL;
13601 char *addr = NULL;
13602
13603 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13604 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13605 (void) sprintf(name, "%016"PRIx64, sasaddr);
13606 (void) sprintf(addr, "w%s,%x", name, lun);
13607 child = mptsas_find_child(pdip, addr);
13608 kmem_free(name, SCSI_MAXNAMELEN);
13609 kmem_free(addr, SCSI_MAXNAMELEN);
13610 return (child);
13611 }
13612
13613 static dev_info_t *
13614 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13615 {
13616 dev_info_t *child;
13617 char *addr;
13618
13619 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13620 (void) sprintf(addr, "p%x,0", phy);
13621 child = mptsas_find_child(pdip, addr);
13622 kmem_free(addr, SCSI_MAXNAMELEN);
13623 return (child);
13624 }
13625
13626 static mdi_pathinfo_t *
13627 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13628 {
13629 mdi_pathinfo_t *path;
13630 char *addr = NULL;
13631
13632 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13633 (void) sprintf(addr, "p%x,0", phy);
13634 path = mdi_pi_find(pdip, NULL, addr);
13635 kmem_free(addr, SCSI_MAXNAMELEN);
13636 return (path);
13637 }
13638
13639 static mdi_pathinfo_t *
13640 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13641 {
13642 mdi_pathinfo_t *path;
13643 char *name = NULL;
13644 char *addr = NULL;
13645
13646 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13647 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13648 (void) sprintf(name, "%016"PRIx64, sasaddr);
13649 (void) sprintf(addr, "w%s,%x", name, lun);
13650 path = mdi_pi_find(parent, NULL, addr);
13651 kmem_free(name, SCSI_MAXNAMELEN);
13652 kmem_free(addr, SCSI_MAXNAMELEN);
13653
13654 return (path);
13655 }
13656
/*
 * Create a devinfo/pathinfo node for one lun of a target.  For
 * potentially multipathed devices, SCSI inquiry VPD page 0x83 is
 * fetched and encoded into a devid so the lun can be enumerated under
 * scsi_vhci; device types that are never multipathed (CD/DVD, optical,
 * enclosure services) skip the page-83 work and go straight to node
 * creation.
 *
 * Returns DDI_SUCCESS when a node (virtual or physical) was created,
 * DDI_FAILURE otherwise.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int		i = 0;
	uchar_t		*inq83 = NULL;
	int		inq83_len1 = 0xFF;
	int		inq83_len = 0;
	int		rval = DDI_FAILURE;
	ddi_devid_t	devid;
	char		*guid = NULL;
	int		target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall back to a
			 * physical (non-mpxio) node when page 83 fails.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/*
	 * rval is reused below for the node-creation result; reset it
	 * so a failed (or skipped) virtual-lun attempt falls through to
	 * the physical-lun path.
	 */
	rval = DDI_FAILURE;

create_lun:
	/* try a virtual (scsi_vhci) node first when mpxio is possible */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
13780
13781 static int
13782 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13783 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13784 {
13785 int target;
13786 char *nodename = NULL;
13787 char **compatible = NULL;
13788 int ncompatible = 0;
13789 int mdi_rtn = MDI_FAILURE;
13790 int rval = DDI_FAILURE;
13791 char *old_guid = NULL;
13792 mptsas_t *mpt = DIP2MPT(pdip);
13793 char *lun_addr = NULL;
13794 char *wwn_str = NULL;
13795 char *attached_wwn_str = NULL;
13796 char *component = NULL;
13797 uint8_t phy = 0xFF;
13798 uint64_t sas_wwn;
13799 int64_t lun64 = 0;
13800 uint32_t devinfo;
13801 uint16_t dev_hdl;
13802 uint16_t pdev_hdl;
13803 uint64_t dev_sas_wwn;
13804 uint64_t pdev_sas_wwn;
13805 uint32_t pdev_info;
13806 uint8_t physport;
13807 uint8_t phy_id;
13808 uint32_t page_address;
13809 uint16_t bay_num, enclosure;
13810 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
13811 uint32_t dev_info;
13812
13813 mutex_enter(&mpt->m_mutex);
13814 target = ptgt->m_devhdl;
13815 sas_wwn = ptgt->m_sas_wwn;
13816 devinfo = ptgt->m_deviceinfo;
13817 phy = ptgt->m_phynum;
13818 mutex_exit(&mpt->m_mutex);
13819
13820 if (sas_wwn) {
13821 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13822 } else {
13823 *pip = mptsas_find_path_phy(pdip, phy);
13824 }
13825
13826 if (*pip != NULL) {
13827 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13828 ASSERT(*lun_dip != NULL);
13829 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
13830 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
13831 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
13832 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
13833 /*
13834 * Same path back online again.
13835 */
13836 (void) ddi_prop_free(old_guid);
13837 if ((!MDI_PI_IS_ONLINE(*pip)) &&
13838 (!MDI_PI_IS_STANDBY(*pip)) &&
13839 (ptgt->m_tgt_unconfigured == 0)) {
13840 rval = mdi_pi_online(*pip, 0);
13841 mutex_enter(&mpt->m_mutex);
13842 (void) mptsas_set_led_status(mpt, ptgt,
13843 0);
13844 mutex_exit(&mpt->m_mutex);
13845 } else {
13846 rval = DDI_SUCCESS;
13847 }
13848 if (rval != DDI_SUCCESS) {
13849 mptsas_log(mpt, CE_WARN, "path:target: "
13850 "%x, lun:%x online failed!", target,
13851 lun);
13852 *pip = NULL;
13853 *lun_dip = NULL;
13854 }
13855 return (rval);
13856 } else {
13857 /*
13858 * The GUID of the LUN has changed which maybe
13859 * because customer mapped another volume to the
13860 * same LUN.
13861 */
13862 mptsas_log(mpt, CE_WARN, "The GUID of the "
13863 "target:%x, lun:%x was changed, maybe "
13864 "because someone mapped another volume "
13865 "to the same LUN", target, lun);
13866 (void) ddi_prop_free(old_guid);
13867 if (!MDI_PI_IS_OFFLINE(*pip)) {
13868 rval = mdi_pi_offline(*pip, 0);
13869 if (rval != MDI_SUCCESS) {
13870 mptsas_log(mpt, CE_WARN, "path:"
13871 "target:%x, lun:%x offline "
13872 "failed!", target, lun);
13873 *pip = NULL;
13874 *lun_dip = NULL;
13875 return (DDI_FAILURE);
13876 }
13877 }
13878 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
13879 mptsas_log(mpt, CE_WARN, "path:target:"
13880 "%x, lun:%x free failed!", target,
13881 lun);
13882 *pip = NULL;
13883 *lun_dip = NULL;
13884 return (DDI_FAILURE);
13885 }
13886 }
13887 } else {
13888 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
13889 "property for path:target:%x, lun:%x", target, lun);
13890 *pip = NULL;
13891 *lun_dip = NULL;
13892 return (DDI_FAILURE);
13893 }
13894 }
13895 scsi_hba_nodename_compatible_get(inq, NULL,
13896 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
13897
13898 /*
13899 * if nodename can't be determined then print a message and skip it
13900 */
13901 if (nodename == NULL) {
13902 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
13903 "driver for target%d lun %d dtype:0x%02x", target, lun,
13904 inq->inq_dtype);
13905 return (DDI_FAILURE);
13906 }
13907
13908 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
13909 /* The property is needed by MPAPI */
13910 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
13911
13912 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13913 if (guid) {
13914 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
13915 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
13916 } else {
13917 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
13918 (void) sprintf(wwn_str, "p%x", phy);
13919 }
13920
13921 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
13922 guid, lun_addr, compatible, ncompatible,
13923 0, pip);
13924 if (mdi_rtn == MDI_SUCCESS) {
13925
13926 if (mdi_prop_update_string(*pip, MDI_GUID,
13927 guid) != DDI_SUCCESS) {
13928 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13929 "create prop for target %d lun %d (MDI_GUID)",
13930 target, lun);
13931 mdi_rtn = MDI_FAILURE;
13932 goto virt_create_done;
13933 }
13934
13935 if (mdi_prop_update_int(*pip, LUN_PROP,
13936 lun) != DDI_SUCCESS) {
13937 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13938 "create prop for target %d lun %d (LUN_PROP)",
13939 target, lun);
13940 mdi_rtn = MDI_FAILURE;
13941 goto virt_create_done;
13942 }
13943 lun64 = (int64_t)lun;
13944 if (mdi_prop_update_int64(*pip, LUN64_PROP,
13945 lun64) != DDI_SUCCESS) {
13946 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13947 "create prop for target %d (LUN64_PROP)",
13948 target);
13949 mdi_rtn = MDI_FAILURE;
13950 goto virt_create_done;
13951 }
13952 if (mdi_prop_update_string_array(*pip, "compatible",
13953 compatible, ncompatible) !=
13954 DDI_PROP_SUCCESS) {
13955 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13956 "create prop for target %d lun %d (COMPATIBLE)",
13957 target, lun);
13958 mdi_rtn = MDI_FAILURE;
13959 goto virt_create_done;
13960 }
13961 if (sas_wwn && (mdi_prop_update_string(*pip,
13962 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
13963 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13964 "create prop for target %d lun %d "
13965 "(target-port)", target, lun);
13966 mdi_rtn = MDI_FAILURE;
13967 goto virt_create_done;
13968 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
13969 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
13970 /*
13971 * Direct attached SATA device without DeviceName
13972 */
13973 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13974 "create prop for SAS target %d lun %d "
13975 "(sata-phy)", target, lun);
13976 mdi_rtn = MDI_FAILURE;
13977 goto virt_create_done;
13978 }
13979 mutex_enter(&mpt->m_mutex);
13980
13981 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
13982 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13983 (uint32_t)ptgt->m_devhdl;
13984 rval = mptsas_get_sas_device_page0(mpt, page_address,
13985 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
13986 &phy_id, &pdev_hdl, &bay_num, &enclosure);
13987 if (rval != DDI_SUCCESS) {
13988 mutex_exit(&mpt->m_mutex);
13989 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
13990 "parent device for handle %d", page_address);
13991 mdi_rtn = MDI_FAILURE;
13992 goto virt_create_done;
13993 }
13994
13995 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
13996 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
13997 rval = mptsas_get_sas_device_page0(mpt, page_address,
13998 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
13999 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14000 if (rval != DDI_SUCCESS) {
14001 mutex_exit(&mpt->m_mutex);
14002 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14003 "device info for handle %d", page_address);
14004 mdi_rtn = MDI_FAILURE;
14005 goto virt_create_done;
14006 }
14007
14008 mutex_exit(&mpt->m_mutex);
14009
14010 /*
14011 * If this device direct attached to the controller
14012 * set the attached-port to the base wwid
14013 */
14014 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14015 != DEVINFO_DIRECT_ATTACHED) {
14016 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14017 pdev_sas_wwn);
14018 } else {
14019 /*
14020 * Update the iport's attached-port to guid
14021 */
14022 if (sas_wwn == 0) {
14023 (void) sprintf(wwn_str, "p%x", phy);
14024 } else {
14025 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14026 }
14027 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14028 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14029 DDI_PROP_SUCCESS) {
14030 mptsas_log(mpt, CE_WARN,
14031 "mptsas unable to create "
14032 "property for iport target-port"
14033 " %s (sas_wwn)",
14034 wwn_str);
14035 mdi_rtn = MDI_FAILURE;
14036 goto virt_create_done;
14037 }
14038
14039 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14040 mpt->un.m_base_wwid);
14041 }
14042
14043 if (mdi_prop_update_string(*pip,
14044 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14045 DDI_PROP_SUCCESS) {
14046 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14047 "property for iport attached-port %s (sas_wwn)",
14048 attached_wwn_str);
14049 mdi_rtn = MDI_FAILURE;
14050 goto virt_create_done;
14051 }
14052
14053
14054 if (inq->inq_dtype == 0) {
14055 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14056 /*
14057 * set obp path for pathinfo
14058 */
14059 (void) snprintf(component, MAXPATHLEN,
14060 "disk@%s", lun_addr);
14061
14062 if (mdi_pi_pathname_obp_set(*pip, component) !=
14063 DDI_SUCCESS) {
14064 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14065 "unable to set obp-path for object %s",
14066 component);
14067 mdi_rtn = MDI_FAILURE;
14068 goto virt_create_done;
14069 }
14070 }
14071
14072 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14073 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14074 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14075 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14076 "pm-capable", 1)) !=
14077 DDI_PROP_SUCCESS) {
14078 mptsas_log(mpt, CE_WARN, "mptsas driver"
14079 "failed to create pm-capable "
14080 "property, target %d", target);
14081 mdi_rtn = MDI_FAILURE;
14082 goto virt_create_done;
14083 }
14084 }
14085 /*
14086 * Create the phy-num property
14087 */
14088 if (mdi_prop_update_int(*pip, "phy-num",
14089 ptgt->m_phynum) != DDI_SUCCESS) {
14090 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14091 "create phy-num property for target %d lun %d",
14092 target, lun);
14093 mdi_rtn = MDI_FAILURE;
14094 goto virt_create_done;
14095 }
14096 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14097 mdi_rtn = mdi_pi_online(*pip, 0);
14098 if (mdi_rtn == MDI_SUCCESS) {
14099 mutex_enter(&mpt->m_mutex);
14100 if (mptsas_set_led_status(mpt, ptgt, 0) !=
14101 DDI_SUCCESS) {
14102 NDBG14(("mptsas: clear LED for slot %x "
14103 "failed", ptgt->m_slot_num));
14104 }
14105 mutex_exit(&mpt->m_mutex);
14106 }
14107 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14108 mdi_rtn = MDI_FAILURE;
14109 }
14110 virt_create_done:
14111 if (*pip && mdi_rtn != MDI_SUCCESS) {
14112 (void) mdi_pi_free(*pip, 0);
14113 *pip = NULL;
14114 *lun_dip = NULL;
14115 }
14116 }
14117
14118 scsi_hba_nodename_compatible_free(nodename, compatible);
14119 if (lun_addr != NULL) {
14120 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14121 }
14122 if (wwn_str != NULL) {
14123 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14124 }
14125 if (component != NULL) {
14126 kmem_free(component, MAXPATHLEN);
14127 }
14128
14129 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14130 }
14131
14132 static int
14133 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14134 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14135 {
14136 int target;
14137 int rval;
14138 int ndi_rtn = NDI_FAILURE;
14139 uint64_t be_sas_wwn;
14140 char *nodename = NULL;
14141 char **compatible = NULL;
14142 int ncompatible = 0;
14143 int instance = 0;
14144 mptsas_t *mpt = DIP2MPT(pdip);
14145 char *wwn_str = NULL;
14146 char *component = NULL;
14147 char *attached_wwn_str = NULL;
14148 uint8_t phy = 0xFF;
14149 uint64_t sas_wwn;
14150 uint32_t devinfo;
14151 uint16_t dev_hdl;
14152 uint16_t pdev_hdl;
14153 uint64_t pdev_sas_wwn;
14154 uint64_t dev_sas_wwn;
14155 uint32_t pdev_info;
14156 uint8_t physport;
14157 uint8_t phy_id;
14158 uint32_t page_address;
14159 uint16_t bay_num, enclosure;
14160 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14161 uint32_t dev_info;
14162 int64_t lun64 = 0;
14163
14164 mutex_enter(&mpt->m_mutex);
14165 target = ptgt->m_devhdl;
14166 sas_wwn = ptgt->m_sas_wwn;
14167 devinfo = ptgt->m_deviceinfo;
14168 phy = ptgt->m_phynum;
14169 mutex_exit(&mpt->m_mutex);
14170
14171 /*
14172 * generate compatible property with binding-set "mpt"
14173 */
14174 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14175 &nodename, &compatible, &ncompatible);
14176
14177 /*
14178 * if nodename can't be determined then print a message and skip it
14179 */
14180 if (nodename == NULL) {
14181 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14182 "for target %d lun %d", target, lun);
14183 return (DDI_FAILURE);
14184 }
14185
14186 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14187 DEVI_SID_NODEID, lun_dip);
14188
14189 /*
14190 * if lun alloc success, set props
14191 */
14192 if (ndi_rtn == NDI_SUCCESS) {
14193
14194 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14195 *lun_dip, LUN_PROP, lun) !=
14196 DDI_PROP_SUCCESS) {
14197 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14198 "property for target %d lun %d (LUN_PROP)",
14199 target, lun);
14200 ndi_rtn = NDI_FAILURE;
14201 goto phys_create_done;
14202 }
14203
14204 lun64 = (int64_t)lun;
14205 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14206 *lun_dip, LUN64_PROP, lun64) !=
14207 DDI_PROP_SUCCESS) {
14208 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14209 "property for target %d lun64 %d (LUN64_PROP)",
14210 target, lun);
14211 ndi_rtn = NDI_FAILURE;
14212 goto phys_create_done;
14213 }
14214 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14215 *lun_dip, "compatible", compatible, ncompatible)
14216 != DDI_PROP_SUCCESS) {
14217 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14218 "property for target %d lun %d (COMPATIBLE)",
14219 target, lun);
14220 ndi_rtn = NDI_FAILURE;
14221 goto phys_create_done;
14222 }
14223
14224 /*
14225 * We need the SAS WWN for non-multipath devices, so
14226 * we'll use the same property as that multipathing
14227 * devices need to present for MPAPI. If we don't have
14228 * a WWN (e.g. parallel SCSI), don't create the prop.
14229 */
14230 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14231 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14232 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14233 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14234 != DDI_PROP_SUCCESS) {
14235 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14236 "create property for SAS target %d lun %d "
14237 "(target-port)", target, lun);
14238 ndi_rtn = NDI_FAILURE;
14239 goto phys_create_done;
14240 }
14241
14242 be_sas_wwn = BE_64(sas_wwn);
14243 if (sas_wwn && ndi_prop_update_byte_array(
14244 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14245 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14246 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14247 "create property for SAS target %d lun %d "
14248 "(port-wwn)", target, lun);
14249 ndi_rtn = NDI_FAILURE;
14250 goto phys_create_done;
14251 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
14252 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14253 DDI_PROP_SUCCESS)) {
14254 /*
14255 * Direct attached SATA device without DeviceName
14256 */
14257 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14258 "create property for SAS target %d lun %d "
14259 "(sata-phy)", target, lun);
14260 ndi_rtn = NDI_FAILURE;
14261 goto phys_create_done;
14262 }
14263
14264 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14265 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14266 mptsas_log(mpt, CE_WARN, "mptsas unable to"
14267 "create property for SAS target %d lun %d"
14268 " (SAS_PROP)", target, lun);
14269 ndi_rtn = NDI_FAILURE;
14270 goto phys_create_done;
14271 }
14272 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14273 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14274 mptsas_log(mpt, CE_WARN, "mptsas unable "
14275 "to create guid property for target %d "
14276 "lun %d", target, lun);
14277 ndi_rtn = NDI_FAILURE;
14278 goto phys_create_done;
14279 }
14280
14281 /*
14282 * The following code is to set properties for SM-HBA support,
14283 * it doesn't apply to RAID volumes
14284 */
14285 if (ptgt->m_phymask == 0)
14286 goto phys_raid_lun;
14287
14288 mutex_enter(&mpt->m_mutex);
14289
14290 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14291 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14292 (uint32_t)ptgt->m_devhdl;
14293 rval = mptsas_get_sas_device_page0(mpt, page_address,
14294 &dev_hdl, &dev_sas_wwn, &dev_info,
14295 &physport, &phy_id, &pdev_hdl,
14296 &bay_num, &enclosure);
14297 if (rval != DDI_SUCCESS) {
14298 mutex_exit(&mpt->m_mutex);
14299 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14300 "parent device for handle %d.", page_address);
14301 ndi_rtn = NDI_FAILURE;
14302 goto phys_create_done;
14303 }
14304
14305 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14306 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14307 rval = mptsas_get_sas_device_page0(mpt, page_address,
14308 &dev_hdl, &pdev_sas_wwn, &pdev_info,
14309 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14310 if (rval != DDI_SUCCESS) {
14311 mutex_exit(&mpt->m_mutex);
14312 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14313 "device for handle %d.", page_address);
14314 ndi_rtn = NDI_FAILURE;
14315 goto phys_create_done;
14316 }
14317
14318 mutex_exit(&mpt->m_mutex);
14319
14320 /*
14321 * If this device direct attached to the controller
14322 * set the attached-port to the base wwid
14323 */
14324 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14325 != DEVINFO_DIRECT_ATTACHED) {
14326 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14327 pdev_sas_wwn);
14328 } else {
14329 /*
14330 * Update the iport's attached-port to guid
14331 */
14332 if (sas_wwn == 0) {
14333 (void) sprintf(wwn_str, "p%x", phy);
14334 } else {
14335 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14336 }
14337 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14338 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14339 DDI_PROP_SUCCESS) {
14340 mptsas_log(mpt, CE_WARN,
14341 "mptsas unable to create "
14342 "property for iport target-port"
14343 " %s (sas_wwn)",
14344 wwn_str);
14345 ndi_rtn = NDI_FAILURE;
14346 goto phys_create_done;
14347 }
14348
14349 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14350 mpt->un.m_base_wwid);
14351 }
14352
14353 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14354 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14355 DDI_PROP_SUCCESS) {
14356 mptsas_log(mpt, CE_WARN,
14357 "mptsas unable to create "
14358 "property for iport attached-port %s (sas_wwn)",
14359 attached_wwn_str);
14360 ndi_rtn = NDI_FAILURE;
14361 goto phys_create_done;
14362 }
14363
14364 if (IS_SATA_DEVICE(dev_info)) {
14365 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14366 *lun_dip, MPTSAS_VARIANT, "sata") !=
14367 DDI_PROP_SUCCESS) {
14368 mptsas_log(mpt, CE_WARN,
14369 "mptsas unable to create "
14370 "property for device variant ");
14371 ndi_rtn = NDI_FAILURE;
14372 goto phys_create_done;
14373 }
14374 }
14375
14376 if (IS_ATAPI_DEVICE(dev_info)) {
14377 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14378 *lun_dip, MPTSAS_VARIANT, "atapi") !=
14379 DDI_PROP_SUCCESS) {
14380 mptsas_log(mpt, CE_WARN,
14381 "mptsas unable to create "
14382 "property for device variant ");
14383 ndi_rtn = NDI_FAILURE;
14384 goto phys_create_done;
14385 }
14386 }
14387
14388 phys_raid_lun:
14389 /*
14390 * if this is a SAS controller, and the target is a SATA
14391 * drive, set the 'pm-capable' property for sd and if on
14392 * an OPL platform, also check if this is an ATAPI
14393 * device.
14394 */
14395 instance = ddi_get_instance(mpt->m_dip);
14396 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14397 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14398 NDBG2(("mptsas%d: creating pm-capable property, "
14399 "target %d", instance, target));
14400
14401 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
14402 *lun_dip, "pm-capable", 1)) !=
14403 DDI_PROP_SUCCESS) {
14404 mptsas_log(mpt, CE_WARN, "mptsas "
14405 "failed to create pm-capable "
14406 "property, target %d", target);
14407 ndi_rtn = NDI_FAILURE;
14408 goto phys_create_done;
14409 }
14410
14411 }
14412
14413 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
14414 /*
14415 * add 'obp-path' properties for devinfo
14416 */
14417 bzero(wwn_str, sizeof (wwn_str));
14418 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14419 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14420 if (guid) {
14421 (void) snprintf(component, MAXPATHLEN,
14422 "disk@w%s,%x", wwn_str, lun);
14423 } else {
14424 (void) snprintf(component, MAXPATHLEN,
14425 "disk@p%x,%x", phy, lun);
14426 }
14427 if (ddi_pathname_obp_set(*lun_dip, component)
14428 != DDI_SUCCESS) {
14429 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14430 "unable to set obp-path for SAS "
14431 "object %s", component);
14432 ndi_rtn = NDI_FAILURE;
14433 goto phys_create_done;
14434 }
14435 }
14436 /*
14437 * Create the phy-num property for non-raid disk
14438 */
14439 if (ptgt->m_phymask != 0) {
14440 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14441 *lun_dip, "phy-num", ptgt->m_phynum) !=
14442 DDI_PROP_SUCCESS) {
14443 mptsas_log(mpt, CE_WARN, "mptsas driver "
14444 "failed to create phy-num property for "
14445 "target %d", target);
14446 ndi_rtn = NDI_FAILURE;
14447 goto phys_create_done;
14448 }
14449 }
14450 phys_create_done:
14451 /*
14452 * If props were setup ok, online the lun
14453 */
14454 if (ndi_rtn == NDI_SUCCESS) {
14455 /*
14456 * Try to online the new node
14457 */
14458 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14459 }
14460 if (ndi_rtn == NDI_SUCCESS) {
14461 mutex_enter(&mpt->m_mutex);
14462 if (mptsas_set_led_status(mpt, ptgt, 0) !=
14463 DDI_SUCCESS) {
14464 NDBG14(("mptsas: clear LED for tgt %x "
14465 "failed", ptgt->m_slot_num));
14466 }
14467 mutex_exit(&mpt->m_mutex);
14468 }
14469
14470 /*
14471 * If success set rtn flag, else unwire alloc'd lun
14472 */
14473 if (ndi_rtn != NDI_SUCCESS) {
14474 NDBG12(("mptsas driver unable to online "
14475 "target %d lun %d", target, lun));
14476 ndi_prop_remove_all(*lun_dip);
14477 (void) ndi_devi_free(*lun_dip);
14478 *lun_dip = NULL;
14479 }
14480 }
14481
14482 scsi_hba_nodename_compatible_free(nodename, compatible);
14483
14484 if (wwn_str != NULL) {
14485 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14486 }
14487 if (component != NULL) {
14488 kmem_free(component, MAXPATHLEN);
14489 }
14490
14491
14492 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14493 }
14494
14495 static int
14496 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14497 {
14498 mptsas_t *mpt = DIP2MPT(pdip);
14499 struct smp_device smp_sd;
14500
14501 /* XXX An HBA driver should not be allocating an smp_device. */
14502 bzero(&smp_sd, sizeof (struct smp_device));
14503 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14504 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14505
14506 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14507 return (NDI_FAILURE);
14508 return (NDI_SUCCESS);
14509 }
14510
14511 static int
14512 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14513 {
14514 mptsas_t *mpt = DIP2MPT(pdip);
14515 mptsas_smp_t *psmp = NULL;
14516 int rval;
14517 int phymask;
14518
14519 /*
14520 * Get the physical port associated to the iport
14521 * PHYMASK TODO
14522 */
14523 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14524 "phymask", 0);
14525 /*
14526 * Find the smp node in hash table with specified sas address and
14527 * physical port
14528 */
14529 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14530 if (psmp == NULL) {
14531 return (DDI_FAILURE);
14532 }
14533
14534 rval = mptsas_online_smp(pdip, psmp, smp_dip);
14535
14536 return (rval);
14537 }
14538
/*
 * Allocate, configure and online an "smp" child node for the expander
 * described by smp_node under the iport pdip.  Sets the SMP wwn,
 * target-port and attached-port properties, refreshes the expander's
 * parent info from device page0, and — for a direct-attached expander on
 * an iport that has not yet been configured (no num-phys property) —
 * rebuilds the iport's SM-HBA phy properties as a side effect.
 *
 * *smp_dip returns the child node.  Returns DDI_SUCCESS when the node is
 * onlined, DDI_FAILURE otherwise (the partially built node is freed).
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char		wwn_str[MPTSAS_WWN_STRLEN];
	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
	int		ndi_rtn = NDI_FAILURE;
	int		rval = 0;
	mptsas_smp_t	dev_info;
	uint32_t	page_address;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint16_t	dev_hdl;
	uint64_t	sas_wwn;
	uint64_t	smp_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint16_t	pdev_hdl;
	uint8_t		numphys = 0;
	uint16_t	i = 0;
	char		phymask[MPTSAS_MAX_PHYS];
	char		*iport = NULL;
	mptsas_phymask_t	phy_mask = 0;
	uint16_t	attached_devhdl;
	uint16_t	bay_num, enclosure;

	/*
	 * NOTE(review): this unit address uses "%"PRIx64 (no "w" prefix, no
	 * zero padding), unlike the "w%016" format used for disk nodes —
	 * presumably it must match what mptsas_find_smp_child() expects.
	 */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured successfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured?  Just hand back the existing child. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* target-port uses the "w"-prefixed form of the address. */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page0: learn this expander's parent handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 of the parent: its SAS address and info. */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 of the expander itself. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		/* If the iport already has num-phys, nothing more to do. */
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Recover this iport's phy mask by matching its unit
		 * address against the per-phy masks, then count the phys.
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_phy_props(mpt, iport, pdip,
		    numphys, &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
14791
/*
 * smp transport routine: entry point for the SMP framework.  Wraps the
 * caller's SMP request from smp_pkt into an MPI2 SMP_PASSTHROUGH message,
 * sends it synchronously through mptsas_do_passthru(), and maps IOC/SAS
 * status codes to errno values in smp_pkt->smp_pkt_reason.
 *
 * Returns DDI_SUCCESS when the passthrough completed with good IOC and
 * SAS status, DDI_FAILURE otherwise (smp_pkt_reason set accordingly).
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): lengths passed to the IOC are reqsize/rspsize minus
	 * 4 — presumably excluding the 4-byte SMP CRC, which the IOC
	 * handles itself; confirm against the MPI2 spec.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	/* IOC was happy; the SAS-level status must also be good. */
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
14876
14877 /*
14878 * If we didn't get a match, we need to get sas page0 for each device, and
14879 * untill we get a match. If failed, return NULL
14880 */
14881 static mptsas_target_t *
14882 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
14883 {
14884 int i, j = 0;
14885 int rval = 0;
14886 uint16_t cur_handle;
14887 uint32_t page_address;
14888 mptsas_target_t *ptgt = NULL;
14889
14890 /*
14891 * PHY named device must be direct attached and attaches to
14892 * narrow port, if the iport is not parent of the device which
14893 * we are looking for.
14894 */
14895 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14896 if ((1 << i) & phymask)
14897 j++;
14898 }
14899
14900 if (j > 1)
14901 return (NULL);
14902
14903 /*
14904 * Must be a narrow port and single device attached to the narrow port
14905 * So the physical port num of device which is equal to the iport's
14906 * port num is the device what we are looking for.
14907 */
14908
14909 if (mpt->m_phy_info[phy].phy_mask != phymask)
14910 return (NULL);
14911
14912 mutex_enter(&mpt->m_mutex);
14913
14914 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
14915 MPTSAS_HASH_FIRST);
14916 while (ptgt != NULL) {
14917 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14918 mutex_exit(&mpt->m_mutex);
14919 return (ptgt);
14920 }
14921
14922 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
14923 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
14924 }
14925
14926 if (mpt->m_done_traverse_dev) {
14927 mutex_exit(&mpt->m_mutex);
14928 return (NULL);
14929 }
14930
14931 /* If didn't get a match, come here */
14932 cur_handle = mpt->m_dev_handle;
14933 for (; ; ) {
14934 ptgt = NULL;
14935 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14936 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
14937 rval = mptsas_get_target_device_info(mpt, page_address,
14938 &cur_handle, &ptgt);
14939 if ((rval == DEV_INFO_FAIL_PAGE0) ||
14940 (rval == DEV_INFO_FAIL_ALLOC)) {
14941 break;
14942 }
14943 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
14944 (rval == DEV_INFO_PHYS_DISK)) {
14945 continue;
14946 }
14947 mpt->m_dev_handle = cur_handle;
14948
14949 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
14950 break;
14951 }
14952 }
14953
14954 mutex_exit(&mpt->m_mutex);
14955 return (ptgt);
14956 }
14957
14958 /*
14959 * The ptgt->m_sas_wwn contains the wwid for each disk.
14960 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
14961 * If we didn't get a match, we need to get sas page0 for each device, and
14962 * untill we get a match
14963 * If failed, return NULL
14964 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_target_t	*tmp_tgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: the target may already be in the hash table. */
	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
	    &mpt->m_active->m_tgttbl, wwid, phymask);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume: refresh the RAID info and retry the
		 * lookup — RAID volumes are entered into the table by
		 * mptsas_get_raid_info(), not by device enumeration.
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
			    &mpt->m_active->m_tgttbl, wwid, phymask);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/*
	 * All devices have already been enumerated, so a miss in the
	 * hash table is definitive.
	 */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		/* Hard failure or end of enumeration: give up with NULL. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		/* Skip devices that can never match a SAS wwid lookup. */
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember where to resume enumeration on the next call. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
		    (tmp_tgt->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
15026
/*
 * Look up (or discover via expander page0 enumeration) the SMP node with
 * the given SAS address on the given phy mask.  Returns NULL if expander
 * traversal is already complete and no entry matches, or if page0 cannot
 * be read.  Acquires and releases mpt->m_mutex internally.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_smp_t	smp_node, *psmp = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: the SMP node may already be in the hash table. */
	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
	    wwid, phymask);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* Traversal finished; a miss in the table is definitive. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (; ; ) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			break;
		}
		/* Resume from the returned devhdl on subsequent calls. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		/* Enter each discovered expander into the table as we go. */
		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
		    (psmp->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15071
15072 /* helper functions using hash */
15073
15074 /*
15075 * Can't have duplicate entries for same devhdl,
15076 * if there are invalid entries, the devhdl should be set to 0xffff
15077 */
15078 static void *
15079 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15080 {
15081 mptsas_hash_data_t *data;
15082
15083 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15084 while (data != NULL) {
15085 if (data->devhdl == devhdl) {
15086 break;
15087 }
15088 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15089 }
15090 return (data);
15091 }
15092
15093 mptsas_target_t *
15094 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15095 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15096 {
15097 mptsas_target_t *tmp_tgt = NULL;
15098
15099 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15100 if (tmp_tgt != NULL) {
15101 NDBG20(("Hash item already exist"));
15102 tmp_tgt->m_deviceinfo = devinfo;
15103 tmp_tgt->m_devhdl = devhdl;
15104 return (tmp_tgt);
15105 }
15106 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15107 if (tmp_tgt == NULL) {
15108 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15109 return (NULL);
15110 }
15111 tmp_tgt->m_devhdl = devhdl;
15112 tmp_tgt->m_sas_wwn = wwid;
15113 tmp_tgt->m_deviceinfo = devinfo;
15114 tmp_tgt->m_phymask = phymask;
15115 tmp_tgt->m_phynum = phynum;
15116 /* Initialized the tgt structure */
15117 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15118 tmp_tgt->m_qfull_retry_interval =
15119 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15120 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15121
15122 mptsas_hash_add(hashtab, tmp_tgt);
15123
15124 return (tmp_tgt);
15125 }
15126
15127 static void
15128 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15129 mptsas_phymask_t phymask)
15130 {
15131 mptsas_target_t *tmp_tgt;
15132 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15133 if (tmp_tgt == NULL) {
15134 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15135 } else {
15136 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15137 }
15138 }
15139
15140 /*
15141 * Return the entry in the hash table
15142 */
15143 static mptsas_smp_t *
15144 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15145 {
15146 uint64_t key1 = data->m_sasaddr;
15147 mptsas_phymask_t key2 = data->m_phymask;
15148 mptsas_smp_t *ret_data;
15149
15150 ret_data = mptsas_hash_search(hashtab, key1, key2);
15151 if (ret_data != NULL) {
15152 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15153 return (ret_data);
15154 }
15155
15156 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15157 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15158 mptsas_hash_add(hashtab, ret_data);
15159 return (ret_data);
15160 }
15161
15162 static void
15163 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15164 mptsas_phymask_t phymask)
15165 {
15166 mptsas_smp_t *tmp_smp;
15167 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15168 if (tmp_smp == NULL) {
15169 cmn_err(CE_WARN, "Smp element not found, nothing to free");
15170 } else {
15171 kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15172 }
15173 }
15174
15175 /*
15176 * Hash operation functions
15177 * key1 is the sas_wwn, key2 is the phymask
15178 */
/*
 * Reset a hash table to the empty state: clear all chain heads and the
 * traversal cursor.  Does NOT free any nodes — use mptsas_hash_uninit()
 * for that.
 *
 * NOTE(review): the bzero length uses sizeof (mptsas_hash_node_t), but
 * head[] appears to hold node *pointers* (see hashtab->head[line] usage
 * in mptsas_hash_uninit).  If so, this over-clears the array by
 * sizeof (node) / sizeof (node *) — verify against the struct definition
 * in the header.
 */
static void
mptsas_hash_init(mptsas_hash_table_t *hashtab)
{
	if (hashtab == NULL) {
		return;
	}
	bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
	    MPTSAS_HASH_ARRAY_SIZE);
	hashtab->cur = NULL;
	hashtab->line = 0;
}
15190
15191 static void
15192 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15193 {
15194 uint16_t line = 0;
15195 mptsas_hash_node_t *cur = NULL, *last = NULL;
15196
15197 if (hashtab == NULL) {
15198 return;
15199 }
15200 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15201 cur = hashtab->head[line];
15202 while (cur != NULL) {
15203 last = cur;
15204 cur = cur->next;
15205 kmem_free(last->data, datalen);
15206 kmem_free(last, sizeof (mptsas_hash_node_t));
15207 }
15208 }
15209 }
15210
15211 /*
15212 * You must guarantee the element doesn't exist in the hash table
15213 * before you call mptsas_hash_add()
15214 */
15215 static void
15216 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
15217 {
15218 uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
15219 mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
15220 mptsas_hash_node_t **head = NULL;
15221 mptsas_hash_node_t *node = NULL;
15222
15223 if (hashtab == NULL) {
15224 return;
15225 }
15226 ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
15227 node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
15228 node->data = data;
15229
15230 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15231 if (*head == NULL) {
15232 *head = node;
15233 } else {
15234 node->next = *head;
15235 *head = node;
15236 }
15237 }
15238
15239 static void *
15240 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
15241 mptsas_phymask_t key2)
15242 {
15243 mptsas_hash_node_t **head = NULL;
15244 mptsas_hash_node_t *last = NULL, *cur = NULL;
15245 mptsas_hash_data_t *data;
15246 if (hashtab == NULL) {
15247 return (NULL);
15248 }
15249 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15250 cur = *head;
15251 while (cur != NULL) {
15252 data = cur->data;
15253 if ((data->key1 == key1) && (data->key2 == key2)) {
15254 if (last == NULL) {
15255 (*head) = cur->next;
15256 } else {
15257 last->next = cur->next;
15258 }
15259 kmem_free(cur, sizeof (mptsas_hash_node_t));
15260 return (data);
15261 } else {
15262 last = cur;
15263 cur = cur->next;
15264 }
15265 }
15266 return (NULL);
15267 }
15268
15269 static void *
15270 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
15271 mptsas_phymask_t key2)
15272 {
15273 mptsas_hash_node_t *cur = NULL;
15274 mptsas_hash_data_t *data;
15275 if (hashtab == NULL) {
15276 return (NULL);
15277 }
15278 cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
15279 while (cur != NULL) {
15280 data = cur->data;
15281 if ((data->key1 == key1) && (data->key2 == key2)) {
15282 return (data);
15283 } else {
15284 cur = cur->next;
15285 }
15286 }
15287 return (NULL);
15288 }
15289
/*
 * Stateful iterator over every payload in the table.  Call once with
 * MPTSAS_HASH_FIRST to restart from the beginning, then repeatedly with
 * MPTSAS_HASH_NEXT; returns NULL when the table is exhausted.  The
 * cursor lives in the table itself (hashtab->cur / hashtab->line), so
 * only one traversal per table may be in progress, and the caller must
 * hold whatever lock protects the table across the whole walk.
 */
static void *
mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
{
	mptsas_hash_node_t	*this = NULL;

	if (hashtab == NULL) {
		return (NULL);
	}

	if (pos == MPTSAS_HASH_FIRST) {
		/* Restart: begin at the head of chain 0. */
		hashtab->line = 0;
		hashtab->cur = NULL;
		this = hashtab->head[0];
	} else {
		if (hashtab->cur == NULL) {
			/* No traversal in progress (or already done). */
			return (NULL);
		} else {
			this = hashtab->cur->next;
		}
	}

	/* Skip over empty chains until we find a node or run out. */
	while (this == NULL) {
		hashtab->line++;
		if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
			/* the traverse reaches the end */
			hashtab->cur = NULL;
			return (NULL);
		} else {
			this = hashtab->head[hashtab->line];
		}
	}
	hashtab->cur = this;
	return (this->data);
}
15324
15325 /*
15326 * Functions for SGPIO LED support
15327 */
15328 static dev_info_t *
15329 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
15330 {
15331 dev_info_t *dip;
15332 int prop;
15333 dip = e_ddi_hold_devi_by_dev(dev, 0);
15334 if (dip == NULL)
15335 return (dip);
15336 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
15337 "phymask", 0);
15338 *phymask = (mptsas_phymask_t)prop;
15339 ddi_release_devi(dip);
15340 return (dip);
15341 }
15342 static mptsas_target_t *
15343 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
15344 {
15345 uint8_t phynum;
15346 uint64_t wwn;
15347 int lun;
15348 mptsas_target_t *ptgt = NULL;
15349
15350 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
15351 return (NULL);
15352 }
15353 if (addr[0] == 'w') {
15354 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
15355 } else {
15356 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
15357 }
15358 return (ptgt);
15359 }
15360
#ifdef MPTSAS_GET_LED
/*
 * Read the target's SEP slot status into *slotstatus via a
 * READ_STATUS enclosure-processor request.  Returns DDI_SUCCESS or
 * DDI_FAILURE from mptsas_send_sep().
 */
static int
mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *slotstatus)
{
	return (mptsas_send_sep(mpt, ptgt, &slotstatus,
	    MPI2_SEP_REQ_ACTION_READ_STATUS));
}
#endif
/*
 * Write the given SEP slot-status bits (LED state) for the target's
 * enclosure slot via a WRITE_STATUS enclosure-processor request.
 * Returns DDI_SUCCESS or DDI_FAILURE from mptsas_send_sep().
 */
static int
mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
{
	NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
	    slotstatus, ptgt->m_slot_num));
	return (mptsas_send_sep(mpt, ptgt, &slotstatus,
	    MPI2_SEP_REQ_ACTION_WRITE_STATUS));
}
15378 /*
15379 * send sep request, use enclosure/slot addressing
15380 */
15381 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
15382 uint32_t *status, uint8_t act)
15383 {
15384 Mpi2SepRequest_t req;
15385 Mpi2SepReply_t rep;
15386 int ret;
15387
15388 ASSERT(mutex_owned(&mpt->m_mutex));
15389
15390 bzero(&req, sizeof (req));
15391 bzero(&rep, sizeof (rep));
15392
15393 /* Do nothing for RAID volumes */
15394 if (ptgt->m_phymask == 0) {
15395 NDBG14(("mptsas_send_sep: Skip RAID volumes"));
15396 return (DDI_FAILURE);
15397 }
15398
15399 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
15400 req.Action = act;
15401 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
15402 req.EnclosureHandle = LE_16(ptgt->m_enclosure);
15403 req.Slot = LE_16(ptgt->m_slot_num);
15404 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15405 req.SlotStatus = LE_32(*status);
15406 }
15407 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
15408 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
15409 if (ret != 0) {
15410 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
15411 "Processor Request message error %d", ret);
15412 return (DDI_FAILURE);
15413 }
15414 /* do passthrough success, check the ioc status */
15415 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
15416 if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
15417 MPI2_IOCSTATUS_INVALID_FIELD) {
15418 mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
15419 "supported action, loginfo %x", act,
15420 LE_32(rep.IOCLogInfo));
15421 return (DDI_FAILURE);
15422 }
15423 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
15424 "status:%x", act, LE_16(rep.IOCStatus));
15425 return (DDI_FAILURE);
15426 }
15427 if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15428 *status = LE_32(rep.SlotStatus);
15429 }
15430
15431 return (DDI_SUCCESS);
15432 }
15433
15434 int
15435 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15436 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15437 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15438 {
15439 ddi_dma_cookie_t new_cookie;
15440 size_t alloc_len;
15441 uint_t ncookie;
15442
15443 if (cookiep == NULL)
15444 cookiep = &new_cookie;
15445
15446 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15447 NULL, dma_hdp) != DDI_SUCCESS) {
15448 dma_hdp = NULL;
15449 return (FALSE);
15450 }
15451
15452 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15453 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
15454 acc_hdp) != DDI_SUCCESS) {
15455 ddi_dma_free_handle(dma_hdp);
15456 dma_hdp = NULL;
15457 return (FALSE);
15458 }
15459
15460 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
15461 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
15462 cookiep, &ncookie) != DDI_DMA_MAPPED) {
15463 (void) ddi_dma_mem_free(acc_hdp);
15464 ddi_dma_free_handle(dma_hdp);
15465 dma_hdp = NULL;
15466 return (FALSE);
15467 }
15468
15469 return (TRUE);
15470 }
15471
15472 void
15473 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15474 {
15475 if (*dma_hdp == NULL)
15476 return;
15477
15478 (void) ddi_dma_unbind_handle(*dma_hdp);
15479 (void) ddi_dma_mem_free(acc_hdp);
15480 ddi_dma_free_handle(dma_hdp);
15481 dma_hdp = NULL;
15482 }