1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2019 Nexenta Systems, Inc.
25 * Copyright (c) 2017, Joyent, Inc.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms of all code within
35 * this file that is exclusively owned by LSI, with or without
36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 * License requirements, the following conditions are met:
38 *
39 * Neither the name of the author nor the names of its contributors may be
40 * used to endorse or promote products derived from this software without
41 * specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 */
56
57 /*
58 * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
59 *
60 */
61
62 #if defined(lint) || defined(DEBUG)
63 #define MPTSAS_DEBUG
64 #endif
65
66 /*
67 * standard header files.
68 */
69 #include <sys/note.h>
70 #include <sys/scsi/scsi.h>
71 #include <sys/pci.h>
72 #include <sys/file.h>
73 #include <sys/policy.h>
74 #include <sys/model.h>
75 #include <sys/sysevent.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dr.h>
78 #include <sys/sata/sata_defs.h>
79 #include <sys/sata/sata_hba.h>
80 #include <sys/scsi/generic/sas.h>
81 #include <sys/scsi/impl/scsi_sas.h>
82 #include <sys/sdt.h>
83 #include <sys/mdi_impldefs.h>
84
85 #pragma pack(1)
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
91 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
92 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
93 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
94 #pragma pack()
95
96 /*
97 * private header files.
98 *
99 */
100 #include <sys/scsi/impl/scsi_reset_notify.h>
101 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
102 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
103 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
104 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
105 #include <sys/raidioctl.h>
106
107 /*
108 * FMA header files
109 */
110 #include <sys/ddifm.h>
111 #include <sys/fm/protocol.h>
112 #include <sys/fm/util.h>
113 #include <sys/fm/io/ddi.h>
114
115 /*
116 * autoconfiguration data and routines.
117 */
118 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
119 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
120 static int mptsas_power(dev_info_t *dip, int component, int level);
121
122 /*
123 * cb_ops function
124 */
125 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
126 cred_t *credp, int *rval);
127 #ifdef __sparc
128 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
129 #else /* __sparc */
130 static int mptsas_quiesce(dev_info_t *devi);
131 #endif /* __sparc */
132
133 /*
134 * Resource initilaization for hardware
135 */
136 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
137 static void mptsas_disable_bus_master(mptsas_t *mpt);
138 static void mptsas_hba_fini(mptsas_t *mpt);
139 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
140 static int mptsas_hba_setup(mptsas_t *mpt);
141 static void mptsas_hba_teardown(mptsas_t *mpt);
142 static int mptsas_config_space_init(mptsas_t *mpt);
143 static void mptsas_config_space_fini(mptsas_t *mpt);
144 static void mptsas_iport_register(mptsas_t *mpt);
145 static int mptsas_smp_setup(mptsas_t *mpt);
146 static void mptsas_smp_teardown(mptsas_t *mpt);
147 static int mptsas_enc_setup(mptsas_t *mpt);
148 static void mptsas_enc_teardown(mptsas_t *mpt);
149 static int mptsas_cache_create(mptsas_t *mpt);
150 static void mptsas_cache_destroy(mptsas_t *mpt);
151 static int mptsas_alloc_request_frames(mptsas_t *mpt);
152 static int mptsas_alloc_sense_bufs(mptsas_t *mpt);
153 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
154 static int mptsas_alloc_free_queue(mptsas_t *mpt);
155 static int mptsas_alloc_post_queue(mptsas_t *mpt);
156 static void mptsas_alloc_reply_args(mptsas_t *mpt);
157 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
158 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
159 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
160 static void mptsas_update_hashtab(mptsas_t *mpt);
161
162 /*
163 * SCSA function prototypes
164 */
165 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
166 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
167 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
168 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
169 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
170 int tgtonly);
171 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
172 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
173 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
174 int tgtlen, int flags, int (*callback)(), caddr_t arg);
175 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
176 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
177 struct scsi_pkt *pkt);
178 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
179 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
180 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
181 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
182 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
183 void (*callback)(caddr_t), caddr_t arg);
184 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
185 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
186 static int mptsas_scsi_quiesce(dev_info_t *dip);
187 static int mptsas_scsi_unquiesce(dev_info_t *dip);
188 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
189 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
190
191 /*
192 * SMP functions
193 */
194 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
195
196 /*
197 * internal function prototypes.
198 */
199 static void mptsas_list_add(mptsas_t *mpt);
200 static void mptsas_list_del(mptsas_t *mpt);
201
202 static int mptsas_quiesce_bus(mptsas_t *mpt);
203 static int mptsas_unquiesce_bus(mptsas_t *mpt);
204
205 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
206 static void mptsas_free_handshake_msg(mptsas_t *mpt);
207
208 static void mptsas_ncmds_checkdrain(void *arg);
209
210 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
211 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
212 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
213 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
214
215 static int mptsas_do_detach(dev_info_t *dev);
216 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
217 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
218 struct scsi_pkt *pkt);
219 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
220
221 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
222 static void mptsas_handle_event(void *args);
223 static int mptsas_handle_event_sync(void *args);
224 static void mptsas_handle_dr(void *args);
225 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
226 dev_info_t *pdip);
227
228 static void mptsas_restart_cmd(void *);
229
230 static void mptsas_flush_hba(mptsas_t *mpt);
231 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
232 uint8_t tasktype);
233 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
234 uchar_t reason, uint_t stat);
235
236 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
237 static void mptsas_process_intr(mptsas_t *mpt,
238 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
239 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
240 pMpi2ReplyDescriptorsUnion_t reply_desc);
241 static void mptsas_handle_address_reply(mptsas_t *mpt,
242 pMpi2ReplyDescriptorsUnion_t reply_desc);
243 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
244 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
245 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
246
247 static void mptsas_watch(void *arg);
248 static void mptsas_watchsubr(mptsas_t *mpt);
249 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
250
251 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
252 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
253 uint8_t *data, uint32_t request_size, uint32_t reply_size,
254 uint32_t data_size, uint32_t direction, uint8_t *dataout,
255 uint32_t dataout_size, short timeout, int mode);
256 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
257
258 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
259 uint32_t unique_id);
260 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
261 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
262 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
263 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
264 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
265 uint32_t diag_type);
266 static int mptsas_diag_register(mptsas_t *mpt,
267 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
268 static int mptsas_diag_unregister(mptsas_t *mpt,
269 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
270 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
271 uint32_t *return_code);
272 static int mptsas_diag_read_buffer(mptsas_t *mpt,
273 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
274 uint32_t *return_code, int ioctl_mode);
275 static int mptsas_diag_release(mptsas_t *mpt,
276 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
277 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
278 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
279 int ioctl_mode);
280 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
281 int mode);
282
283 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
284 int cmdlen, int tgtlen, int statuslen, int kf);
285 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
286
287 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
288 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
289
290 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
291 int kmflags);
292 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
293
294 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
295 mptsas_cmd_t *cmd);
296 static void mptsas_check_task_mgt(mptsas_t *mpt,
297 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
298 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
299 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
300 int *resid);
301
302 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
303 static void mptsas_free_active_slots(mptsas_t *mpt);
304 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
305
306 static void mptsas_restart_hba(mptsas_t *mpt);
307 static void mptsas_restart_waitq(mptsas_t *mpt);
308
309 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
310 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
311 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
312
313 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
314 static void mptsas_doneq_empty(mptsas_t *mpt);
315 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
316
317 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
318 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
319 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
320 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
321
322
323 static void mptsas_start_watch_reset_delay();
324 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
325 static void mptsas_watch_reset_delay(void *arg);
326 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
327
328 /*
329 * helper functions
330 */
331 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
332
333 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
334 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
335 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
336 int lun);
337 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
338 int lun);
339 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
340 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
341
342 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
343 int *lun);
344 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
345
346 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
347 mptsas_phymask_t phymask, uint8_t phy);
348 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
349 mptsas_phymask_t phymask, uint64_t wwid);
350 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
351 mptsas_phymask_t phymask, uint64_t wwid);
352
353 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
354 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
355
356 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
357 uint16_t *handle, mptsas_target_t **pptgt);
358 static void mptsas_update_phymask(mptsas_t *mpt);
359
360 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep,
361 uint16_t idx);
362 static int mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
363 uint32_t *status, uint8_t cmd);
364 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
365 mptsas_phymask_t *phymask);
366 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
367 mptsas_phymask_t phymask);
368
369
370 /*
371 * Enumeration / DR functions
372 */
373 static void mptsas_config_all(dev_info_t *pdip);
374 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
375 dev_info_t **lundip);
376 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
377 dev_info_t **lundip);
378
379 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
380 static int mptsas_offline_target(dev_info_t *pdip, char *name);
381
382 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
383 dev_info_t **dip);
384
385 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
386 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
387 dev_info_t **dip, mptsas_target_t *ptgt);
388
389 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
390 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
391
392 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
393 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
394 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
395 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
396 int lun);
397
398 static void mptsas_offline_missed_luns(dev_info_t *pdip,
399 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
400 static int mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip);
401
402 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
403 dev_info_t **smp_dip);
404 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node);
405
406 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
407 int mode, int *rval);
408 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
409 int mode, int *rval);
410 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
411 int mode, int *rval);
412 static void mptsas_record_event(void *args);
413 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
414 int mode);
415
416 mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t, uint64_t,
417 uint32_t, mptsas_phymask_t, uint8_t);
418 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
419 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
420 dev_info_t **smp_dip);
421
422 /*
423 * Power management functions
424 */
425 static int mptsas_get_pci_cap(mptsas_t *mpt);
426 static int mptsas_init_pm(mptsas_t *mpt);
427
428 /*
429 * MPT MSI tunable:
430 *
431 * By default MSI is enabled on all supported platforms.
432 */
433 boolean_t mptsas_enable_msi = B_TRUE;
434 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
435
436 /*
437 * Global switch for use of MPI2.5 FAST PATH.
438 * We don't really know what FAST PATH actually does, so if it is suspected
439 * to cause problems it can be turned off by setting this variable to B_FALSE.
440 */
441 boolean_t mptsas_use_fastpath = B_TRUE;
442
443 static int mptsas_register_intrs(mptsas_t *);
444 static void mptsas_unregister_intrs(mptsas_t *);
445 static int mptsas_add_intrs(mptsas_t *, int);
446 static void mptsas_rem_intrs(mptsas_t *);
447
448 /*
449 * FMA Prototypes
450 */
451 static void mptsas_fm_init(mptsas_t *mpt);
452 static void mptsas_fm_fini(mptsas_t *mpt);
453 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
454
455 extern pri_t minclsyspri, maxclsyspri;
456
457 /*
458 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
459 * under this device that the paths to a physical device are created when
460 * MPxIO is used.
461 */
462 extern dev_info_t *scsi_vhci_dip;
463
464 /*
465 * Tunable timeout value for Inquiry VPD page 0x83
466 * By default the value is 30 seconds.
467 */
468 int mptsas_inq83_retry_timeout = 30;
469
470 /*
471 * Tunable for default SCSI pkt timeout. Defaults to 5 seconds, which should
472 * be plenty for INQUIRY and REPORT_LUNS, which are the only commands currently
473 * issued by mptsas directly.
474 */
475 int mptsas_scsi_pkt_time = 5;
476
477 /*
478 * This is used to allocate memory for message frame storage, not for
479 * data I/O DMA. All message frames must be stored in the first 4G of
480 * physical memory.
481 */
482 ddi_dma_attr_t mptsas_dma_attrs = {
483 DMA_ATTR_V0, /* attribute layout version */
484 0x0ull, /* address low - should be 0 (longlong) */
485 0xffffffffull, /* address high - 32-bit max range */
486 0x00ffffffull, /* count max - max DMA object size */
487 4, /* allocation alignment requirements */
488 0x78, /* burstsizes - binary encoded values */
489 1, /* minxfer - gran. of DMA engine */
490 0x00ffffffull, /* maxxfer - gran. of DMA engine */
491 0xffffffffull, /* max segment size (DMA boundary) */
492 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
493 512, /* granularity - device transfer size */
494 0 /* flags, set to 0 */
495 };
496
497 /*
498 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
499 * physical addresses are supported.)
500 */
501 ddi_dma_attr_t mptsas_dma_attrs64 = {
502 DMA_ATTR_V0, /* attribute layout version */
503 0x0ull, /* address low - should be 0 (longlong) */
504 0xffffffffffffffffull, /* address high - 64-bit max */
505 0x00ffffffull, /* count max - max DMA object size */
506 4, /* allocation alignment requirements */
507 0x78, /* burstsizes - binary encoded values */
508 1, /* minxfer - gran. of DMA engine */
509 0x00ffffffull, /* maxxfer - gran. of DMA engine */
510 0xffffffffull, /* max segment size (DMA boundary) */
511 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
512 512, /* granularity - device transfer size */
513 0 /* flags, set to 0 */
514 };
515
516 ddi_device_acc_attr_t mptsas_dev_attr = {
517 DDI_DEVICE_ATTR_V1,
518 DDI_STRUCTURE_LE_ACC,
519 DDI_STRICTORDER_ACC,
520 DDI_DEFAULT_ACC
521 };
522
523 static struct cb_ops mptsas_cb_ops = {
524 scsi_hba_open, /* open */
525 scsi_hba_close, /* close */
526 nodev, /* strategy */
527 nodev, /* print */
528 nodev, /* dump */
529 nodev, /* read */
530 nodev, /* write */
531 mptsas_ioctl, /* ioctl */
532 nodev, /* devmap */
533 nodev, /* mmap */
534 nodev, /* segmap */
535 nochpoll, /* chpoll */
536 ddi_prop_op, /* cb_prop_op */
537 NULL, /* streamtab */
538 D_MP, /* cb_flag */
539 CB_REV, /* rev */
540 nodev, /* aread */
541 nodev /* awrite */
542 };
543
544 static struct dev_ops mptsas_ops = {
545 DEVO_REV, /* devo_rev, */
546 0, /* refcnt */
547 ddi_no_info, /* info */
548 nulldev, /* identify */
549 nulldev, /* probe */
550 mptsas_attach, /* attach */
551 mptsas_detach, /* detach */
552 #ifdef __sparc
553 mptsas_reset,
554 #else
555 nodev, /* reset */
556 #endif /* __sparc */
557 &mptsas_cb_ops, /* driver operations */
558 NULL, /* bus operations */
559 mptsas_power, /* power management */
560 #ifdef __sparc
561 ddi_quiesce_not_needed
562 #else
563 mptsas_quiesce /* quiesce */
564 #endif /* __sparc */
565 };
566
567
568 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
569
570 static struct modldrv modldrv = {
571 &mod_driverops, /* Type of module. This one is a driver */
572 MPTSAS_MOD_STRING, /* Name of the module. */
573 &mptsas_ops, /* driver ops */
574 };
575
576 static struct modlinkage modlinkage = {
577 MODREV_1, &modldrv, NULL
578 };
579 #define TARGET_PROP "target"
580 #define LUN_PROP "lun"
581 #define LUN64_PROP "lun64"
582 #define SAS_PROP "sas-mpt"
583 #define MDI_GUID "wwn"
584 #define NDI_GUID "guid"
585 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
586
587 /*
588 * Local static data
589 */
590 #if defined(MPTSAS_DEBUG)
591 /*
592 * Flags to indicate which debug messages are to be printed and which go to the
593 * debug log ring buffer. Default is to not print anything, and to log
594 * everything except the watchsubr() output which normally happens every second.
595 */
596 uint32_t mptsas_debugprt_flags = 0x0;
597 uint32_t mptsas_debuglog_flags = ~(1U << 30);
598 #endif /* defined(MPTSAS_DEBUG) */
599 uint32_t mptsas_debug_resets = 0;
600
601 static kmutex_t mptsas_global_mutex;
602 static void *mptsas_state; /* soft state ptr */
603 static krwlock_t mptsas_global_rwlock;
604
605 static kmutex_t mptsas_log_mutex;
606 static char mptsas_log_buf[256];
607 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
608
609 static mptsas_t *mptsas_head, *mptsas_tail;
610 static clock_t mptsas_scsi_watchdog_tick;
611 static clock_t mptsas_tick;
612 static timeout_id_t mptsas_reset_watch;
613 static timeout_id_t mptsas_timeout_id;
614 static int mptsas_timeouts_enabled = 0;
615
616 /*
617 * Default length for extended auto request sense buffers.
618 * All sense buffers need to be under the same alloc because there
619 * is only one common top 32bits (of 64bits) address register.
620 * Most requests only require 32 bytes, but some request >256.
621 * We use rmalloc()/rmfree() on this additional memory to manage the
622 * "extended" requests.
623 */
624 int mptsas_extreq_sense_bufsize = 256*64;
625
626 /*
627 * We believe that all software resrictions of having to run with DMA
628 * attributes to limit allocation to the first 4G are removed.
629 * However, this flag remains to enable quick switchback should suspicious
630 * problems emerge.
631 * Note that scsi_alloc_consistent_buf() does still adhere to allocating
632 * 32 bit addressable memory, but we can cope if that is changed now.
633 */
634 int mptsas_use_64bit_msgaddr = 1;
635
636 /*
637 * warlock directives
638 */
639 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
640 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
641 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
642 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
643 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
644 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
645
646 /*
647 * SM - HBA statics
648 */
649 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
650
651 #ifdef MPTSAS_DEBUG
652 void debug_enter(char *);
653 #endif
654
655 /*
656 * Notes:
657 * - scsi_hba_init(9F) initializes SCSI HBA modules
658 * - must call scsi_hba_fini(9F) if modload() fails
659 */
660 int
661 _init(void)
662 {
663 int status;
664 /* CONSTCOND */
665 ASSERT(NO_COMPETING_THREADS);
666
667 NDBG0(("_init"));
668
669 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
670 MPTSAS_INITIAL_SOFT_SPACE);
671 if (status != 0) {
672 return (status);
673 }
674
675 if ((status = scsi_hba_init(&modlinkage)) != 0) {
676 ddi_soft_state_fini(&mptsas_state);
677 return (status);
678 }
679
680 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
681 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
682 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
683
684 if ((status = mod_install(&modlinkage)) != 0) {
685 mutex_destroy(&mptsas_log_mutex);
686 rw_destroy(&mptsas_global_rwlock);
687 mutex_destroy(&mptsas_global_mutex);
688 ddi_soft_state_fini(&mptsas_state);
689 scsi_hba_fini(&modlinkage);
690 }
691
692 return (status);
693 }
694
695 /*
696 * Notes:
697 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
698 */
699 int
700 _fini(void)
701 {
702 int status;
703 /* CONSTCOND */
704 ASSERT(NO_COMPETING_THREADS);
705
706 NDBG0(("_fini"));
707
708 if ((status = mod_remove(&modlinkage)) == 0) {
709 ddi_soft_state_fini(&mptsas_state);
710 scsi_hba_fini(&modlinkage);
711 mutex_destroy(&mptsas_global_mutex);
712 rw_destroy(&mptsas_global_rwlock);
713 mutex_destroy(&mptsas_log_mutex);
714 }
715 return (status);
716 }
717
718 /*
719 * The loadable-module _info(9E) entry point
720 */
721 int
722 _info(struct modinfo *modinfop)
723 {
724 /* CONSTCOND */
725 ASSERT(NO_COMPETING_THREADS);
726 NDBG0(("mptsas _info"));
727
728 return (mod_info(&modlinkage, modinfop));
729 }
730
731 static int
732 mptsas_target_eval_devhdl(const void *op, void *arg)
733 {
734 uint16_t dh = *(uint16_t *)arg;
735 const mptsas_target_t *tp = op;
736
737 return ((int)tp->m_devhdl - (int)dh);
738 }
739
740 static int
741 mptsas_target_eval_nowwn(const void *op, void *arg)
742 {
743 uint8_t phy = *(uint8_t *)arg;
744 const mptsas_target_t *tp = op;
745
746 if (tp->m_addr.mta_wwn != 0)
747 return (-1);
748
749 return ((int)tp->m_phynum - (int)phy);
750 }
751
752 static int
753 mptsas_smp_eval_devhdl(const void *op, void *arg)
754 {
755 uint16_t dh = *(uint16_t *)arg;
756 const mptsas_smp_t *sp = op;
757
758 return ((int)sp->m_devhdl - (int)dh);
759 }
760
761 static uint64_t
762 mptsas_target_addr_hash(const void *tp)
763 {
764 const mptsas_target_addr_t *tap = tp;
765
766 return ((tap->mta_wwn & 0xffffffffffffULL) |
767 ((uint64_t)tap->mta_phymask << 48));
768 }
769
770 static int
771 mptsas_target_addr_cmp(const void *a, const void *b)
772 {
773 const mptsas_target_addr_t *aap = a;
774 const mptsas_target_addr_t *bap = b;
775
776 if (aap->mta_wwn < bap->mta_wwn)
777 return (-1);
778 if (aap->mta_wwn > bap->mta_wwn)
779 return (1);
780 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
781 }
782
783 static uint64_t
784 mptsas_tmp_target_hash(const void *tp)
785 {
786 return ((uint64_t)(uintptr_t)tp);
787 }
788
/*
 * Comparator for the temporary-target hash: order entries by raw
 * pointer value.
 *
 * Bug fix: the original tested "b < a" for the less-than case, which
 * is the same condition as "a > b", leaving the -1 branch unreachable
 * and making the comparator report equality whenever a < b.
 */
static int
mptsas_tmp_target_cmp(const void *a, const void *b)
{
	if (a > b)
		return (1);
	if (a < b)
		return (-1);

	return (0);
}
799
/*
 * refhash destructor callback: release the storage for a target entry.
 */
static void
mptsas_target_free(void *op)
{
	kmem_free(op, sizeof (mptsas_target_t));
}
805
/*
 * refhash destructor callback: release the storage for an SMP entry.
 */
static void
mptsas_smp_free(void *op)
{
	kmem_free(op, sizeof (mptsas_smp_t));
}
811
/*
 * Drain and destroy all of an instance's target hashes (regular
 * targets, SMP targets, and the temporary-target hash).
 *
 * NOTE(review): elements are removed while iterating with
 * refhash_next() after refhash_remove(); this relies on refhash
 * deferring the actual free of a removed, still-referenced element --
 * confirm against the refhash implementation (mptsas_hash.c).
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_tmp_targets);
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	/* Clear the pointers so a stale reference is caught immediately. */
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
832
833 static int
834 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
835 {
836 dev_info_t *pdip;
837 mptsas_t *mpt;
838 scsi_hba_tran_t *hba_tran;
839 char *iport = NULL;
840 char phymask[MPTSAS_MAX_PHYS];
841 mptsas_phymask_t phy_mask = 0;
842 int dynamic_port = 0;
843 uint32_t page_address;
844 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
845 int rval = DDI_FAILURE;
846 int i = 0;
847 uint8_t numphys = 0;
848 uint8_t phy_id;
849 uint8_t phy_port = 0;
850 uint16_t attached_devhdl = 0;
851 uint32_t dev_info;
852 uint64_t attached_sas_wwn;
853 uint16_t dev_hdl;
854 uint16_t pdev_hdl;
855 uint16_t bay_num, enclosure, io_flags;
856 char attached_wwnstr[MPTSAS_WWN_STRLEN];
857
858 /* CONSTCOND */
859 ASSERT(NO_COMPETING_THREADS);
860
861 switch (cmd) {
862 case DDI_ATTACH:
863 break;
864
865 case DDI_RESUME:
866 /*
867 * If this a scsi-iport node, nothing to do here.
868 */
869 return (DDI_SUCCESS);
870
871 default:
872 return (DDI_FAILURE);
873 }
874
875 pdip = ddi_get_parent(dip);
876
877 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
878 NULL) {
879 cmn_err(CE_WARN, "Failed attach iport because fail to "
880 "get tran vector for the HBA node");
881 return (DDI_FAILURE);
882 }
883
884 mpt = TRAN2MPT(hba_tran);
885 ASSERT(mpt != NULL);
886 if (mpt == NULL)
887 return (DDI_FAILURE);
888
889 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
890 NULL) {
891 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
892 "get tran vector for the iport node");
893 return (DDI_FAILURE);
894 }
895
896 /*
897 * Overwrite parent's tran_hba_private to iport's tran vector
898 */
899 hba_tran->tran_hba_private = mpt;
900
901 ddi_report_dev(dip);
902
903 /*
904 * Get SAS address for initiator port according dev_handle
905 */
906 iport = ddi_get_name_addr(dip);
907 if (iport && strncmp(iport, "v0", 2) == 0) {
908 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
909 MPTSAS_VIRTUAL_PORT, 1) !=
910 DDI_PROP_SUCCESS) {
911 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
912 MPTSAS_VIRTUAL_PORT);
913 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
914 "prop update failed");
915 return (DDI_FAILURE);
916 }
917 return (DDI_SUCCESS);
918 }
919
920 mutex_enter(&mpt->m_mutex);
921 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
922 bzero(phymask, sizeof (phymask));
923 (void) sprintf(phymask,
924 "%x", mpt->m_phy_info[i].phy_mask);
925 if (strcmp(phymask, iport) == 0) {
926 break;
927 }
928 }
929
930 if (i == MPTSAS_MAX_PHYS) {
931 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
932 "seems not exist", iport);
933 mutex_exit(&mpt->m_mutex);
934 return (DDI_FAILURE);
935 }
936
937 phy_mask = mpt->m_phy_info[i].phy_mask;
938
939 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
940 dynamic_port = 1;
941 else
942 dynamic_port = 0;
943
944 /*
945 * Update PHY info for smhba
946 */
947 if (mptsas_smhba_phy_init(mpt)) {
948 mutex_exit(&mpt->m_mutex);
949 mptsas_log(mpt, CE_WARN, "mptsas phy update "
950 "failed");
951 return (DDI_FAILURE);
952 }
953
954 mutex_exit(&mpt->m_mutex);
955
956 numphys = 0;
957 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
958 if ((phy_mask >> i) & 0x01) {
959 numphys++;
960 }
961 }
962
963 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
964 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
965 mpt->un.m_base_wwid);
966
967 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
968 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
969 DDI_PROP_SUCCESS) {
970 (void) ddi_prop_remove(DDI_DEV_T_NONE,
971 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
972 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
973 "prop update failed");
974 return (DDI_FAILURE);
975 }
976 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
977 MPTSAS_NUM_PHYS, numphys) !=
978 DDI_PROP_SUCCESS) {
979 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
980 return (DDI_FAILURE);
981 }
982
983 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
984 "phymask", phy_mask) !=
985 DDI_PROP_SUCCESS) {
986 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
987 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
988 "prop update failed");
989 return (DDI_FAILURE);
990 }
991
992 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
993 "dynamic-port", dynamic_port) !=
994 DDI_PROP_SUCCESS) {
995 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
996 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
997 "prop update failed");
998 return (DDI_FAILURE);
999 }
1000 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
1001 MPTSAS_VIRTUAL_PORT, 0) !=
1002 DDI_PROP_SUCCESS) {
1003 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
1004 MPTSAS_VIRTUAL_PORT);
1005 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
1006 "prop update failed");
1007 return (DDI_FAILURE);
1008 }
1009 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
1010 &attached_devhdl);
1011
1012 mutex_enter(&mpt->m_mutex);
1013 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
1014 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
1015 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
1016 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
1017 &pdev_hdl, &bay_num, &enclosure, &io_flags);
1018 if (rval != DDI_SUCCESS) {
1019 mptsas_log(mpt, CE_WARN,
1020 "Failed to get device page0 for handle:%d",
1021 attached_devhdl);
1022 mutex_exit(&mpt->m_mutex);
1023 return (DDI_FAILURE);
1024 }
1025
1026 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1027 bzero(phymask, sizeof (phymask));
1028 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
1029 if (strcmp(phymask, iport) == 0) {
1030 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
1031 "%x",
1032 mpt->m_phy_info[i].phy_mask);
1033 }
1034 }
1035 mutex_exit(&mpt->m_mutex);
1036
1037 bzero(attached_wwnstr, sizeof (attached_wwnstr));
1038 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
1039 attached_sas_wwn);
1040 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1041 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1042 DDI_PROP_SUCCESS) {
1043 (void) ddi_prop_remove(DDI_DEV_T_NONE,
1044 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1045 return (DDI_FAILURE);
1046 }
1047
1048 /* Create kstats for each phy on this iport */
1049
1050 mptsas_create_phy_stats(mpt, iport, dip);
1051
1052 /*
1053 * register sas hba iport with mdi (MPxIO/vhci)
1054 */
1055 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1056 dip, 0) == MDI_SUCCESS) {
1057 mpt->m_mpxio_enable = TRUE;
1058 }
1059 return (DDI_SUCCESS);
1060 }
1061
1062 /*
1063 * Notes:
1064 * Set up all device state and allocate data structures,
1065 * mutexes, condition variables, etc. for device operation.
1066 * Add interrupts needed.
1067 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1068 */
1069 static int
1070 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1071 {
1072 mptsas_t *mpt = NULL;
1073 int instance, i, j;
1074 int doneq_thread_num;
1075 char intr_added = 0;
1076 char map_setup = 0;
1077 char config_setup = 0;
1078 char hba_attach_setup = 0;
1079 char smp_attach_setup = 0;
1080 char enc_attach_setup = 0;
1081 char mutex_init_done = 0;
1082 char event_taskq_create = 0;
1083 char dr_taskq_create = 0;
1084 char doneq_thread_create = 0;
1085 char added_watchdog = 0;
1086 scsi_hba_tran_t *hba_tran;
1087 uint_t mem_bar = MEM_SPACE;
1088 int rval = DDI_FAILURE;
1089
1090 /* CONSTCOND */
1091 ASSERT(NO_COMPETING_THREADS);
1092
1093 if (scsi_hba_iport_unit_address(dip)) {
1094 return (mptsas_iport_attach(dip, cmd));
1095 }
1096
1097 switch (cmd) {
1098 case DDI_ATTACH:
1099 break;
1100
1101 case DDI_RESUME:
1102 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1103 return (DDI_FAILURE);
1104
1105 mpt = TRAN2MPT(hba_tran);
1106
1107 if (!mpt) {
1108 return (DDI_FAILURE);
1109 }
1110
1111 /*
1112 * Reset hardware and softc to "no outstanding commands"
1113 * Note that a check condition can result on first command
1114 * to a target.
1115 */
1116 mutex_enter(&mpt->m_mutex);
1117
1118 /*
1119 * raise power.
1120 */
1121 if (mpt->m_options & MPTSAS_OPT_PM) {
1122 mutex_exit(&mpt->m_mutex);
1123 (void) pm_busy_component(dip, 0);
1124 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1125 if (rval == DDI_SUCCESS) {
1126 mutex_enter(&mpt->m_mutex);
1127 } else {
1128 /*
1129 * The pm_raise_power() call above failed,
1130 * and that can only occur if we were unable
1131 * to reset the hardware. This is probably
1132 * due to unhealty hardware, and because
1133 * important filesystems(such as the root
1134 * filesystem) could be on the attached disks,
1135 * it would not be a good idea to continue,
1136 * as we won't be entirely certain we are
1137 * writing correct data. So we panic() here
1138 * to not only prevent possible data corruption,
1139 * but to give developers or end users a hope
1140 * of identifying and correcting any problems.
1141 */
1142 fm_panic("mptsas could not reset hardware "
1143 "during resume");
1144 }
1145 }
1146
1147 mpt->m_suspended = 0;
1148
1149 /*
1150 * Reinitialize ioc
1151 */
1152 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1153 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1154 mutex_exit(&mpt->m_mutex);
1155 if (mpt->m_options & MPTSAS_OPT_PM) {
1156 (void) pm_idle_component(dip, 0);
1157 }
1158 fm_panic("mptsas init chip fail during resume");
1159 }
1160 /*
1161 * mptsas_update_driver_data needs interrupts so enable them
1162 * first.
1163 */
1164 MPTSAS_ENABLE_INTR(mpt);
1165 mptsas_update_driver_data(mpt);
1166
1167 /* start requests, if possible */
1168 mptsas_restart_hba(mpt);
1169
1170 mutex_exit(&mpt->m_mutex);
1171
1172 /*
1173 * Restart watch thread
1174 */
1175 mutex_enter(&mptsas_global_mutex);
1176 if (mptsas_timeout_id == 0) {
1177 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1178 mptsas_tick);
1179 mptsas_timeouts_enabled = 1;
1180 }
1181 mutex_exit(&mptsas_global_mutex);
1182
1183 /* report idle status to pm framework */
1184 if (mpt->m_options & MPTSAS_OPT_PM) {
1185 (void) pm_idle_component(dip, 0);
1186 }
1187
1188 return (DDI_SUCCESS);
1189
1190 default:
1191 return (DDI_FAILURE);
1192
1193 }
1194
1195 instance = ddi_get_instance(dip);
1196
1197 /*
1198 * Allocate softc information.
1199 */
1200 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1201 mptsas_log(NULL, CE_WARN, "cannot allocate soft state");
1202 goto fail;
1203 }
1204
1205 mpt = ddi_get_soft_state(mptsas_state, instance);
1206
1207 if (mpt == NULL) {
1208 mptsas_log(NULL, CE_WARN, "cannot get soft state");
1209 goto fail;
1210 }
1211
1212 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1213 scsi_size_clean(dip);
1214
1215 mpt->m_dip = dip;
1216 mpt->m_instance = instance;
1217
1218 /* Make a per-instance copy of the structures */
1219 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1220 if (mptsas_use_64bit_msgaddr) {
1221 mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1222 } else {
1223 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1224 }
1225 mpt->m_reg_acc_attr = mptsas_dev_attr;
1226 mpt->m_dev_acc_attr = mptsas_dev_attr;
1227
1228 /*
1229 * Size of individual request sense buffer
1230 */
1231 mpt->m_req_sense_size = EXTCMDS_STATUS_SIZE;
1232
1233 /*
1234 * Initialize FMA
1235 */
1236 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1237 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1238 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1239 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1240
1241 mptsas_fm_init(mpt);
1242
1243 if (mptsas_alloc_handshake_msg(mpt,
1244 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1245 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1246 goto fail;
1247 }
1248
1249 /*
1250 * Setup configuration space
1251 */
1252 if (mptsas_config_space_init(mpt) == FALSE) {
1253 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1254 goto fail;
1255 }
1256 config_setup++;
1257
1258 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1259 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1260 mptsas_log(mpt, CE_WARN, "map setup failed");
1261 goto fail;
1262 }
1263 map_setup++;
1264
1265 /*
1266 * A taskq is created for dealing with the event handler
1267 */
1268 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1269 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1270 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1271 goto fail;
1272 }
1273 event_taskq_create++;
1274
1275 /*
1276 * A taskq is created for dealing with dr events
1277 */
1278 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1279 "mptsas_dr_taskq",
1280 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1281 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1282 "failed");
1283 goto fail;
1284 }
1285 dr_taskq_create++;
1286
1287 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1288 0, "mptsas_doneq_thread_threshold_prop", 10);
1289 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1290 0, "mptsas_doneq_length_threshold_prop", 8);
1291 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1292 0, "mptsas_doneq_thread_n_prop", 8);
1293 mpt->m_max_tune_throttle = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1294 0, "mptsas_max_throttle", MAX_THROTTLE);
1295
1296 /*
1297 * Error check to make sure value is withing range. If nothing
1298 * is set default to original design value.
1299 */
1300 if (mpt->m_max_tune_throttle < THROTTLE_LO) {
1301 mpt->m_max_tune_throttle = MAX_THROTTLE;
1302 } else if (mpt->m_max_tune_throttle > THROTTLE_HI) {
1303 mpt->m_max_tune_throttle = THROTTLE_HI;
1304 }
1305
1306 if (mpt->m_doneq_thread_n) {
1307 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1308 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1309
1310 mutex_enter(&mpt->m_doneq_mutex);
1311 mpt->m_doneq_thread_id =
1312 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1313 * mpt->m_doneq_thread_n, KM_SLEEP);
1314
1315 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1316 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1317 CV_DRIVER, NULL);
1318 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1319 MUTEX_DRIVER, NULL);
1320 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1321 mpt->m_doneq_thread_id[j].flag |=
1322 MPTSAS_DONEQ_THREAD_ACTIVE;
1323 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1324 mpt->m_doneq_thread_id[j].arg.t = j;
1325 mpt->m_doneq_thread_id[j].threadp =
1326 thread_create(NULL, 0, mptsas_doneq_thread,
1327 &mpt->m_doneq_thread_id[j].arg,
1328 0, &p0, TS_RUN, minclsyspri);
1329 mpt->m_doneq_thread_id[j].donetail =
1330 &mpt->m_doneq_thread_id[j].doneq;
1331 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1332 }
1333 mutex_exit(&mpt->m_doneq_mutex);
1334 doneq_thread_create++;
1335 }
1336
1337 /*
1338 * Disable hardware interrupt since we're not ready to
1339 * handle it yet.
1340 */
1341 MPTSAS_DISABLE_INTR(mpt);
1342 if (mptsas_register_intrs(mpt) == FALSE)
1343 goto fail;
1344 intr_added++;
1345
1346 /* Initialize mutex used in interrupt handler */
1347 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1348 DDI_INTR_PRI(mpt->m_intr_pri));
1349 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1350 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1351 DDI_INTR_PRI(mpt->m_intr_pri));
1352 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1353 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1354 NULL, MUTEX_DRIVER,
1355 DDI_INTR_PRI(mpt->m_intr_pri));
1356 }
1357
1358 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1359 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1360 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1361 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1362 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1363 mutex_init_done++;
1364
1365 #ifdef MPTSAS_FAULTINJECTION
1366 TAILQ_INIT(&mpt->m_fminj_cmdq);
1367 #endif
1368
1369 mutex_enter(&mpt->m_mutex);
1370 /*
1371 * Initialize power management component
1372 */
1373 if (mpt->m_options & MPTSAS_OPT_PM) {
1374 if (mptsas_init_pm(mpt)) {
1375 mutex_exit(&mpt->m_mutex);
1376 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1377 "failed");
1378 goto fail;
1379 }
1380 }
1381
1382 /*
1383 * Initialize chip using Message Unit Reset, if allowed
1384 */
1385 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1386 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1387 mutex_exit(&mpt->m_mutex);
1388 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1389 goto fail;
1390 }
1391
1392 mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
1393 mptsas_target_addr_hash, mptsas_target_addr_cmp,
1394 mptsas_target_free, sizeof (mptsas_target_t),
1395 offsetof(mptsas_target_t, m_link),
1396 offsetof(mptsas_target_t, m_addr), KM_SLEEP);
1397
1398 /*
1399 * The refhash for temporary targets uses the address of the target
1400 * struct itself as tag, so the tag offset is 0. See the implementation
1401 * of mptsas_tmp_target_hash() and mptsas_tmp_target_cmp().
1402 */
1403 mpt->m_tmp_targets = refhash_create(MPTSAS_TMP_TARGET_BUCKET_COUNT,
1404 mptsas_tmp_target_hash, mptsas_tmp_target_cmp,
1405 mptsas_target_free, sizeof (mptsas_target_t),
1406 offsetof(mptsas_target_t, m_link), 0, KM_SLEEP);
1407
1408 /*
1409 * Fill in the phy_info structure and get the base WWID
1410 */
1411 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1412 mptsas_log(mpt, CE_WARN,
1413 "mptsas_get_manufacture_page5 failed!");
1414 goto fail;
1415 }
1416
1417 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1418 mptsas_log(mpt, CE_WARN,
1419 "mptsas_get_sas_io_unit_page_hndshk failed!");
1420 goto fail;
1421 }
1422
1423 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1424 mptsas_log(mpt, CE_WARN,
1425 "mptsas_get_manufacture_page0 failed!");
1426 goto fail;
1427 }
1428
1429 mutex_exit(&mpt->m_mutex);
1430
1431 /*
1432 * Register the iport for multiple port HBA
1433 */
1434 mptsas_iport_register(mpt);
1435
1436 /*
1437 * initialize SCSI HBA transport structure
1438 */
1439 if (mptsas_hba_setup(mpt) == FALSE)
1440 goto fail;
1441 hba_attach_setup++;
1442
1443 if (mptsas_smp_setup(mpt) == FALSE)
1444 goto fail;
1445 smp_attach_setup++;
1446
1447 if (mptsas_enc_setup(mpt) == FALSE)
1448 goto fail;
1449 enc_attach_setup++;
1450
1451 if (mptsas_cache_create(mpt) == FALSE)
1452 goto fail;
1453
1454 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1455 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1456 if (mpt->m_scsi_reset_delay == 0) {
1457 mptsas_log(mpt, CE_NOTE,
1458 "scsi_reset_delay of 0 is not recommended,"
1459 " resetting to SCSI_DEFAULT_RESET_DELAY");
1460 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1461 }
1462
1463 /*
1464 * Initialize the wait and done FIFO queue
1465 */
1466 mpt->m_donetail = &mpt->m_doneq;
1467 mpt->m_waitqtail = &mpt->m_waitq;
1468 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1469 mpt->m_tx_draining = 0;
1470
1471 /*
1472 * ioc cmd queue initialize
1473 */
1474 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1475 mpt->m_dev_handle = 0xFFFF;
1476
1477 MPTSAS_ENABLE_INTR(mpt);
1478
1479 /*
1480 * enable event notification
1481 */
1482 mutex_enter(&mpt->m_mutex);
1483 if (mptsas_ioc_enable_event_notification(mpt)) {
1484 mutex_exit(&mpt->m_mutex);
1485 goto fail;
1486 }
1487 mutex_exit(&mpt->m_mutex);
1488
1489 /*
1490 * used for mptsas_watch
1491 */
1492 mptsas_list_add(mpt);
1493
1494 mutex_enter(&mptsas_global_mutex);
1495 if (mptsas_timeouts_enabled == 0) {
1496 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1497 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1498
1499 mptsas_tick = mptsas_scsi_watchdog_tick *
1500 drv_usectohz((clock_t)1000000);
1501
1502 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1503 mptsas_timeouts_enabled = 1;
1504 }
1505 mutex_exit(&mptsas_global_mutex);
1506 added_watchdog++;
1507
1508 /*
1509 * Initialize PHY info for smhba.
1510 * This requires watchdog to be enabled otherwise if interrupts
1511 * don't work the system will hang.
1512 */
1513 if (mptsas_smhba_setup(mpt)) {
1514 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1515 "failed");
1516 goto fail;
1517 }
1518
1519 /* Check all dma handles allocated in attach */
1520 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1521 != DDI_SUCCESS) ||
1522 (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl)
1523 != DDI_SUCCESS) ||
1524 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1525 != DDI_SUCCESS) ||
1526 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1527 != DDI_SUCCESS) ||
1528 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1529 != DDI_SUCCESS) ||
1530 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1531 != DDI_SUCCESS)) {
1532 goto fail;
1533 }
1534
1535 /* Check all acc handles allocated in attach */
1536 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1537 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1538 != DDI_SUCCESS) ||
1539 (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl)
1540 != DDI_SUCCESS) ||
1541 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1542 != DDI_SUCCESS) ||
1543 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1544 != DDI_SUCCESS) ||
1545 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1546 != DDI_SUCCESS) ||
1547 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1548 != DDI_SUCCESS) ||
1549 (mptsas_check_acc_handle(mpt->m_config_handle)
1550 != DDI_SUCCESS)) {
1551 goto fail;
1552 }
1553
1554 /*
1555 * After this point, we are not going to fail the attach.
1556 */
1557
1558 /* Print message of HBA present */
1559 ddi_report_dev(dip);
1560
1561 /* report idle status to pm framework */
1562 if (mpt->m_options & MPTSAS_OPT_PM) {
1563 (void) pm_idle_component(dip, 0);
1564 }
1565
1566 return (DDI_SUCCESS);
1567
1568 fail:
1569 mptsas_log(mpt, CE_WARN, "attach failed");
1570 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1571 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1572 if (mpt) {
1573 /* deallocate in reverse order */
1574 if (added_watchdog) {
1575 mptsas_list_del(mpt);
1576 mutex_enter(&mptsas_global_mutex);
1577
1578 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1579 timeout_id_t tid = mptsas_timeout_id;
1580 mptsas_timeouts_enabled = 0;
1581 mptsas_timeout_id = 0;
1582 mutex_exit(&mptsas_global_mutex);
1583 (void) untimeout(tid);
1584 mutex_enter(&mptsas_global_mutex);
1585 }
1586 mutex_exit(&mptsas_global_mutex);
1587 }
1588
1589 mptsas_cache_destroy(mpt);
1590
1591 if (smp_attach_setup) {
1592 mptsas_smp_teardown(mpt);
1593 }
1594 if (enc_attach_setup) {
1595 mptsas_enc_teardown(mpt);
1596 }
1597 if (hba_attach_setup) {
1598 mptsas_hba_teardown(mpt);
1599 }
1600
1601 if (mpt->m_tmp_targets)
1602 refhash_destroy(mpt->m_tmp_targets);
1603 if (mpt->m_targets)
1604 refhash_destroy(mpt->m_targets);
1605 if (mpt->m_smp_targets)
1606 refhash_destroy(mpt->m_smp_targets);
1607
1608 if (mpt->m_active) {
1609 mptsas_free_active_slots(mpt);
1610 }
1611 if (intr_added) {
1612 mptsas_unregister_intrs(mpt);
1613 }
1614
1615 if (doneq_thread_create) {
1616 mutex_enter(&mpt->m_doneq_mutex);
1617 doneq_thread_num = mpt->m_doneq_thread_n;
1618 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1619 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1620 mpt->m_doneq_thread_id[j].flag &=
1621 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1622 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1623 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1624 }
1625 while (mpt->m_doneq_thread_n) {
1626 cv_wait(&mpt->m_doneq_thread_cv,
1627 &mpt->m_doneq_mutex);
1628 }
1629 for (j = 0; j < doneq_thread_num; j++) {
1630 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1631 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1632 }
1633 kmem_free(mpt->m_doneq_thread_id,
1634 sizeof (mptsas_doneq_thread_list_t)
1635 * doneq_thread_num);
1636 mutex_exit(&mpt->m_doneq_mutex);
1637 cv_destroy(&mpt->m_doneq_thread_cv);
1638 mutex_destroy(&mpt->m_doneq_mutex);
1639 }
1640 if (event_taskq_create) {
1641 ddi_taskq_destroy(mpt->m_event_taskq);
1642 }
1643 if (dr_taskq_create) {
1644 ddi_taskq_destroy(mpt->m_dr_taskq);
1645 }
1646 if (mutex_init_done) {
1647 mutex_destroy(&mpt->m_tx_waitq_mutex);
1648 mutex_destroy(&mpt->m_passthru_mutex);
1649 mutex_destroy(&mpt->m_mutex);
1650 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1651 mutex_destroy(
1652 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1653 }
1654 cv_destroy(&mpt->m_cv);
1655 cv_destroy(&mpt->m_passthru_cv);
1656 cv_destroy(&mpt->m_fw_cv);
1657 cv_destroy(&mpt->m_config_cv);
1658 cv_destroy(&mpt->m_fw_diag_cv);
1659 }
1660
1661 if (map_setup) {
1662 mptsas_cfg_fini(mpt);
1663 }
1664 if (config_setup) {
1665 mptsas_config_space_fini(mpt);
1666 }
1667 mptsas_free_handshake_msg(mpt);
1668 mptsas_hba_fini(mpt);
1669
1670 mptsas_fm_fini(mpt);
1671 ddi_soft_state_free(mptsas_state, instance);
1672 ddi_prop_remove_all(dip);
1673 }
1674 return (DDI_FAILURE);
1675 }
1676
/*
 * DDI_SUSPEND handler: quiesce a single HBA instance.  Cancels this
 * instance's timeout threads, and when every mptsas instance is suspended,
 * also cancels the global watch/reset-watch timeouts.  Finally syncs IR
 * metadata via a RAID action system shutdown (skipped if the device is
 * not at full power).  Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport children carry no per-instance state to suspend */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Already suspended; the count keeps nested suspends idempotent. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt.  m_mutex must be dropped
	 * around untimeout() to avoid deadlocking against a callback that
	 * is concurrently blocked on m_mutex; the id is cleared first so
	 * the callback can detect the cancellation.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 * (g == NULL after the scan means no instance is still active).
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			/* same drop-lock-around-untimeout dance as above */
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1783
1784 #ifdef __sparc
1785 /*ARGSUSED*/
1786 static int
1787 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1788 {
1789 mptsas_t *mpt;
1790 scsi_hba_tran_t *tran;
1791
1792 /*
1793 * If this call is for iport, just return.
1794 */
1795 if (scsi_hba_iport_unit_address(devi))
1796 return (DDI_SUCCESS);
1797
1798 if ((tran = ddi_get_driver_private(devi)) == NULL)
1799 return (DDI_SUCCESS);
1800
1801 if ((mpt = TRAN2MPT(tran)) == NULL)
1802 return (DDI_SUCCESS);
1803
1804 /*
1805 * Send RAID action system shutdown to sync IR. Disable HBA
1806 * interrupts in hardware first.
1807 */
1808 MPTSAS_DISABLE_INTR(mpt);
1809 mptsas_raid_action_system_shutdown(mpt);
1810
1811 return (DDI_SUCCESS);
1812 }
1813 #else /* __sparc */
1814 /*
1815 * quiesce(9E) entry point.
1816 *
1817 * This function is called when the system is single-threaded at high
1818 * PIL with preemption disabled. Therefore, this function must not be
1819 * blocked.
1820 *
1821 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1822 * DDI_FAILURE indicates an error condition and should almost never happen.
1823 */
1824 static int
1825 mptsas_quiesce(dev_info_t *devi)
1826 {
1827 mptsas_t *mpt;
1828 scsi_hba_tran_t *tran;
1829
1830 /*
1831 * If this call is for iport, just return.
1832 */
1833 if (scsi_hba_iport_unit_address(devi))
1834 return (DDI_SUCCESS);
1835
1836 if ((tran = ddi_get_driver_private(devi)) == NULL)
1837 return (DDI_SUCCESS);
1838
1839 if ((mpt = TRAN2MPT(tran)) == NULL)
1840 return (DDI_SUCCESS);
1841
1842 /* Disable HBA interrupts in hardware */
1843 MPTSAS_DISABLE_INTR(mpt);
1844 /* Send RAID action system shutdonw to sync IR */
1845 mptsas_raid_action_system_shutdown(mpt);
1846
1847 return (DDI_SUCCESS);
1848 }
1849 #endif /* __sparc */
1850
1851 /*
1852 * detach(9E). Remove all device allocations and system resources;
1853 * disable device interrupts.
1854 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1855 */
1856 static int
1857 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1858 {
1859 /* CONSTCOND */
1860 ASSERT(NO_COMPETING_THREADS);
1861 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1862
1863 switch (cmd) {
1864 case DDI_DETACH:
1865 return (mptsas_do_detach(devi));
1866
1867 case DDI_SUSPEND:
1868 return (mptsas_suspend(devi));
1869
1870 default:
1871 return (DDI_FAILURE);
1872 }
1873 /* NOTREACHED */
1874 }
1875
1876 static int
1877 mptsas_do_detach(dev_info_t *dip)
1878 {
1879 mptsas_t *mpt;
1880 scsi_hba_tran_t *tran;
1881 int circ = 0;
1882 int circ1 = 0;
1883 mdi_pathinfo_t *pip = NULL;
1884 int i;
1885 int doneq_thread_num = 0;
1886
1887 NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1888
1889 if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1890 return (DDI_FAILURE);
1891
1892 mpt = TRAN2MPT(tran);
1893 if (!mpt) {
1894 return (DDI_FAILURE);
1895 }
1896 /*
1897 * Still have pathinfo child, should not detach mpt driver
1898 */
1899 if (scsi_hba_iport_unit_address(dip)) {
1900 if (mpt->m_mpxio_enable) {
1901 /*
1902 * MPxIO enabled for the iport
1903 */
1904 ndi_devi_enter(scsi_vhci_dip, &circ1);
1905 ndi_devi_enter(dip, &circ);
1906 while ((pip = mdi_get_next_client_path(dip, NULL)) !=
1907 NULL) {
1908 if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1909 continue;
1910 }
1911 ndi_devi_exit(dip, circ);
1912 ndi_devi_exit(scsi_vhci_dip, circ1);
1913 NDBG12(("detach failed because of "
1914 "outstanding path info"));
1915 return (DDI_FAILURE);
1916 }
1917 ndi_devi_exit(dip, circ);
1918 ndi_devi_exit(scsi_vhci_dip, circ1);
1919 (void) mdi_phci_unregister(dip, 0);
1920 }
1921
1922 ddi_prop_remove_all(dip);
1923
1924 return (DDI_SUCCESS);
1925 }
1926
1927 /* Make sure power level is D0 before accessing registers */
1928 if (mpt->m_options & MPTSAS_OPT_PM) {
1929 (void) pm_busy_component(dip, 0);
1930 if (mpt->m_power_level != PM_LEVEL_D0) {
1931 if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1932 DDI_SUCCESS) {
1933 mptsas_log(mpt, CE_WARN,
1934 "raise power request failed");
1935 (void) pm_idle_component(dip, 0);
1936 return (DDI_FAILURE);
1937 }
1938 }
1939 }
1940
1941 /*
1942 * Send RAID action system shutdown to sync IR. After action, send a
1943 * Message Unit Reset. Since after that DMA resource will be freed,
1944 * set ioc to READY state will avoid HBA initiated DMA operation.
1945 */
1946 mutex_enter(&mpt->m_mutex);
1947 MPTSAS_DISABLE_INTR(mpt);
1948 mptsas_raid_action_system_shutdown(mpt);
1949 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1950 (void) mptsas_ioc_reset(mpt, FALSE);
1951 mutex_exit(&mpt->m_mutex);
1952 mptsas_rem_intrs(mpt);
1953 ddi_taskq_destroy(mpt->m_event_taskq);
1954 ddi_taskq_destroy(mpt->m_dr_taskq);
1955
1956 if (mpt->m_doneq_thread_n) {
1957 mutex_enter(&mpt->m_doneq_mutex);
1958 doneq_thread_num = mpt->m_doneq_thread_n;
1959 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1960 mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1961 mpt->m_doneq_thread_id[i].flag &=
1962 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1963 cv_signal(&mpt->m_doneq_thread_id[i].cv);
1964 mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1965 }
1966 while (mpt->m_doneq_thread_n) {
1967 cv_wait(&mpt->m_doneq_thread_cv,
1968 &mpt->m_doneq_mutex);
1969 }
1970 for (i = 0; i < doneq_thread_num; i++) {
1971 cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1972 mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
1973 }
1974 kmem_free(mpt->m_doneq_thread_id,
1975 sizeof (mptsas_doneq_thread_list_t)
1976 * doneq_thread_num);
1977 mutex_exit(&mpt->m_doneq_mutex);
1978 cv_destroy(&mpt->m_doneq_thread_cv);
1979 mutex_destroy(&mpt->m_doneq_mutex);
1980 }
1981
1982 scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
1983
1984 mptsas_list_del(mpt);
1985
1986 /*
1987 * Cancel timeout threads for this mpt
1988 */
1989 mutex_enter(&mpt->m_mutex);
1990 if (mpt->m_quiesce_timeid) {
1991 timeout_id_t tid = mpt->m_quiesce_timeid;
1992 mpt->m_quiesce_timeid = 0;
1993 mutex_exit(&mpt->m_mutex);
1994 (void) untimeout(tid);
1995 mutex_enter(&mpt->m_mutex);
1996 }
1997
1998 if (mpt->m_restart_cmd_timeid) {
1999 timeout_id_t tid = mpt->m_restart_cmd_timeid;
2000 mpt->m_restart_cmd_timeid = 0;
2001 mutex_exit(&mpt->m_mutex);
2002 (void) untimeout(tid);
2003 mutex_enter(&mpt->m_mutex);
2004 }
2005
2006 mutex_exit(&mpt->m_mutex);
2007
2008 /*
2009 * last mpt? ... if active, CANCEL watch threads.
2010 */
2011 mutex_enter(&mptsas_global_mutex);
2012 if (mptsas_head == NULL) {
2013 timeout_id_t tid;
2014 /*
2015 * Clear mptsas_timeouts_enable so that the watch thread
2016 * gets restarted on DDI_ATTACH
2017 */
2018 mptsas_timeouts_enabled = 0;
2019 if (mptsas_timeout_id) {
2020 tid = mptsas_timeout_id;
2021 mptsas_timeout_id = 0;
2022 mutex_exit(&mptsas_global_mutex);
2023 (void) untimeout(tid);
2024 mutex_enter(&mptsas_global_mutex);
2025 }
2026 if (mptsas_reset_watch) {
2027 tid = mptsas_reset_watch;
2028 mptsas_reset_watch = 0;
2029 mutex_exit(&mptsas_global_mutex);
2030 (void) untimeout(tid);
2031 mutex_enter(&mptsas_global_mutex);
2032 }
2033 }
2034 mutex_exit(&mptsas_global_mutex);
2035
2036 /*
2037 * Delete Phy stats
2038 */
2039 mptsas_destroy_phy_stats(mpt);
2040
2041 mptsas_destroy_hashes(mpt);
2042
2043 /*
2044 * Delete nt_active.
2045 */
2046 mutex_enter(&mpt->m_mutex);
2047 mptsas_free_active_slots(mpt);
2048 mutex_exit(&mpt->m_mutex);
2049
2050 /* deallocate everything that was allocated in mptsas_attach */
2051 mptsas_cache_destroy(mpt);
2052
2053 mptsas_hba_fini(mpt);
2054 mptsas_cfg_fini(mpt);
2055
2056 /* Lower the power informing PM Framework */
2057 if (mpt->m_options & MPTSAS_OPT_PM) {
2058 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2059 mptsas_log(mpt, CE_WARN,
2060 "lower power request failed during detach, "
2061 "ignoring");
2062 }
2063
2064 mutex_destroy(&mpt->m_tx_waitq_mutex);
2065 mutex_destroy(&mpt->m_passthru_mutex);
2066 mutex_destroy(&mpt->m_mutex);
2067 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2068 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2069 }
2070 cv_destroy(&mpt->m_cv);
2071 cv_destroy(&mpt->m_passthru_cv);
2072 cv_destroy(&mpt->m_fw_cv);
2073 cv_destroy(&mpt->m_config_cv);
2074 cv_destroy(&mpt->m_fw_diag_cv);
2075
2076 #ifdef MPTSAS_FAULTINJECTION
2077 ASSERT(TAILQ_EMPTY(&mpt->m_fminj_cmdq));
2078 #endif
2079
2080 mptsas_smp_teardown(mpt);
2081 mptsas_enc_teardown(mpt);
2082 mptsas_hba_teardown(mpt);
2083
2084 mptsas_config_space_fini(mpt);
2085
2086 mptsas_free_handshake_msg(mpt);
2087
2088 mptsas_fm_fini(mpt);
2089 ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2090 ddi_prop_remove_all(dip);
2091
2092 return (DDI_SUCCESS);
2093 }
2094
2095 static void
2096 mptsas_list_add(mptsas_t *mpt)
2097 {
2098 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2099
2100 if (mptsas_head == NULL) {
2101 mptsas_head = mpt;
2102 } else {
2103 mptsas_tail->m_next = mpt;
2104 }
2105 mptsas_tail = mpt;
2106 rw_exit(&mptsas_global_rwlock);
2107 }
2108
/*
 * Remove an mpt instance from the global softc list maintained by
 * mptsas_list_add().  Panics if the instance is not on the list.
 */
static void
mptsas_list_del(mptsas_t *mpt)
{
	mptsas_t *m;
	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	if (mptsas_head == mpt) {
		/* 'm' becomes the new head (may be NULL). */
		m = mptsas_head = mpt->m_next;
	} else {
		/* Walk to the predecessor of 'mpt' and unlink it. */
		for (m = mptsas_head; m != NULL; m = m->m_next) {
			if (m->m_next == mpt) {
				m->m_next = mpt->m_next;
				break;
			}
		}
		if (m == NULL) {
			/* Not found: list is corrupt, fail loudly. */
			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
		}
	}

	/*
	 * If 'mpt' was the tail, the new tail is whatever 'm' holds:
	 * the predecessor in the general case, or the new head (NULL
	 * for a now-empty list) when 'mpt' was also the head.
	 */
	if (mptsas_tail == mpt) {
		mptsas_tail = m;
	}
	rw_exit(&mptsas_global_rwlock);
}
2136
2137 static int
2138 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2139 {
2140 ddi_dma_attr_t task_dma_attrs;
2141
2142 mpt->m_hshk_dma_size = 0;
2143 task_dma_attrs = mpt->m_msg_dma_attr;
2144 task_dma_attrs.dma_attr_sgllen = 1;
2145 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2146
2147 /* allocate Task Management ddi_dma resources */
2148 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2149 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2150 alloc_size, NULL) == FALSE) {
2151 return (DDI_FAILURE);
2152 }
2153 mpt->m_hshk_dma_size = alloc_size;
2154
2155 return (DDI_SUCCESS);
2156 }
2157
2158 static void
2159 mptsas_free_handshake_msg(mptsas_t *mpt)
2160 {
2161 if (mpt->m_hshk_dma_size == 0)
2162 return;
2163 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2164 mpt->m_hshk_dma_size = 0;
2165 }
2166
2167 static int
2168 mptsas_hba_setup(mptsas_t *mpt)
2169 {
2170 scsi_hba_tran_t *hba_tran;
2171 int tran_flags;
2172
2173 /* Allocate a transport structure */
2174 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
2175 SCSI_HBA_CANSLEEP);
2176 ASSERT(mpt->m_tran != NULL);
2177
2178 hba_tran->tran_hba_private = mpt;
2179 hba_tran->tran_tgt_private = NULL;
2180
2181 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
2182 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;
2183
2184 hba_tran->tran_start = mptsas_scsi_start;
2185 hba_tran->tran_reset = mptsas_scsi_reset;
2186 hba_tran->tran_abort = mptsas_scsi_abort;
2187 hba_tran->tran_getcap = mptsas_scsi_getcap;
2188 hba_tran->tran_setcap = mptsas_scsi_setcap;
2189 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
2190 hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;
2191
2192 hba_tran->tran_dmafree = mptsas_scsi_dmafree;
2193 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
2194 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;
2195
2196 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
2197 hba_tran->tran_get_name = mptsas_get_name;
2198
2199 hba_tran->tran_quiesce = mptsas_scsi_quiesce;
2200 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
2201 hba_tran->tran_bus_reset = NULL;
2202
2203 hba_tran->tran_add_eventcall = NULL;
2204 hba_tran->tran_get_eventcookie = NULL;
2205 hba_tran->tran_post_event = NULL;
2206 hba_tran->tran_remove_eventcall = NULL;
2207
2208 hba_tran->tran_bus_config = mptsas_bus_config;
2209
2210 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2211
2212 /*
2213 * All children of the HBA are iports. We need tran was cloned.
2214 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2215 * inherited to iport's tran vector.
2216 */
2217 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2218
2219 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2220 hba_tran, tran_flags) != DDI_SUCCESS) {
2221 mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2222 scsi_hba_tran_free(hba_tran);
2223 mpt->m_tran = NULL;
2224 return (FALSE);
2225 }
2226 return (TRUE);
2227 }
2228
2229 static void
2230 mptsas_hba_teardown(mptsas_t *mpt)
2231 {
2232 (void) scsi_hba_detach(mpt->m_dip);
2233 if (mpt->m_tran != NULL) {
2234 scsi_hba_tran_free(mpt->m_tran);
2235 mpt->m_tran = NULL;
2236 }
2237 }
2238
/*
 * Register one SCSA iport per discovered SAS port.  Each port is named
 * by the hex value of its phymask (the set of PHYs sharing a port
 * number), and a virtual iport "v0" is always registered for RAID
 * volumes.  'mask' accumulates PHYs already claimed by a registered
 * port so each port is registered only once.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		/* Holds the port's phymask formatted as a hex string. */
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip PHYs with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Skip PHYs already covered by a registered port. */
		if ((mask & (1 << i)) != 0)
			continue;

		/*
		 * Collect every attached PHY that belongs to the same
		 * port into this port's phymask.
		 */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the port's phymask on each member PHY. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * Drop m_mutex across the framework call; it may block
		 * and must not be made with the instance lock held.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2293
2294 static int
2295 mptsas_smp_setup(mptsas_t *mpt)
2296 {
2297 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2298 ASSERT(mpt->m_smptran != NULL);
2299 mpt->m_smptran->smp_tran_hba_private = mpt;
2300 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2301 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2302 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2303 smp_hba_tran_free(mpt->m_smptran);
2304 mpt->m_smptran = NULL;
2305 return (FALSE);
2306 }
2307 /*
2308 * Initialize smp hash table
2309 */
2310 mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
2311 mptsas_target_addr_hash, mptsas_target_addr_cmp,
2312 mptsas_smp_free, sizeof (mptsas_smp_t),
2313 offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
2314 KM_SLEEP);
2315 mpt->m_smp_devhdl = 0xFFFF;
2316
2317 return (TRUE);
2318 }
2319
2320 static void
2321 mptsas_smp_teardown(mptsas_t *mpt)
2322 {
2323 (void) smp_hba_detach(mpt->m_dip);
2324 if (mpt->m_smptran != NULL) {
2325 smp_hba_tran_free(mpt->m_smptran);
2326 mpt->m_smptran = NULL;
2327 }
2328 mpt->m_smp_devhdl = 0;
2329 }
2330
2331 static int
2332 mptsas_enc_setup(mptsas_t *mpt)
2333 {
2334 list_create(&mpt->m_enclosures, sizeof (mptsas_enclosure_t),
2335 offsetof(mptsas_enclosure_t, me_link));
2336 return (TRUE);
2337 }
2338
2339 static void
2340 mptsas_enc_free(mptsas_enclosure_t *mep)
2341 {
2342 if (mep == NULL)
2343 return;
2344 if (mep->me_slotleds != NULL) {
2345 VERIFY3U(mep->me_nslots, >, 0);
2346 kmem_free(mep->me_slotleds, sizeof (uint8_t) * mep->me_nslots);
2347 }
2348 kmem_free(mep, sizeof (mptsas_enclosure_t));
2349 }
2350
2351 static void
2352 mptsas_enc_teardown(mptsas_t *mpt)
2353 {
2354 mptsas_enclosure_t *mep;
2355
2356 while ((mep = list_remove_head(&mpt->m_enclosures)) != NULL) {
2357 mptsas_enc_free(mep);
2358 }
2359 list_destroy(&mpt->m_enclosures);
2360 }
2361
2362 static mptsas_enclosure_t *
2363 mptsas_enc_lookup(mptsas_t *mpt, uint16_t hdl)
2364 {
2365 mptsas_enclosure_t *mep;
2366
2367 ASSERT(MUTEX_HELD(&mpt->m_mutex));
2368
2369 for (mep = list_head(&mpt->m_enclosures); mep != NULL;
2370 mep = list_next(&mpt->m_enclosures, mep)) {
2371 if (hdl == mep->me_enchdl) {
2372 return (mep);
2373 }
2374 }
2375
2376 return (NULL);
2377 }
2378
2379 static int
2380 mptsas_cache_create(mptsas_t *mpt)
2381 {
2382 int instance = mpt->m_instance;
2383 char buf[64];
2384
2385 /*
2386 * create kmem cache for packets
2387 */
2388 (void) sprintf(buf, "mptsas%d_cache", instance);
2389 mpt->m_kmem_cache = kmem_cache_create(buf,
2390 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2391 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2392 NULL, (void *)mpt, NULL, 0);
2393
2394 if (mpt->m_kmem_cache == NULL) {
2395 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2396 return (FALSE);
2397 }
2398
2399 /*
2400 * create kmem cache for extra SGL frames if SGL cannot
2401 * be accomodated into main request frame.
2402 */
2403 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2404 mpt->m_cache_frames = kmem_cache_create(buf,
2405 sizeof (mptsas_cache_frames_t), 8,
2406 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2407 NULL, (void *)mpt, NULL, 0);
2408
2409 if (mpt->m_cache_frames == NULL) {
2410 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2411 return (FALSE);
2412 }
2413
2414 return (TRUE);
2415 }
2416
2417 static void
2418 mptsas_cache_destroy(mptsas_t *mpt)
2419 {
2420 /* deallocate in reverse order */
2421 if (mpt->m_cache_frames) {
2422 kmem_cache_destroy(mpt->m_cache_frames);
2423 mpt->m_cache_frames = NULL;
2424 }
2425 if (mpt->m_kmem_cache) {
2426 kmem_cache_destroy(mpt->m_kmem_cache);
2427 mpt->m_kmem_cache = NULL;
2428 }
2429 }
2430
/*
 * power(9e) entry point for the HBA node.  Raises the device to D0
 * (waiting for the IOC to leave reset and hard-resetting it if it does
 * not reach the operational state) or lowers it to D3.  iport nodes
 * are ignored.  A busy instance refuses to have its power lowered.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t *mpt;
	int rval = DDI_SUCCESS;
	int polls = 0;
	uint32_t ioc_status;

	/* Power management applies to the HBA node only, not iports. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 * (3000 polls x 10ms delay.)
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			/* Force a full hard reset rather than a message unit reset. */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "unknown power level <%x>", level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2501
2502 /*
2503 * Initialize configuration space and figure out which
2504 * chip and revison of the chip the mpt driver is using.
2505 */
2506 static int
2507 mptsas_config_space_init(mptsas_t *mpt)
2508 {
2509 NDBG0(("mptsas_config_space_init"));
2510
2511 if (mpt->m_config_handle != NULL)
2512 return (TRUE);
2513
2514 if (pci_config_setup(mpt->m_dip,
2515 &mpt->m_config_handle) != DDI_SUCCESS) {
2516 mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2517 return (FALSE);
2518 }
2519
2520 /*
2521 * This is a workaround for a XMITS ASIC bug which does not
2522 * drive the CBE upper bits.
2523 */
2524 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2525 PCI_STAT_PERROR) {
2526 pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2527 PCI_STAT_PERROR);
2528 }
2529
2530 mptsas_setup_cmd_reg(mpt);
2531
2532 /*
2533 * Get the chip device id:
2534 */
2535 mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2536
2537 /*
2538 * Save the revision.
2539 */
2540 mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2541
2542 /*
2543 * Save the SubSystem Vendor and Device IDs
2544 */
2545 mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2546 mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2547
2548 /*
2549 * Set the latency timer to 0x40 as specified by the upa -> pci
2550 * bridge chip design team. This may be done by the sparc pci
2551 * bus nexus driver, but the driver should make sure the latency
2552 * timer is correct for performance reasons.
2553 */
2554 pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2555 MPTSAS_LATENCY_TIMER);
2556
2557 (void) mptsas_get_pci_cap(mpt);
2558 return (TRUE);
2559 }
2560
2561 static void
2562 mptsas_config_space_fini(mptsas_t *mpt)
2563 {
2564 if (mpt->m_config_handle != NULL) {
2565 mptsas_disable_bus_master(mpt);
2566 pci_config_teardown(&mpt->m_config_handle);
2567 mpt->m_config_handle = NULL;
2568 }
2569 }
2570
2571 static void
2572 mptsas_setup_cmd_reg(mptsas_t *mpt)
2573 {
2574 ushort_t cmdreg;
2575
2576 /*
2577 * Set the command register to the needed values.
2578 */
2579 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2580 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2581 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2582 cmdreg &= ~PCI_COMM_IO;
2583 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2584 }
2585
2586 static void
2587 mptsas_disable_bus_master(mptsas_t *mpt)
2588 {
2589 ushort_t cmdreg;
2590
2591 /*
2592 * Clear the master enable bit in the PCI command register.
2593 * This prevents any bus mastering activity like DMA.
2594 */
2595 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2596 cmdreg &= ~PCI_COMM_ME;
2597 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2598 }
2599
2600 int
2601 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2602 {
2603 ddi_dma_attr_t attrs;
2604
2605 attrs = mpt->m_io_dma_attr;
2606 attrs.dma_attr_sgllen = 1;
2607
2608 ASSERT(dma_statep != NULL);
2609
2610 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2611 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2612 &dma_statep->cookie) == FALSE) {
2613 return (DDI_FAILURE);
2614 }
2615
2616 return (DDI_SUCCESS);
2617 }
2618
2619 void
2620 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2621 {
2622 ASSERT(dma_statep != NULL);
2623 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2624 dma_statep->size = 0;
2625 }
2626
/*
 * Allocate a temporary single-segment DMA buffer of 'size' bytes,
 * invoke 'callback' as (*callback)(mpt, memp, var, accessp), then
 * release the buffer.  The callback's return value is passed through
 * unless an FM check on the DMA or access handle fails, in which case
 * DDI_FAILURE is returned instead.  Caller must hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t attrs;
	ddi_dma_handle_t dma_handle;
	caddr_t memp;
	ddi_acc_handle_t accessp;
	int rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	/* The whole buffer must be carved as one granule. */
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/*
	 * Check both handles for faults before tearing down; report any
	 * impact to the FM framework and fail the operation.
	 */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2659
2660 static int
2661 mptsas_alloc_request_frames(mptsas_t *mpt)
2662 {
2663 ddi_dma_attr_t frame_dma_attrs;
2664 caddr_t memp;
2665 ddi_dma_cookie_t cookie;
2666 size_t mem_size;
2667
2668 /*
2669 * re-alloc when it has already alloced
2670 */
2671 if (mpt->m_dma_req_frame_hdl)
2672 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2673 &mpt->m_acc_req_frame_hdl);
2674
2675 /*
2676 * The size of the request frame pool is:
2677 * Number of Request Frames * Request Frame Size
2678 */
2679 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2680
2681 /*
2682 * set the DMA attributes. System Request Message Frames must be
2683 * aligned on a 16-byte boundry.
2684 */
2685 frame_dma_attrs = mpt->m_msg_dma_attr;
2686 frame_dma_attrs.dma_attr_align = 16;
2687 frame_dma_attrs.dma_attr_sgllen = 1;
2688
2689 /*
2690 * allocate the request frame pool.
2691 */
2692 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2693 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2694 mem_size, &cookie) == FALSE) {
2695 return (DDI_FAILURE);
2696 }
2697
2698 /*
2699 * Store the request frame memory address. This chip uses this
2700 * address to dma to and from the driver's frame. The second
2701 * address is the address mpt uses to fill in the frame.
2702 */
2703 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2704 mpt->m_req_frame = memp;
2705
2706 /*
2707 * Clear the request frame pool.
2708 */
2709 bzero(mpt->m_req_frame, mem_size);
2710
2711 return (DDI_SUCCESS);
2712 }
2713
/*
 * Allocate (or re-allocate) the request sense buffer pool: one
 * m_req_sense_size buffer per request frame (less two), plus an
 * extended-sense region of mptsas_extreq_sense_bufsize bytes managed
 * through an rmalloc() resource map.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_alloc_sense_bufs(mptsas_t *mpt)
{
	ddi_dma_attr_t sense_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;
	int num_extrqsense_bufs;

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_req_sense_hdl) {
		/* Drop the old extended-sense resource map as well. */
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	/*
	 * The size of the request sense pool is:
	 *	(Number of Request Frames - 2 ) * Request Sense Size +
	 *	extra memory for extended sense requests.
	 */
	mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
	    mptsas_extreq_sense_bufsize;

	/*
	 * set the DMA attributes.  ARQ buffers
	 * aligned on a 16-byte boundry.
	 */
	sense_dma_attrs = mpt->m_msg_dma_attr;
	sense_dma_attrs.dma_attr_align = 16;
	sense_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request sense buffer pool.
	 */
	if (mptsas_dma_addr_create(mpt, sense_dma_attrs,
	    &mpt->m_dma_req_sense_hdl, &mpt->m_acc_req_sense_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request sense base memory address.  This chip uses this
	 * address to dma the request sense data.  The second
	 * address is the address mpt uses to access the data.
	 * The third is the base for the extended rqsense buffers.
	 */
	mpt->m_req_sense_dma_addr = cookie.dmac_laddress;
	mpt->m_req_sense = memp;
	memp += (mpt->m_max_requests - 2) * mpt->m_req_sense_size;
	mpt->m_extreq_sense = memp;

	/*
	 * The extra memory is divided up into multiples of the base
	 * buffer size in order to allocate via rmalloc().
	 * Note that the rmallocmap cannot start at zero!
	 * (Hence the map is seeded with rmfree() at offset 1; users of
	 * the map must account for that bias.)
	 */
	num_extrqsense_bufs = mptsas_extreq_sense_bufsize /
	    mpt->m_req_sense_size;
	mpt->m_erqsense_map = rmallocmap_wait(num_extrqsense_bufs);
	rmfree(mpt->m_erqsense_map, num_extrqsense_bufs, 1);

	/*
	 * Clear the pool.
	 */
	bzero(mpt->m_req_sense, mem_size);

	return (DDI_SUCCESS);
}
2785
2786 static int
2787 mptsas_alloc_reply_frames(mptsas_t *mpt)
2788 {
2789 ddi_dma_attr_t frame_dma_attrs;
2790 caddr_t memp;
2791 ddi_dma_cookie_t cookie;
2792 size_t mem_size;
2793
2794 /*
2795 * re-alloc when it has already alloced
2796 */
2797 if (mpt->m_dma_reply_frame_hdl) {
2798 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2799 &mpt->m_acc_reply_frame_hdl);
2800 }
2801
2802 /*
2803 * The size of the reply frame pool is:
2804 * Number of Reply Frames * Reply Frame Size
2805 */
2806 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2807
2808 /*
2809 * set the DMA attributes. System Reply Message Frames must be
2810 * aligned on a 4-byte boundry. This is the default.
2811 */
2812 frame_dma_attrs = mpt->m_msg_dma_attr;
2813 frame_dma_attrs.dma_attr_sgllen = 1;
2814
2815 /*
2816 * allocate the reply frame pool
2817 */
2818 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2819 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2820 mem_size, &cookie) == FALSE) {
2821 return (DDI_FAILURE);
2822 }
2823
2824 /*
2825 * Store the reply frame memory address. This chip uses this
2826 * address to dma to and from the driver's frame. The second
2827 * address is the address mpt uses to process the frame.
2828 */
2829 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2830 mpt->m_reply_frame = memp;
2831
2832 /*
2833 * Clear the reply frame pool.
2834 */
2835 bzero(mpt->m_reply_frame, mem_size);
2836
2837 return (DDI_SUCCESS);
2838 }
2839
2840 static int
2841 mptsas_alloc_free_queue(mptsas_t *mpt)
2842 {
2843 ddi_dma_attr_t frame_dma_attrs;
2844 caddr_t memp;
2845 ddi_dma_cookie_t cookie;
2846 size_t mem_size;
2847
2848 /*
2849 * re-alloc when it has already alloced
2850 */
2851 if (mpt->m_dma_free_queue_hdl) {
2852 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2853 &mpt->m_acc_free_queue_hdl);
2854 }
2855
2856 /*
2857 * The reply free queue size is:
2858 * Reply Free Queue Depth * 4
2859 * The "4" is the size of one 32 bit address (low part of 64-bit
2860 * address)
2861 */
2862 mem_size = mpt->m_free_queue_depth * 4;
2863
2864 /*
2865 * set the DMA attributes The Reply Free Queue must be aligned on a
2866 * 16-byte boundry.
2867 */
2868 frame_dma_attrs = mpt->m_msg_dma_attr;
2869 frame_dma_attrs.dma_attr_align = 16;
2870 frame_dma_attrs.dma_attr_sgllen = 1;
2871
2872 /*
2873 * allocate the reply free queue
2874 */
2875 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2876 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2877 mem_size, &cookie) == FALSE) {
2878 return (DDI_FAILURE);
2879 }
2880
2881 /*
2882 * Store the reply free queue memory address. This chip uses this
2883 * address to read from the reply free queue. The second address
2884 * is the address mpt uses to manage the queue.
2885 */
2886 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2887 mpt->m_free_queue = memp;
2888
2889 /*
2890 * Clear the reply free queue memory.
2891 */
2892 bzero(mpt->m_free_queue, mem_size);
2893
2894 return (DDI_SUCCESS);
2895 }
2896
2897 static int
2898 mptsas_alloc_post_queue(mptsas_t *mpt)
2899 {
2900 ddi_dma_attr_t frame_dma_attrs;
2901 caddr_t memp;
2902 ddi_dma_cookie_t cookie;
2903 size_t mem_size;
2904
2905 /*
2906 * re-alloc when it has already alloced
2907 */
2908 if (mpt->m_dma_post_queue_hdl) {
2909 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2910 &mpt->m_acc_post_queue_hdl);
2911 }
2912
2913 /*
2914 * The reply descriptor post queue size is:
2915 * Reply Descriptor Post Queue Depth * 8
2916 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2917 */
2918 mem_size = mpt->m_post_queue_depth * 8;
2919
2920 /*
2921 * set the DMA attributes. The Reply Descriptor Post Queue must be
2922 * aligned on a 16-byte boundry.
2923 */
2924 frame_dma_attrs = mpt->m_msg_dma_attr;
2925 frame_dma_attrs.dma_attr_align = 16;
2926 frame_dma_attrs.dma_attr_sgllen = 1;
2927
2928 /*
2929 * allocate the reply post queue
2930 */
2931 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2932 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2933 mem_size, &cookie) == FALSE) {
2934 return (DDI_FAILURE);
2935 }
2936
2937 /*
2938 * Store the reply descriptor post queue memory address. This chip
2939 * uses this address to write to the reply descriptor post queue. The
2940 * second address is the address mpt uses to manage the queue.
2941 */
2942 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2943 mpt->m_post_queue = memp;
2944
2945 /*
2946 * Clear the reply post queue memory.
2947 */
2948 bzero(mpt->m_post_queue, mem_size);
2949
2950 return (DDI_SUCCESS);
2951 }
2952
2953 static void
2954 mptsas_alloc_reply_args(mptsas_t *mpt)
2955 {
2956 if (mpt->m_replyh_args == NULL) {
2957 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2958 mpt->m_max_replies, KM_SLEEP);
2959 }
2960 }
2961
2962 static int
2963 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2964 {
2965 mptsas_cache_frames_t *frames = NULL;
2966 if (cmd->cmd_extra_frames == NULL) {
2967 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2968 if (frames == NULL) {
2969 return (DDI_FAILURE);
2970 }
2971 cmd->cmd_extra_frames = frames;
2972 }
2973 return (DDI_SUCCESS);
2974 }
2975
2976 static void
2977 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2978 {
2979 if (cmd->cmd_extra_frames) {
2980 kmem_cache_free(mpt->m_cache_frames,
2981 (void *)cmd->cmd_extra_frames);
2982 cmd->cmd_extra_frames = NULL;
2983 }
2984 }
2985
/*
 * Release the device register mapping (m_datap).  Counterpart of the
 * register-mapping setup performed during attach; presumably paired
 * with mptsas_config_space_fini() for full config teardown — confirm
 * against the attach path.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2992
2993 static void
2994 mptsas_hba_fini(mptsas_t *mpt)
2995 {
2996 NDBG0(("mptsas_hba_fini"));
2997
2998 /*
2999 * Free up any allocated memory
3000 */
3001 if (mpt->m_dma_req_frame_hdl) {
3002 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
3003 &mpt->m_acc_req_frame_hdl);
3004 }
3005
3006 if (mpt->m_dma_req_sense_hdl) {
3007 rmfreemap(mpt->m_erqsense_map);
3008 mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
3009 &mpt->m_acc_req_sense_hdl);
3010 }
3011
3012 if (mpt->m_dma_reply_frame_hdl) {
3013 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
3014 &mpt->m_acc_reply_frame_hdl);
3015 }
3016
3017 if (mpt->m_dma_free_queue_hdl) {
3018 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
3019 &mpt->m_acc_free_queue_hdl);
3020 }
3021
3022 if (mpt->m_dma_post_queue_hdl) {
3023 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
3024 &mpt->m_acc_post_queue_hdl);
3025 }
3026
3027 if (mpt->m_replyh_args != NULL) {
3028 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
3029 * mpt->m_max_replies);
3030 }
3031 }
3032
3033 static int
3034 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
3035 {
3036 int lun = 0;
3037 char *sas_wwn = NULL;
3038 int phynum = -1;
3039 int reallen = 0;
3040
3041 /* Get the target num */
3042 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
3043 LUN_PROP, 0);
3044
3045 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
3046 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
3047 /*
3048 * Stick in the address of form "pPHY,LUN"
3049 */
3050 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
3051 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
3052 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
3053 == DDI_PROP_SUCCESS) {
3054 /*
3055 * Stick in the address of the form "wWWN,LUN"
3056 */
3057 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
3058 ddi_prop_free(sas_wwn);
3059 } else {
3060 return (DDI_FAILURE);
3061 }
3062
3063 ASSERT(reallen < len);
3064 if (reallen >= len) {
3065 mptsas_log(0, CE_WARN, "mptsas_get_name: name parameter "
3066 "length too small, it needs to be %d bytes", reallen + 1);
3067 }
3068 return (DDI_SUCCESS);
3069 }
3070
3071 /*
3072 * tran_tgt_init(9E) - target device instance initialization
3073 */
3074 static int
3075 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3076 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3077 {
3078 #ifndef __lock_lint
3079 _NOTE(ARGUNUSED(hba_tran))
3080 #endif
3081
3082 /*
3083 * At this point, the scsi_device structure already exists
3084 * and has been initialized.
3085 *
3086 * Use this function to allocate target-private data structures,
3087 * if needed by this HBA. Add revised flow-control and queue
3088 * properties for child here, if desired and if you can tell they
3089 * support tagged queueing by now.
3090 */
3091 mptsas_t *mpt;
3092 int lun = sd->sd_address.a_lun;
3093 mdi_pathinfo_t *pip = NULL;
3094 mptsas_tgt_private_t *tgt_private = NULL;
3095 mptsas_target_t *ptgt = NULL;
3096 char *psas_wwn = NULL;
3097 mptsas_phymask_t phymask = 0;
3098 uint64_t sas_wwn = 0;
3099 mptsas_target_addr_t addr;
3100 mpt = SDEV2MPT(sd);
3101
3102 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
3103
3104 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
3105 (void *)hba_dip, (void *)tgt_dip, lun));
3106
3107 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3108 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
3109 ddi_set_name_addr(tgt_dip, NULL);
3110 return (DDI_FAILURE);
3111 }
3112
3113 /*
3114 * The phymask exists if the port is active, otherwise
3115 * nothing to do.
3116 */
3117 if (ddi_prop_exists(DDI_DEV_T_ANY, hba_dip,
3118 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
3119 return (DDI_FAILURE);
3120
3121 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
3122 "phymask", 0);
3123
3124 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3125 if ((pip = (void *)(sd->sd_private)) == NULL) {
3126 /*
3127 * Very bad news if this occurs. Somehow scsi_vhci has
3128 * lost the pathinfo node for this target.
3129 */
3130 return (DDI_NOT_WELL_FORMED);
3131 }
3132
3133 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
3134 DDI_PROP_SUCCESS) {
3135 mptsas_log(mpt, CE_WARN, "Get lun property failed");
3136 return (DDI_FAILURE);
3137 }
3138
3139 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
3140 &psas_wwn) == MDI_SUCCESS) {
3141 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3142 sas_wwn = 0;
3143 }
3144 (void) mdi_prop_free(psas_wwn);
3145 }
3146 } else {
3147 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
3148 DDI_PROP_DONTPASS, LUN_PROP, 0);
3149 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
3150 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
3151 DDI_PROP_SUCCESS) {
3152 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3153 sas_wwn = 0;
3154 }
3155 ddi_prop_free(psas_wwn);
3156 } else {
3157 sas_wwn = 0;
3158 }
3159 }
3160
3161 ASSERT((sas_wwn != 0) || (phymask != 0));
3162 addr.mta_wwn = sas_wwn;
3163 addr.mta_phymask = phymask;
3164 mutex_enter(&mpt->m_mutex);
3165 ptgt = refhash_lookup(mpt->m_targets, &addr);
3166 mutex_exit(&mpt->m_mutex);
3167 if (ptgt == NULL) {
3168 mptsas_log(mpt, CE_WARN, "tgt_init: target doesn't exist or "
3169 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
3170 sas_wwn);
3171 return (DDI_FAILURE);
3172 }
3173 if (hba_tran->tran_tgt_private == NULL) {
3174 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
3175 KM_SLEEP);
3176 tgt_private->t_lun = lun;
3177 tgt_private->t_private = ptgt;
3178 hba_tran->tran_tgt_private = tgt_private;
3179 }
3180
3181 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3182 return (DDI_SUCCESS);
3183 }
3184 mutex_enter(&mpt->m_mutex);
3185
3186 if (ptgt->m_deviceinfo &
3187 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
3188 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
3189 uchar_t *inq89 = NULL;
3190 int inq89_len = 0x238;
3191 int reallen = 0;
3192 int rval = 0;
3193 struct sata_id *sid = NULL;
3194 char model[SATA_ID_MODEL_LEN + 1];
3195 char fw[SATA_ID_FW_LEN + 1];
3196 char *vid, *pid;
3197
3198 mutex_exit(&mpt->m_mutex);
3199 /*
3200 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
3201 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
3202 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
3203 */
3204 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
3205 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
3206 inq89, inq89_len, &reallen, 1);
3207
3208 if (rval != 0) {
3209 if (inq89 != NULL) {
3210 kmem_free(inq89, inq89_len);
3211 }
3212
3213 mptsas_log(mpt, CE_WARN, "mptsas request inquiry page "
3214 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
3215 return (DDI_SUCCESS);
3216 }
3217 sid = (void *)(&inq89[60]);
3218
3219 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
3220 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
3221
3222 model[SATA_ID_MODEL_LEN] = 0;
3223 fw[SATA_ID_FW_LEN] = 0;
3224
3225 sata_split_model(model, &vid, &pid);
3226
3227 /*
3228 * override SCSA "inquiry-*" properties
3229 */
3230 if (vid)
3231 (void) scsi_device_prop_update_inqstring(sd,
3232 INQUIRY_VENDOR_ID, vid, strlen(vid));
3233 if (pid)
3234 (void) scsi_device_prop_update_inqstring(sd,
3235 INQUIRY_PRODUCT_ID, pid, strlen(pid));
3236 (void) scsi_device_prop_update_inqstring(sd,
3237 INQUIRY_REVISION_ID, fw, strlen(fw));
3238
3239 if (inq89 != NULL) {
3240 kmem_free(inq89, inq89_len);
3241 }
3242 } else {
3243 mutex_exit(&mpt->m_mutex);
3244 }
3245
3246 return (DDI_SUCCESS);
3247 }
3248 /*
3249 * tran_tgt_free(9E) - target device instance deallocation
3250 */
3251 static void
3252 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3253 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3254 {
3255 #ifndef __lock_lint
3256 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3257 #endif
3258
3259 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3260
3261 if (tgt_private != NULL) {
3262 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3263 hba_tran->tran_tgt_private = NULL;
3264 }
3265 }
3266
3267 /*
3268 * scsi_pkt handling
3269 *
3270 * Visible to the external world via the transport structure.
3271 */
3272
/*
 * tran_start(9E) - transport a command to the addressed target/lun
 *
 * Notes:
 *	- transport the command to the addressed SCSI target/lun device
 *	- normal operation is to schedule the command to be transported,
 *	  and return TRAN_ACCEPT if this is successful.
 *	- if NO_INTR, tran_start must poll device for command completion
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	/* Defensive duplicate of the ASSERT for non-DEBUG kernels. */
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by another thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmds. An extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and is accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An I/O-requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and, while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst is processed.
	 *
	 * We enable this feature only when the helper threads are enabled,
	 * at which point we think the load is heavy.
	 *
	 * The per-instance mutex m_tx_waitq_mutex protects
	 * m_tx_waitqtail, m_tx_waitq and m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		/* Try to take the instance mutex without blocking. */
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/*
			 * Polled commands may not be deferred to the
			 * tx_waitq; block for the instance mutex instead.
			 */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/*
				 * Another thread is already draining the
				 * tx_waitq; just append and let it pick
				 * this command up.
				 */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		/* No helper threads: take the instance mutex directly. */
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3422
3423 /*
3424 * Accept all the queued cmds(if any) before accept the current one.
3425 */
3426 static int
3427 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3428 {
3429 int rval;
3430 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3431
3432 ASSERT(mutex_owned(&mpt->m_mutex));
3433 /*
3434 * The call to mptsas_accept_tx_waitq() must always be performed
3435 * because that is where mpt->m_tx_draining is cleared.
3436 */
3437 mutex_enter(&mpt->m_tx_waitq_mutex);
3438 mptsas_accept_tx_waitq(mpt);
3439 mutex_exit(&mpt->m_tx_waitq_mutex);
3440 /*
3441 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3442 * in this case, m_mutex is acquired.
3443 */
3444 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3445 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3446 /*
3447 * The command should be allowed to retry by returning
3448 * TRAN_BUSY to stall the I/O's which come from
3449 * scsi_vhci since the device/path is in unstable state
3450 * now.
3451 */
3452 return (TRAN_BUSY);
3453 } else {
3454 /*
3455 * The device is offline, just fail the command by
3456 * return TRAN_FATAL_ERROR.
3457 */
3458 return (TRAN_FATAL_ERROR);
3459 }
3460 }
3461 rval = mptsas_accept_pkt(mpt, cmd);
3462
3463 return (rval);
3464 }
3465
3466 #ifdef MPTSAS_FAULTINJECTION
3467 static void
3468 mptsas_fminj_move_cmd_to_doneq(mptsas_t *mpt, mptsas_cmd_t *cmd,
3469 uchar_t reason, uint_t stat)
3470 {
3471 struct scsi_pkt *pkt = cmd->cmd_pkt;
3472
3473 TAILQ_REMOVE(&mpt->m_fminj_cmdq, cmd, cmd_active_link);
3474
3475 /* Setup reason/statistics. */
3476 pkt->pkt_reason = reason;
3477 pkt->pkt_statistics = stat;
3478
3479 cmd->cmd_active_expiration = 0;
3480
3481 /* Move command to doneque. */
3482 cmd->cmd_linkp = NULL;
3483 cmd->cmd_flags |= CFLAG_FINISHED;
3484 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
3485
3486 *mpt->m_donetail = cmd;
3487 mpt->m_donetail = &cmd->cmd_linkp;
3488 mpt->m_doneq_len++;
3489 }
3490
3491 static void
3492 mptsas_fminj_move_tgt_to_doneq(mptsas_t *mpt, ushort_t target,
3493 uchar_t reason, uint_t stat)
3494 {
3495 mptsas_cmd_t *cmd;
3496
3497 ASSERT(mutex_owned(&mpt->m_mutex));
3498
3499 if (!TAILQ_EMPTY(&mpt->m_fminj_cmdq)) {
3500 cmd = TAILQ_FIRST(&mpt->m_fminj_cmdq);
3501 ASSERT(cmd != NULL);
3502
3503 while (cmd != NULL) {
3504 mptsas_cmd_t *next = TAILQ_NEXT(cmd, cmd_active_link);
3505
3506 if (Tgt(cmd) == target) {
3507 mptsas_fminj_move_cmd_to_doneq(mpt, cmd,
3508 reason, stat);
3509 }
3510 cmd = next;
3511 }
3512 }
3513 }
3514
3515 static void
3516 mptsas_fminj_watchsubr(mptsas_t *mpt,
3517 struct mptsas_active_cmdq *expired)
3518 {
3519 mptsas_cmd_t *cmd;
3520
3521 ASSERT(mutex_owned(&mpt->m_mutex));
3522
3523 if (!TAILQ_EMPTY(&mpt->m_fminj_cmdq)) {
3524 hrtime_t timestamp = gethrtime();
3525
3526 cmd = TAILQ_FIRST(&mpt->m_fminj_cmdq);
3527 ASSERT(cmd != NULL);
3528
3529 while (cmd != NULL) {
3530 mptsas_cmd_t *next = TAILQ_NEXT(cmd, cmd_active_link);
3531
3532 if (cmd->cmd_active_expiration <= timestamp) {
3533 struct scsi_pkt *pkt = cmd->cmd_pkt;
3534
3535 DTRACE_PROBE1(mptsas__command__timeout,
3536 struct scsi_pkt *, pkt);
3537
3538 /* Setup proper flags. */
3539 pkt->pkt_reason = CMD_TIMEOUT;
3540 pkt->pkt_statistics = (STAT_TIMEOUT |
3541 STAT_DEV_RESET);
3542 cmd->cmd_active_expiration = 0;
3543
3544 TAILQ_REMOVE(&mpt->m_fminj_cmdq, cmd,
3545 cmd_active_link);
3546 TAILQ_INSERT_TAIL(expired, cmd,
3547 cmd_active_link);
3548 }
3549 cmd = next;
3550 }
3551 }
3552 }
3553
3554 static int
3555 mptsas_fminject(mptsas_t *mpt, mptsas_cmd_t *cmd)
3556 {
3557 struct scsi_pkt *pkt = cmd->cmd_pkt;
3558
3559 ASSERT(mutex_owned(&mpt->m_mutex));
3560
3561 if (pkt->pkt_flags & FLAG_PKT_TIMEOUT) {
3562 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
3563 (pkt->pkt_comp != NULL)) {
3564 pkt->pkt_state = (STATE_GOT_BUS|STATE_GOT_TARGET|
3565 STATE_SENT_CMD);
3566 cmd->cmd_active_expiration =
3567 gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
3568 TAILQ_INSERT_TAIL(&mpt->m_fminj_cmdq,
3569 cmd, cmd_active_link);
3570 return (0);
3571 }
3572 }
3573 return (-1);
3574 }
3575 #endif /* MPTSAS_FAULTINJECTION */
3576
/*
 * Accept a prepared command for transport: validate the target state,
 * then either start the command immediately (free slot, throttle
 * permitting) or place it on the wait queue. Polled (FLAG_NOINTR)
 * commands are driven to completion here. Caller must hold m_mutex.
 * Returns a TRAN_* code.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Commands arriving via the tx_waitq may not be prepared yet. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the device handles will be invalidated.
	 * This is temporary and, if target is still attached, the device
	 * handles will be re-assigned when firmware reset completes.
	 * Then, if command was already waiting, complete the command
	 * otherwise return BUSY and expect transport retry.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		NDBG20(("retry command, invalid devhdl, during FW reset."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If the device handle has been invalidated, set the response
	 * reason to indicate the device is gone. Then add the
	 * command to the done queue and run the completion routine
	 * so the initiator of the command can clean up.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, invalid devhdl because "
		    "device gone."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}

	/*
	 * Do fault injection before transmitting command.
	 * FLAG_NOINTR commands are skipped.
	 */
#ifdef MPTSAS_FAULTINJECTION
	if (!mptsas_fminject(mpt, cmd)) {
		return (TRAN_ACCEPT);
	}
#endif

	/*
	 * The first case is the normal case. mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free slot: queue and wait for one. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd. For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3691
/*
 * Claim a free request slot for 'cmd'. Returns TRUE and records the
 * slot in cmd->cmd_slot on success; returns FALSE when every normal
 * slot is occupied. Caller must hold m_mutex.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots = mpt->m_active;
	uint_t		slot, start_rotor;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor. If no slot is
	 * available, we'll return FALSE to indicate that. This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1. The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Rotor wrapped all the way around: every slot is busy. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands.
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3753
/*
 * prepare the pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields and do some checks.
 * Returns TRAN_ACCEPT, or TRAN_BUSY when fault injection requests it.
 */
static int
mptsas_prepare_pkt(mptsas_cmd_t *cmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(cmd);

	NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));

#ifdef MPTSAS_FAULTINJECTION
	/* Check for fault flags prior to performing actual initialization. */
	if (pkt->pkt_flags & FLAG_PKT_BUSY) {
		return (TRAN_BUSY);
	}
#endif

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;
	cmd->cmd_age = 0;
	cmd->cmd_pkt_flags = pkt->pkt_flags;

	/*
	 * zero status byte.
	 */
	*(pkt->pkt_scbp) = 0;

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		/* Assume nothing transferred until completion updates it. */
		pkt->pkt_resid = cmd->cmd_dmacount;

		/*
		 * consistent packets need to be sync'ed first
		 * (only for data going out)
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
		    (cmd->cmd_flags & CFLAG_DMASEND)) {
			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	/* Clear stale transport flags, then mark prepared/in-transport. */
	cmd->cmd_flags =
	    (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;

	return (TRAN_ACCEPT);
}
3809
/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *	- allocate scsi_pkt
 *	- allocate scsi_pkt and DMA resources
 *	- allocate DMA resources to an already-allocated pkt
 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	/* Resolve the target from the per-target private data. */
	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
		if (cmd == NULL)
			return (NULL);

		/*
		 * The cache constructor attached a DMA handle; preserve
		 * it across the bzero of the whole cmd + pkt region.
		 */
		save_dma_handle = cmd->cmd_dmahandle;
		bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
		cmd->cmd_dmahandle = save_dma_handle;

		/* The scsi_pkt is co-allocated directly after the cmd. */
		pkt = (void *)((uchar_t *)cmd +
		    sizeof (struct mptsas_cmd));
		pkt->pkt_ha_private = (opaque_t)cmd;
		pkt->pkt_address = *ap;
		pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
		pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
		pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
		cmd->cmd_pkt = (struct scsi_pkt *)pkt;
		cmd->cmd_cdblen = (uchar_t)cmdlen;
		cmd->cmd_scblen = statuslen;
		cmd->cmd_rqslen = SENSE_LENGTH;
		cmd->cmd_tgt_addr = ptgt;

		/*
		 * If the caller's CDB/status/private areas don't fit in
		 * the embedded buffers, allocate external ones.
		 */
		if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (mptsas_pkt_alloc_extern(mpt, cmd,
			    cmdlen, tgtlen, statuslen, kf)) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		/* Reusing a caller-supplied pkt; reset timing fields. */
		cmd = PKT2CMD(pkt);
		pkt->pkt_start = 0;
		pkt->pkt_stop = 0;
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		/* Skip the bind; just harvest this window's cookies. */
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation. This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: set up the first window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Bind failed: map the DDI error to a bioerror. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

	get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
				/*
				 * if we have an ENOMEM condition we need to
				 * behave the same way as the rest of this
				 * routine
				 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used). totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
		    cmd->cmd_dmacount));
	}
	return (pkt);
}
4145
/*
 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
 *
 * Notes:
 *	- also frees DMA resources if allocated
 *	- implicit DMA synchonization
 */
static void
mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	/* Release the DMA binding made in tran_init_pkt, if any. */
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/* Free the scatter/gather list sized to cmd_cookiec. */
	if (cmd->cmd_sg) {
		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
		cmd->cmd_sg = NULL;
	}

	mptsas_free_extra_sgl_frame(mpt, cmd);

	/*
	 * Fully embedded commands go straight back to the kmem cache;
	 * commands with externally allocated CDB/status/private areas
	 * need their extern buffers freed first.
	 */
	if ((cmd->cmd_flags &
	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
	    CFLAG_SCBEXTERN)) == 0) {
		cmd->cmd_flags = CFLAG_FREE;
		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
	} else {
		mptsas_pkt_destroy_extern(mpt, cmd);
	}
}
4183
/*
 * kmem cache constructor and destructor:
 * When constructing, we allocate the dma handle for the command (the
 * command itself is zeroed later, at packet-allocation time in
 * mptsas_scsi_init_pkt); when destructing, just free the dma handle.
 */
4189 static int
4190 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4191 {
4192 mptsas_cmd_t *cmd = buf;
4193 mptsas_t *mpt = cdrarg;
4194 int (*callback)(caddr_t);
4195
4196 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4197
4198 NDBG4(("mptsas_kmem_cache_constructor"));
4199
4200 /*
4201 * allocate a dma handle
4202 */
4203 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
4204 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
4205 cmd->cmd_dmahandle = NULL;
4206 return (-1);
4207 }
4208 return (0);
4209 }
4210
4211 static void
4212 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4213 {
4214 #ifndef __lock_lint
4215 _NOTE(ARGUNUSED(cdrarg))
4216 #endif
4217 mptsas_cmd_t *cmd = buf;
4218
4219 NDBG4(("mptsas_kmem_cache_destructor"));
4220
4221 if (cmd->cmd_dmahandle) {
4222 ddi_dma_free_handle(&cmd->cmd_dmahandle);
4223 cmd->cmd_dmahandle = NULL;
4224 }
4225 }
4226
/*
 * kmem cache constructor for extra-SGL frame buffers: allocate a DMA
 * handle, DMA-able memory, and bind them, recording the device-visible
 * address in m_phys_addr. Each failure path unwinds the resources
 * acquired before it and leaves m_dma_hdl NULL so the destructor can
 * tell a fully constructed object from a failed one.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/* Single-segment, 16-byte-aligned variant of the msg DMA attrs. */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/* Room for all request frames except the primary one. */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address. This chip uses this
	 * address to dma to and from the driver. The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_laddress;

	return (DDI_SUCCESS);
}
4282
4283 static void
4284 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4285 {
4286 #ifndef __lock_lint
4287 _NOTE(ARGUNUSED(cdrarg))
4288 #endif
4289 mptsas_cache_frames_t *p = buf;
4290 if (p->m_dma_hdl != NULL) {
4291 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4292 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4293 ddi_dma_free_handle(&p->m_dma_hdl);
4294 p->m_phys_addr = NULL;
4295 p->m_frames_addr = NULL;
4296 p->m_dma_hdl = NULL;
4297 p->m_acc_hdl = NULL;
4298 }
4299
4300 }
4301
4302 /*
4303 * Figure out if we need to use a different method for the request
4304 * sense buffer and allocate from the map if necessary.
4305 */
4306 static boolean_t
4307 mptsas_cmdarqsize(mptsas_t *mpt, mptsas_cmd_t *cmd, size_t senselength, int kf)
4308 {
4309 if (senselength > mpt->m_req_sense_size) {
4310 unsigned long i;
4311
4312 /* Sense length is limited to an 8 bit value in MPI Spec. */
4313 if (senselength > 255)
4314 senselength = 255;
4315 cmd->cmd_extrqschunks = (senselength +
4316 (mpt->m_req_sense_size - 1))/mpt->m_req_sense_size;
4317 i = (kf == KM_SLEEP ? rmalloc_wait : rmalloc)
4318 (mpt->m_erqsense_map, cmd->cmd_extrqschunks);
4319
4320 if (i == 0)
4321 return (B_FALSE);
4322
4323 cmd->cmd_extrqslen = (uint16_t)senselength;
4324 cmd->cmd_extrqsidx = i - 1;
4325 cmd->cmd_arq_buf = mpt->m_extreq_sense +
4326 (cmd->cmd_extrqsidx * mpt->m_req_sense_size);
4327 } else {
4328 cmd->cmd_rqslen = (uchar_t)senselength;
4329 }
4330
4331 return (B_TRUE);
4332 }
4333
/*
 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
 * for non-standard length cdb, pkt_private, status areas
 * if allocation fails, then deallocate all external space and the pkt
 *
 * Returns 0 on success, 1 on failure (with all partial allocations
 * already released via mptsas_pkt_destroy_extern()).
 */
/* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t cdbp, scbp, tgt;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen = statuslen;
	cmd->cmd_privlen = (uchar_t)tgtlen;

	/* CDB too big for the built-in buffer: allocate it externally. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* Oversized target-private area: allocate it externally. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/* Oversized status area: allocate it externally. */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */
		if (mptsas_cmdarqsize(mpt, cmd, statuslen -
		    MPTSAS_GET_ITEM_OFF(struct scsi_arq_status, sts_sensedata),
		    kf) == B_FALSE)
			goto fail;
	}
	return (0);
fail:
	/*
	 * Destroy-extern inspects the CFLAG_* bits set above to free
	 * exactly what was allocated before the failure.
	 */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
4386
/*
 * deallocate external pkt space and deallocate the pkt
 */
static void
mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));

	/* Double-free guard: a command marked free must never return here. */
	if (cmd->cmd_flags & CFLAG_FREE) {
		mptsas_log(mpt, CE_PANIC,
		    "mptsas_pkt_destroy_extern: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	/* Return any extended request-sense chunks to the resource map. */
	if (cmd->cmd_extrqslen != 0) {
		rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
		    cmd->cmd_extrqsidx + 1);
	}
	/* Free each externally allocated area its CFLAG_* bit records. */
	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
	}
	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
}
4417
4418 /*
4419 * tran_sync_pkt(9E) - explicit DMA synchronization
4420 */
4421 /*ARGSUSED*/
4422 static void
4423 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4424 {
4425 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4426
4427 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4428 ap->a_target, (void *)pkt));
4429
4430 if (cmd->cmd_dmahandle) {
4431 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4432 (cmd->cmd_flags & CFLAG_DMASEND) ?
4433 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4434 }
4435 }
4436
4437 /*
4438 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4439 */
4440 /*ARGSUSED*/
4441 static void
4442 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4443 {
4444 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4445 mptsas_t *mpt = ADDR2MPT(ap);
4446
4447 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4448 ap->a_target, (void *)pkt));
4449
4450 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4451 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4452 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4453 }
4454
4455 mptsas_free_extra_sgl_frame(mpt, cmd);
4456 }
4457
4458 static void
4459 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4460 {
4461 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4462 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4463 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4464 DDI_DMA_SYNC_FORCPU);
4465 }
4466 (*pkt->pkt_comp)(pkt);
4467 }
4468
static void
mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
    ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
{
	/*
	 * Lay down "cookiec" MPI2 simple 64-bit SGEs directly in the main
	 * request frame, one per DMA cookie in cmd->cmd_sg.  "end_flags"
	 * (already shifted by the caller) is OR-ed into the last element
	 * to terminate the list.
	 */
	pMpi2SGESimple64_t sge;
	mptti_t *dmap;
	uint32_t flags;

	dmap = cmd->cmd_sg;

	sge = (pMpi2SGESimple64_t)(&frame->SGL);
	while (cookiec--) {
		ddi_put32(acc_hdl,
		    &sge->Address.Low, dmap->addr.address64.Low);
		ddi_put32(acc_hdl,
		    &sge->Address.High, dmap->addr.address64.High);
		/*
		 * Length and flags share the one FlagsLength word: write
		 * the length, read it back through the access handle, then
		 * OR in the flag bits and write the combined value below.
		 */
		ddi_put32(acc_hdl, &sge->FlagsLength,
		    dmap->count);
		flags = ddi_get32(acc_hdl, &sge->FlagsLength);
		flags |= ((uint32_t)
		    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);

		/*
		 * If this is the last cookie, we set the flags
		 * to indicate so
		 */
		if (cookiec == 0) {
			flags |= end_flags;
		}
		/* Direction: host-to-IOC for writes, IOC-to-host for reads. */
		if (cmd->cmd_flags & CFLAG_DMASEND) {
			flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sge->FlagsLength, flags);
		dmap++;
		sge++;
	}
}
4513
/*
 * Build an MPI2 SGL that does not fit entirely in the main request
 * frame: fill the main frame first, then link one or more chain frames
 * (pre-allocated in cmd->cmd_extra_frames) that hold the remaining
 * simple elements.  The chain frames are DMA-synced for the device
 * before returning.
 */
static void
mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2SGESimple64_t sge;
	pMpi2SGEChain64_t sgechain;
	uint64_t nframe_phys_addr;
	uint_t cookiec;
	mptti_t *dmap;
	uint32_t flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    3 double-words (12 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Some restrictions:
	 * 1. For 64-bit DMA, the simple element and chain element
	 *    are both of 3 double-words (12 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 * 2. On some controllers (like the 1064/1068), a frame can
	 *    hold SGL elements with the last 1 or 2 double-words
	 *    (4 or 8 bytes) un-used. On these controllers, we should
	 *    recognize that there's not enough room for another SGL
	 *    element and move the sge pointer to the next frame.
	 */
	int i, j, k, l, frames, sgemax;
	int temp;
	uint8_t chainflags;
	uint16_t chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reseverd for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	dmap = cmd->cmd_sg;
	sge = (pMpi2SGESimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT));
	dmap += j;
	sge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of double-words (4
	 *    bytes) so the last value should be divided by 4.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	sgechain = (pMpi2SGEChain64_t)sge;
	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_SGE_SIMPLE64) *
		    sizeof (MPI2_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
	ddi_put32(acc_hdl, &sgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	} else {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				sgechain = (pMpi2SGEChain64_t)sge;
				/* The chain element consumes no cookie. */
				j--;
				chainflags = (
				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
				ddi_put8(p->m_acc_hdl,
				    &sgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are more than 2 frames left
				 * we have to next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_SGE_SIMPLE64))
					    >> 2);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_SGE_SIMPLE64) *
					    sizeof (MPI2_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    0);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				sge = (pMpi2SGESimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, dmap->count);
			flags = ddi_get32(p->m_acc_hdl,
			    &sge->FlagsLength);
			flags |= ((uint32_t)(
			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * we set the last simple element as last
			 * element
			 */
			if ((l == sgemax) && (k != frames)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}

			/*
			 * If this is the final cookie we
			 * indicate it by setting the flags
			 */
			if (j == i) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
				    MPI2_SGE_FLAGS_END_OF_BUFFER |
				    MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4814
4815 static void
4816 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4817 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
4818 {
4819 pMpi2IeeeSgeSimple64_t ieeesge;
4820 mptti_t *dmap;
4821 uint8_t flags;
4822
4823 dmap = cmd->cmd_sg;
4824
4825 NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4826 cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4827
4828 ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4829 while (cookiec--) {
4830 ddi_put32(acc_hdl,
4831 &ieeesge->Address.Low, dmap->addr.address64.Low);
4832 ddi_put32(acc_hdl,
4833 &ieeesge->Address.High, dmap->addr.address64.High);
4834 ddi_put32(acc_hdl, &ieeesge->Length,
4835 dmap->count);
4836 NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4837 flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4838 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4839
4840 /*
4841 * If this is the last cookie, we set the flags
4842 * to indicate so
4843 */
4844 if (cookiec == 0) {
4845 flags |= end_flag;
4846 }
4847
4848 ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4849 dmap++;
4850 ieeesge++;
4851 }
4852 }
4853
/*
 * Build an MPI2.5 IEEE-format SGL that does not fit entirely in the
 * main request frame: fill the main frame first, then link one or more
 * chain frames (pre-allocated in cmd->cmd_extra_frames) holding the
 * remaining simple elements.  The chain frames are DMA-synced for the
 * device before returning.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t ieeesge;
	pMpi25IeeeSgeChain64_t ieeesgechain;
	uint64_t nframe_phys_addr;
	uint_t cookiec;
	mptti_t *dmap;
	uint8_t flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Restrictions:
	 *    For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 */
	int i, j, k, l, frames, sgemax;
	int temp;
	uint8_t chainflags;
	uint32_t chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reseverd for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >=
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				/* The chain element consumes no cookie. */
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are more than 2 frames left
				 * we have to next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ASSERT(mpt->m_req_frame_size >=
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
5128
5129 static void
5130 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
5131 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
5132 {
5133 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
5134
5135 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
5136
5137 /*
5138 * Set read/write bit in control.
5139 */
5140 if (cmd->cmd_flags & CFLAG_DMASEND) {
5141 *control |= MPI2_SCSIIO_CONTROL_WRITE;
5142 } else {
5143 *control |= MPI2_SCSIIO_CONTROL_READ;
5144 }
5145
5146 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
5147
5148 /*
5149 * We have 4 cases here. First where we can fit all the
5150 * SG elements into the main frame, and the case
5151 * where we can't. The SG element is also different when using
5152 * MPI2.5 interface.
5153 * If we have more cookies than we can attach to a frame
5154 * we will need to use a chain element to point
5155 * a location of memory where the rest of the S/G
5156 * elements reside.
5157 */
5158 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
5159 if (mpt->m_MPI25) {
5160 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
5161 cmd->cmd_cookiec,
5162 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
5163 } else {
5164 mptsas_sge_mainframe(cmd, frame, acc_hdl,
5165 cmd->cmd_cookiec,
5166 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
5167 | MPI2_SGE_FLAGS_END_OF_BUFFER
5168 | MPI2_SGE_FLAGS_END_OF_LIST) <<
5169 MPI2_SGE_FLAGS_SHIFT));
5170 }
5171 } else {
5172 if (mpt->m_MPI25) {
5173 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
5174 } else {
5175 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
5176 }
5177 }
5178 }
5179
/*
 * Interrupt handling
 * Utility routine. Poll for status of a command sent to HBA
 * without interrupts (a FLAG_NOINTR command).
 *
 * Returns TRUE if the command completed, FALSE if polling gave up
 * (in which case the command is marked CMD_TIMEOUT and removed from
 * the wait queue if it was still queued).
 */
int
mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
{
	int rval = TRUE;

	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));

	/* Task-management commands are started by the TM path itself. */
	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
		mptsas_restart_hba(mpt);
	}

	/*
	 * Wait, using drv_usecwait(), long enough for the command to
	 * reasonably return from the target if the target isn't
	 * "dead".  A polled command may well be sent from scsi_poll, and
	 * there are retries built in to scsi_poll if the transport
	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
	 * and retries the transport up to scsi_poll_busycnt times
	 * (currently 60) if
	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
	 *
	 * limit the waiting to avoid a hang in the event that the
	 * cmd never gets started but we are still receiving interrupts
	 */
	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
			NDBG5(("mptsas_poll: command incomplete"));
			rval = FALSE;
			break;
		}
	}

	if (rval == FALSE) {

		/*
		 * this isn't supposed to happen, the hba must be wedged
		 * Mark this cmd as a timeout.
		 */
		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
		    (STAT_TIMEOUT|STAT_ABORTED));

		if (poll_cmd->cmd_queued == FALSE) {

			NDBG5(("mptsas_poll: not on waitq"));

			poll_cmd->cmd_pkt->pkt_state |=
			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
		} else {

			/* find and remove it from the waitq */
			NDBG5(("mptsas_poll: delete from waitq"));
			mptsas_waitq_delete(mpt, poll_cmd);
		}

	}
	mptsas_fma_check(mpt, poll_cmd);
	NDBG5(("mptsas_poll: done"));
	return (rval);
}
5245
/*
 * Used for polling cmds and TM function
 *
 * With interrupts masked, repeatedly check the reply post queue for a
 * valid reply descriptor and process it.  Returns TRUE once one reply
 * has been handled, FALSE if nothing arrived within the poll window.
 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int cnt;
	pMpi2ReplyDescriptorsUnion_t reply_desc_union;
	uint32_t int_mask;

	NDBG5(("mptsas_wait_intr"));

	mpt->m_polled_intr = 1;

	/*
	 * Get the current interrupt mask and disable interrupts.  When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll for up to "polltime" milliseconds: each iteration that
	 * finds no reply waits 1000 usec before trying again.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/* All-ones in either word means the slot is still unused. */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, reply_desc_union);

		/* Advance the (circular) post queue consumer index. */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mpt->m_polled_intr = 0;

		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Clear polling flag, re-enable interrupts and quit.
	 */
	mpt->m_polled_intr = 0;
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
5318
/*
 * Complete a command for which the IOC posted a SCSI IO success reply
 * descriptor.  No reply frame accompanies this descriptor type; the command
 * in the slot named by the descriptor's SMID is completed directly.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
		mptsas_log(mpt, CE_WARN, "received invalid SMID of %d", SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Mark the pkt fully successful: all state bits set, no residue. */
	pkt = CMD2PKT(cmd);
	ASSERT(pkt->pkt_start != 0);
	pkt->pkt_stop = gethrtime();
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * Passthrough commands are completed by waking their waiter rather
	 * than via the done queue; they keep their slot until retired.
	 */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5389
/*
 * Handle an address reply descriptor: validate the reply frame address it
 * carries, match it to the command in the slot named by the SMID (where one
 * exists), then complete the command according to the reply's Function.
 * The reply frame is returned to the free queue at the end unless the
 * function-specific path keeps it (passthrough/config/diag waiters and the
 * event taskq manage the frame themselves and return early).
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr, reply_frame_dma_baseaddr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range we should ignore this
	 * message and exit the interrupt handler.
	 */
	reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	if ((reply_addr < reply_frame_dma_baseaddr) ||
	    (reply_addr >= (reply_frame_dma_baseaddr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - reply_frame_dma_baseaddr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "received invalid reply frame "
		    "address 0x%x", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    reply_frame_dma_baseaddr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
	    function, reply_addr));

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
			mptsas_log(mpt, CE_WARN, "received invalid SMID of "
			    "%d", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		/*
		 * Passthrough, config and FW-diag commands keep the reply
		 * frame: record it and wake their waiters instead of going
		 * through the done queue.
		 */
		if ((cmd->cmd_flags &
		    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		reply_frame_no = (reply_addr - reply_frame_dma_baseaddr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			NDBG20(("dropping event received during reset"));
			return;
		}

		/*
		 * NOTE(review): the two warning literals below concatenate
		 * without a separating space ("availablefor"); consider
		 * fixing the message text in a follow-up.
		 */
		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* FW-internal commands are not completed via the done queue. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5620
#ifdef MPTSAS_DEBUG
/*
 * Debug-only copy of the most recently received autosense data, filled in by
 * mptsas_check_scsi_io_error() so it can be inspected from a debugger.
 */
static uint8_t mptsas_last_sense[256];
#endif
5624
5625 static void
5626 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5627 mptsas_cmd_t *cmd)
5628 {
5629 uint8_t scsi_status, scsi_state;
5630 uint16_t ioc_status, cmd_rqs_len;
5631 uint32_t xferred, sensecount, responsedata, loginfo = 0;
5632 struct scsi_pkt *pkt;
5633 struct scsi_arq_status *arqstat;
5634 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5635 uint8_t *sensedata = NULL;
5636 uint64_t sas_wwn;
5637 uint8_t phy;
5638 char wwn_str[MPTSAS_WWN_STRLEN];
5639
5640 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5641 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5642 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5643 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5644 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5645 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5646 &reply->ResponseInfo);
5647
5648 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5649 sas_wwn = ptgt->m_addr.mta_wwn;
5650 phy = ptgt->m_phynum;
5651 if (sas_wwn == 0) {
5652 (void) sprintf(wwn_str, "p%x", phy);
5653 } else {
5654 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5655 }
5656 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5657 &reply->IOCLogInfo);
5658 mptsas_log(mpt, CE_NOTE,
5659 "log info 0x%x received for target %d %s, "
5660 "scsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5661 loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5662 scsi_state);
5663 }
5664
5665 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5666 scsi_status, ioc_status, scsi_state));
5667
5668 pkt = CMD2PKT(cmd);
5669 ASSERT(pkt->pkt_start != 0);
5670 pkt->pkt_stop = gethrtime();
5671 *(pkt->pkt_scbp) = scsi_status;
5672
5673 if (loginfo == 0x31170000) {
5674 /*
5675 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5676 * 0x31170000 comes, that means the device missing delay
5677 * is in progressing, the command need retry later.
5678 */
5679 *(pkt->pkt_scbp) = STATUS_BUSY;
5680 return;
5681 }
5682
5683 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5684 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5685 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5686 pkt->pkt_reason = CMD_INCOMPLETE;
5687 pkt->pkt_state |= STATE_GOT_BUS;
5688 if (ptgt->m_reset_delay == 0) {
5689 mptsas_set_throttle(mpt, ptgt,
5690 DRAIN_THROTTLE);
5691 }
5692 return;
5693 }
5694
5695 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5696 responsedata &= 0x000000FF;
5697 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5698 mptsas_log(mpt, CE_NOTE, "TLR not supported");
5699 pkt->pkt_reason = CMD_TLR_OFF;
5700 return;
5701 }
5702 }
5703
5704
5705 switch (scsi_status) {
5706 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5707 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5708 arqstat = (void*)(pkt->pkt_scbp);
5709 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5710 (pkt->pkt_scbp));
5711 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5712 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5713 if (cmd->cmd_flags & CFLAG_XARQ) {
5714 pkt->pkt_state |= STATE_XARQ_DONE;
5715 }
5716 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5717 pkt->pkt_state |= STATE_XFERRED_DATA;
5718 }
5719 arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5720 arqstat->sts_rqpkt_state = pkt->pkt_state;
5721 arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5722 arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5723 sensedata = (uint8_t *)&arqstat->sts_sensedata;
5724 cmd_rqs_len = cmd->cmd_extrqslen ?
5725 cmd->cmd_extrqslen : cmd->cmd_rqslen;
5726 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
5727 DDI_DMA_SYNC_FORKERNEL);
5728 #ifdef MPTSAS_DEBUG
5729 bcopy(cmd->cmd_arq_buf, mptsas_last_sense,
5730 ((cmd_rqs_len >= sizeof (mptsas_last_sense)) ?
5731 sizeof (mptsas_last_sense):cmd_rqs_len));
5732 #endif
5733 bcopy((uchar_t *)cmd->cmd_arq_buf, sensedata,
5734 ((cmd_rqs_len >= sensecount) ? sensecount :
5735 cmd_rqs_len));
5736 arqstat->sts_rqpkt_resid = (cmd_rqs_len - sensecount);
5737 cmd->cmd_flags |= CFLAG_CMDARQ;
5738 /*
5739 * Set proper status for pkt if autosense was valid
5740 */
5741 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5742 struct scsi_status zero_status = { 0 };
5743 arqstat->sts_rqpkt_status = zero_status;
5744 }
5745
5746 /*
5747 * ASC=0x47 is parity error
5748 * ASC=0x48 is initiator detected error received
5749 */
5750 if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5751 ((scsi_sense_asc(sensedata) == 0x47) ||
5752 (scsi_sense_asc(sensedata) == 0x48))) {
5753 mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5754 }
5755
5756 /*
5757 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5758 * ASC/ASCQ=0x25/0x00 means invalid lun
5759 */
5760 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5761 (scsi_sense_asc(sensedata) == 0x3F) &&
5762 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5763 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5764 (scsi_sense_asc(sensedata) == 0x25) &&
5765 (scsi_sense_ascq(sensedata) == 0x00))) {
5766 mptsas_topo_change_list_t *topo_node = NULL;
5767
5768 topo_node = kmem_zalloc(
5769 sizeof (mptsas_topo_change_list_t),
5770 KM_NOSLEEP);
5771 if (topo_node == NULL) {
5772 mptsas_log(mpt, CE_NOTE, "No memory"
5773 "resource for handle SAS dynamic"
5774 "reconfigure");
5775 break;
5776 }
5777 topo_node->mpt = mpt;
5778 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5779 topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5780 topo_node->devhdl = ptgt->m_devhdl;
5781 topo_node->object = (void *)ptgt;
5782 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5783
5784 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5785 mptsas_handle_dr,
5786 (void *)topo_node,
5787 DDI_NOSLEEP)) != DDI_SUCCESS) {
5788 kmem_free(topo_node,
5789 sizeof (mptsas_topo_change_list_t));
5790 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5791 "for handle SAS dynamic reconfigure"
5792 "failed");
5793 }
5794 }
5795 break;
5796 case MPI2_SCSI_STATUS_GOOD:
5797 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5798 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5799 pkt->pkt_reason = CMD_DEV_GONE;
5800 pkt->pkt_state |= STATE_GOT_BUS;
5801 if (ptgt->m_reset_delay == 0) {
5802 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5803 }
5804 NDBG31(("lost disk for target%d, command:%x",
5805 Tgt(cmd), pkt->pkt_cdbp[0]));
5806 break;
5807 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5808 NDBG31(("data overrun: xferred=%d", xferred));
5809 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5810 pkt->pkt_reason = CMD_DATA_OVR;
5811 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5812 | STATE_SENT_CMD | STATE_GOT_STATUS
5813 | STATE_XFERRED_DATA);
5814 pkt->pkt_resid = 0;
5815 break;
5816 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5817 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5818 NDBG31(("data underrun: xferred=%d", xferred));
5819 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5820 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5821 | STATE_SENT_CMD | STATE_GOT_STATUS);
5822 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5823 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5824 pkt->pkt_state |= STATE_XFERRED_DATA;
5825 }
5826 break;
5827 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5828 if (cmd->cmd_active_expiration <= gethrtime()) {
5829 /*
5830 * When timeout requested, propagate
5831 * proper reason and statistics to
5832 * target drivers.
5833 */
5834 mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
5835 STAT_BUS_RESET | STAT_TIMEOUT);
5836 } else {
5837 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
5838 STAT_BUS_RESET);
5839 }
5840 break;
5841 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5842 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5843 mptsas_set_pkt_reason(mpt,
5844 cmd, CMD_RESET, STAT_DEV_RESET);
5845 break;
5846 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5847 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5848 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5849 mptsas_set_pkt_reason(mpt,
5850 cmd, CMD_TERMINATED, STAT_TERMINATED);
5851 break;
5852 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5853 case MPI2_IOCSTATUS_BUSY:
5854 /*
5855 * set throttles to drain
5856 */
5857 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5858 ptgt = refhash_next(mpt->m_targets, ptgt)) {
5859 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5860 }
5861
5862 /*
5863 * retry command
5864 */
5865 cmd->cmd_flags |= CFLAG_RETRY;
5866 cmd->cmd_pkt_flags |= FLAG_HEAD;
5867
5868 (void) mptsas_accept_pkt(mpt, cmd);
5869 break;
5870 default:
5871 mptsas_log(mpt, CE_WARN,
5872 "unknown ioc_status = %x", ioc_status);
5873 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5874 "count = %x, scsi_status = %x", scsi_state,
5875 xferred, scsi_status);
5876 break;
5877 }
5878 break;
5879 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5880 mptsas_handle_qfull(mpt, cmd);
5881 break;
5882 case MPI2_SCSI_STATUS_BUSY:
5883 NDBG31(("scsi_status busy received"));
5884 break;
5885 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5886 NDBG31(("scsi_status reservation conflict received"));
5887 break;
5888 default:
5889 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x",
5890 scsi_status, ioc_status);
5891 mptsas_log(mpt, CE_WARN,
5892 "mptsas_process_intr: invalid scsi status");
5893 break;
5894 }
5895 }
5896
5897 static void
5898 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5899 mptsas_cmd_t *cmd)
5900 {
5901 uint8_t task_type;
5902 uint16_t ioc_status;
5903 uint32_t log_info;
5904 uint16_t dev_handle;
5905 struct scsi_pkt *pkt = CMD2PKT(cmd);
5906
5907 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5908 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5909 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5910 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5911
5912 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5913 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5914 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d",
5915 task_type, ioc_status, log_info, dev_handle);
5916 pkt->pkt_reason = CMD_INCOMPLETE;
5917 return;
5918 }
5919
5920 switch (task_type) {
5921 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5922 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5923 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5924 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5925 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5926 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5927 break;
5928 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5929 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5930 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5931 /*
5932 * Check for invalid DevHandle of 0 in case application
5933 * sends bad command. DevHandle of 0 could cause problems.
5934 */
5935 if (dev_handle == 0) {
5936 mptsas_log(mpt, CE_WARN, "Can't flush target with"
5937 " DevHandle of 0.");
5938 } else {
5939 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5940 task_type);
5941 }
5942 break;
5943 default:
5944 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5945 task_type);
5946 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5947 break;
5948 }
5949 }
5950
/*
 * Body of one done-queue helper thread.  Drains completed commands from this
 * thread's private queue, calling each pkt's completion routine with the
 * queue mutex dropped.  Runs until the thread's MPTSAS_DONEQ_THREAD_ACTIVE
 * flag is cleared, then decrements m_doneq_thread_n and signals
 * m_doneq_thread_cv so the teardown path can see it exit.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		/* NULL from _rm means the queue was empty (spurious wakeup). */
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		mutex_exit(&item->mutex);
		/* Invoke the completion callback without holding item->mutex. */
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5982
5983
/*
 * mpt interrupt handler.  Claims the interrupt only when the chip is powered
 * up, no polling is in progress, and a reply is actually pending; otherwise
 * returns DDI_INTR_UNCLAIMED so a sharing device can service it.
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
	mptsas_t			*mpt = (void *)arg1;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uchar_t				did_reply = FALSE;

	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

	mutex_enter(&mpt->m_mutex);

	/*
	 * If interrupts are shared by two channels then check whether this
	 * interrupt is genuinely for this channel by making sure first the
	 * chip is in high power state.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * If polling, interrupt was triggered by some shared interrupt because
	 * IOC interrupts are disabled during polling, so polling routine will
	 * handle any replies.  Considering this, if polling is happening,
	 * return with interrupt unclaimed.
	 */
	if (mpt->m_polled_intr) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Read the istat register.
	 */
	if ((INTPENDING(mpt)) != 0) {
		/*
		 * read fifo until empty.
		 */
#ifndef __lock_lint
		_NOTE(CONSTCOND)
#endif
		while (TRUE) {
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

			/* An all-ones descriptor marks the end of the fifo. */
			if (ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
				break;
			}

			/*
			 * The reply is valid, process it according to its
			 * type.  Also, set a flag for updating the reply index
			 * after they've all been processed.
			 */
			did_reply = TRUE;

			mptsas_process_intr(mpt, reply_desc_union);

			/*
			 * Increment post index and roll over if needed.
			 */
			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
				mpt->m_post_index = 0;
			}
		}

		/*
		 * Update the global reply index if at least one reply was
		 * processed.
		 */
		if (did_reply) {
			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	NDBG1(("mptsas_intr complete"));

	/*
	 * If no helper threads are created, process the doneq in ISR.  If
	 * helpers are created, use the doneq length as a metric to measure the
	 * load on the interrupt CPU.  If it is long enough, which indicates the
	 * load is heavy, then we deliver the IO completions to the helpers.
	 * This measurement has some limitations, although it is simple and
	 * straightforward and works well for most of the cases at present.
	 */
	if (!mpt->m_doneq_thread_n ||
	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
		mptsas_doneq_empty(mpt);
	} else {
		mptsas_deliver_doneq_thread(mpt);
	}

	/*
	 * If there are queued cmd, start them now.
	 */
	if (mpt->m_waitq != NULL) {
		mptsas_restart_waitq(mpt);
	}

	mutex_exit(&mpt->m_mutex);
	return (DDI_INTR_CLAIMED);
}
6100
/*
 * Dispatch one reply descriptor by type (SCSI IO success vs. address reply),
 * then reset the descriptor slot to the all-ones "empty" pattern for re-use.
 * Callers (mptsas_intr/mptsas_wait_intr) advance m_post_index afterwards.
 */
static void
mptsas_process_intr(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc_union)
{
	uint8_t reply_type;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * The reply is valid, process it according to its
	 * type.
	 */
	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
	    &reply_desc_union->Default.ReplyFlags);
	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
	    reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
		mptsas_handle_address_reply(mpt, reply_desc_union);
	} else {
		mptsas_log(mpt, CE_WARN, "bad reply type %x", reply_type);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}

	/*
	 * Clear the reply descriptor for re-use and increment
	 * index.
	 */
	ddi_put64(mpt->m_acc_post_queue_hdl,
	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
	    0xFFFFFFFFFFFFFFFF);
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
6137
/*
 * handle qfull condition: either give up and let the target driver's own
 * QFULL handling take over (retries exhausted, or qfull-retries set to 0),
 * or requeue the command at the head of the wait queue with a reduced
 * throttle.
 */
static void
mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
	    (ptgt->m_qfull_retries == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0. In either case
		 * we want the target driver's QFULL handling
		 * to kick in. We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	} else {
		if (ptgt->m_reset_delay == 0) {
			/* Cap the throttle at two below the current load. */
			ptgt->m_t_throttle =
			    max((ptgt->m_t_ncmds - 2), 0);
		}

		cmd->cmd_pkt_flags |= FLAG_HEAD;
		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
		cmd->cmd_flags |= CFLAG_RETRY;

		/* Resubmit the command (FLAG_HEAD puts it at the front). */
		(void) mptsas_accept_pkt(mpt, cmd);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (m_t_ncmds == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling start
		 * (see psarc/1994/313); if there are commands outstanding,
		 * throttle is set to (m_t_ncmds - 2)
		 */
		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * mptsas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
			if (mpt->m_restart_cmd_timeid == 0) {
				mpt->m_restart_cmd_timeid =
				    timeout(mptsas_restart_cmd, mpt,
				    ptgt->m_qfull_retry_interval);
			}
		}
	}
}
6193
6194 mptsas_phymask_t
6195 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
6196 {
6197 mptsas_phymask_t phy_mask = 0;
6198 uint8_t i = 0;
6199
6200 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
6201
6202 ASSERT(mutex_owned(&mpt->m_mutex));
6203
6204 /*
6205 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
6206 */
6207 if (physport == 0xFF) {
6208 return (0);
6209 }
6210
6211 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
6212 if (mpt->m_phy_info[i].attached_devhdl &&
6213 (mpt->m_phy_info[i].phy_mask != 0) &&
6214 (mpt->m_phy_info[i].port_num == physport)) {
6215 phy_mask = mpt->m_phy_info[i].phy_mask;
6216 break;
6217 }
6218 }
6219 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
6220 mpt->m_instance, physport, phy_mask));
6221 return (phy_mask);
6222 }
6223
6224 /*
6225 * mpt free device handle after device gone, by use of passthrough
6226 */
6227 static int
6228 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
6229 {
6230 Mpi2SasIoUnitControlRequest_t req;
6231 Mpi2SasIoUnitControlReply_t rep;
6232 int ret;
6233
6234 ASSERT(mutex_owned(&mpt->m_mutex));
6235
6236 /*
6237 * Need to compose a SAS IO Unit Control request message
6238 * and call mptsas_do_passthru() function
6239 */
6240 bzero(&req, sizeof (req));
6241 bzero(&rep, sizeof (rep));
6242
6243 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
6244 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
6245 req.DevHandle = LE_16(devhdl);
6246
6247 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
6248 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
6249 if (ret != 0) {
6250 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6251 "Control error %d", ret);
6252 return (DDI_FAILURE);
6253 }
6254
6255 /* do passthrough success, check the ioc status */
6256 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
6257 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6258 "Control IOCStatus %d", LE_16(rep.IOCStatus));
6259 return (DDI_FAILURE);
6260 }
6261
6262 return (DDI_SUCCESS);
6263 }
6264
/*
 * We have a SATA target that has changed, which means the "bridge-port"
 * property must be updated to reflect the SAS WWN of the new attachment point.
 * This may change if a SATA device changes which bay, and therefore phy, it is
 * plugged into. This SATA device may be a multipath virtual device or may be a
 * physical device. We have to handle both cases.
 */
static boolean_t
mptsas_update_sata_bridge(mptsas_t *mpt, dev_info_t *parent,
    mptsas_target_t *ptgt)
{
	int		rval;
	uint16_t	dev_hdl;
	uint16_t	pdev_hdl;
	uint64_t	dev_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint32_t	page_address;
	uint16_t	bay_num, enclosure, io_flags;
	uint32_t	dev_info;
	char		uabuf[SCSI_WWN_BUFLEN];
	dev_info_t	*dip;
	mdi_pathinfo_t	*pip;

	/* Fetch device page 0 to learn the current attachment-point WWN. */
	mutex_enter(&mpt->m_mutex);
	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)ptgt->m_devhdl;
	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
	    &dev_sas_wwn, &dev_info, &physport, &phy_id, &pdev_hdl, &bay_num,
	    &enclosure, &io_flags);
	mutex_exit(&mpt->m_mutex);
	if (rval != DDI_SUCCESS) {
		/*
		 * NOTE(review): the message says "handle" but prints
		 * page_address (the encoded page address, not the bare
		 * devhdl) — confirm intent before changing the text.
		 */
		mptsas_log(mpt, CE_WARN, "unable to get SAS page 0 for "
		    "handle %d", page_address);
		return (B_FALSE);
	}

	if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas unable to format SATA bridge WWN");
		return (B_FALSE);
	}

	/* Multipath case: update the property on the mdi path node. */
	if (mpt->m_mpxio_enable == TRUE && (pip = mptsas_find_path_addr(parent,
	    ptgt->m_addr.mta_wwn, 0)) != NULL) {
		if (mdi_prop_update_string(pip, SCSI_ADDR_PROP_BRIDGE_PORT,
		    uabuf) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to create SCSI bridge port "
			    "property for SATA device");
			return (B_FALSE);
		}
		return (B_TRUE);
	}

	/* Physical case: update the property on the child dev_info node. */
	if ((dip = mptsas_find_child_addr(parent, ptgt->m_addr.mta_wwn,
	    0)) != NULL) {
		if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to create SCSI bridge port "
			    "property for SATA device");
			return (B_FALSE);
		}
		return (B_TRUE);
	}

	mptsas_log(mpt, CE_WARN, "mptsas failed to find dev_info_t or "
	    "mdi_pathinfo_t for target with WWN %016" PRIx64,
	    ptgt->m_addr.mta_wwn);

	return (B_FALSE);
}
6338
6339 static void
6340 mptsas_update_phymask(mptsas_t *mpt)
6341 {
6342 mptsas_phymask_t mask = 0, phy_mask;
6343 char *phy_mask_name;
6344 uint8_t current_port;
6345 int i, j;
6346
6347 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6348
6349 ASSERT(mutex_owned(&mpt->m_mutex));
6350
6351 (void) mptsas_get_sas_io_unit_page(mpt);
6352
6353 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6354
6355 for (i = 0; i < mpt->m_num_phys; i++) {
6356 phy_mask = 0x00;
6357
6358 if (mpt->m_phy_info[i].attached_devhdl == 0)
6359 continue;
6360
6361 bzero(phy_mask_name, sizeof (phy_mask_name));
6362
6363 current_port = mpt->m_phy_info[i].port_num;
6364
6365 if ((mask & (1 << i)) != 0)
6366 continue;
6367
6368 for (j = 0; j < mpt->m_num_phys; j++) {
6369 if (mpt->m_phy_info[j].attached_devhdl &&
6370 (mpt->m_phy_info[j].port_num == current_port)) {
6371 phy_mask |= (1 << j);
6372 }
6373 }
6374 mask = mask | phy_mask;
6375
6376 for (j = 0; j < mpt->m_num_phys; j++) {
6377 if ((phy_mask >> j) & 0x01) {
6378 mpt->m_phy_info[j].phy_mask = phy_mask;
6379 }
6380 }
6381
6382 (void) sprintf(phy_mask_name, "%x", phy_mask);
6383
6384 mutex_exit(&mpt->m_mutex);
6385 /*
6386 * register a iport, if the port has already been existed
6387 * SCSA will do nothing and just return.
6388 */
6389 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6390 mutex_enter(&mpt->m_mutex);
6391 }
6392 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6393 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6394 }
6395
/*
 * mptsas_handle_dr is a taskq handler for dynamic reconfiguration (DR).
 * It consumes a linked list of topology-change nodes; the DR actions include:
 * 1. Directly attached Device Added/Removed.
 * 2. Expander Device Added/Removed.
 * 3. Indirectly Attached Device Added/Expander.
 * 4. LUNs of an existing device status change.
 * 5. RAID volume created/deleted.
 * 6. Member of RAID volume is released because of RAID deletion.
 * 7. Physical disks are removed because of RAID creation.
 *
 * Each node in the list is freed as it is processed.  For most events the
 * parent iport dip is resolved from the physport/phymask before calling
 * mptsas_handle_topo_change().
 */
static void
mptsas_handle_dr(void *args)
{
	mptsas_topo_change_list_t *topo_node = NULL;
	mptsas_topo_change_list_t *save_node = NULL;
	mptsas_t *mpt;
	dev_info_t *parent = NULL;
	mptsas_phymask_t phymask = 0;
	char *phy_mask_name;
	uint8_t flags = 0, physport = 0xff;
	uint8_t port_update = 0;
	uint_t event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide, based on the head node only, whether this batch of events
	 * can change the set of initiator ports (and hence the phymasks).
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online. So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * Walk the list; each iteration either frees the node after
	 * processing it, or (for a stale add event with phymask == 0)
	 * frees it without processing.
	 */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed. If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC. We must free the topo list, however.
		 */
		if (!mpt->m_in_reset) {
			mptsas_handle_topo_change(topo_node, parent);
		} else {
			NDBG20(("skipping topo change received during reset"));
		}
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent. Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
6593
/*
 * Process one topology-change node: online/offline a target, online/offline
 * an SMP (expander), or free a stale device handle.
 *
 * Called with mpt->m_mutex held and returns with it held; the mutex is
 * dropped around NDI/MDI operations (bus config, property updates) and
 * re-acquired before every return/break, so each exit path below must keep
 * that invariant.  `parent' is the iport dip resolved by the caller
 * (mptsas_handle_dr), except for RAID-physdisk reconfig nodes where it is
 * looked up here from the target's phymask.
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t *psmp = NULL;
	mptsas_t *mpt = (void *)topo_node->mpt;
	uint16_t devhdl;
	uint16_t attached_devhdl;
	uint64_t sas_wwn = 0;
	int rval = 0;
	uint32_t page_address;
	uint8_t phy, flags;
	char *addr = NULL;
	dev_info_t *lundip;
	int circ = 0, circ1 = 0;
	char attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
	    "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
	    topo_node->event, topo_node->flags));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = refhash_linear_search(mpt->m_targets,
			    mptsas_target_eval_devhdl, &topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory");
			} else if (rval == DEV_INFO_FAIL_GUID) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "get SATA GUID for target %d",
				    topo_node->devhdl);
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK or indicates failure
			 * then there is nothing else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * A Phys Disk being unhidden: the caller could not
			 * resolve the iport, so find it here by the target's
			 * phymask.
			 */
			phymask = ptgt->m_addr.mta_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * If this is a SATA device, make sure that the
			 * bridge-port (the SAS WWN that the SATA device is
			 * plugged into) is updated. This may change if a SATA
			 * device changes which bay, and therefore phy, it is
			 * plugged into.
			 */
			if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
				if (!mptsas_update_sata_bridge(mpt, parent,
				    ptgt)) {
					mutex_enter(&mpt->m_mutex);
					return;
				}
			}

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_addr.mta_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					mutex_enter(&mpt->m_mutex);
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					mutex_enter(&mpt->m_mutex);
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);

				/*
				 * topo_node->un.physport is really the PHY#
				 * for direct attached devices
				 */
				mptsas_smhba_set_one_phy_props(mpt, parent,
				    topo_node->un.physport, &attached_devhdl);

				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					mutex_enter(&mpt->m_mutex);
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_addr.mta_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		devhdl = topo_node->devhdl;
		ptgt = refhash_linear_search(mpt->m_targets,
		    mptsas_target_eval_devhdl, &devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* Build the unit address: WWN form if known, else PHY form. */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl,
			    ptgt->m_addr.mta_phymask, rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded; drop the target entry. */
			refhash_remove(mpt->m_targets, ptgt);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;

		devhdl = topo_node->devhdl;

		/* Read Expander Page 0 to populate the smp structure. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(mpt, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = refhash_linear_search(mpt->m_smp_targets,
		    mptsas_smp_eval_devhdl, &devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			refhash_remove(mpt->m_smp_targets, psmp);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
7054
7055 /*
7056 * Record the event if its type is enabled in mpt instance by ioctl.
7057 */
7058 static void
7059 mptsas_record_event(void *args)
7060 {
7061 m_replyh_arg_t *replyh_arg;
7062 pMpi2EventNotificationReply_t eventreply;
7063 uint32_t event, rfm;
7064 mptsas_t *mpt;
7065 int i, j;
7066 uint16_t event_data_len;
7067 boolean_t sendAEN = FALSE;
7068
7069 replyh_arg = (m_replyh_arg_t *)args;
7070 rfm = replyh_arg->rfm;
7071 mpt = replyh_arg->mpt;
7072
7073 eventreply = (pMpi2EventNotificationReply_t)
7074 (mpt->m_reply_frame + (rfm -
7075 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7076 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7077
7078
7079 /*
7080 * Generate a system event to let anyone who cares know that a
7081 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
7082 * event mask is set to.
7083 */
7084 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
7085 sendAEN = TRUE;
7086 }
7087
7088 /*
7089 * Record the event only if it is not masked. Determine which dword
7090 * and bit of event mask to test.
7091 */
7092 i = (uint8_t)(event / 32);
7093 j = (uint8_t)(event % 32);
7094 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
7095 i = mpt->m_event_index;
7096 mpt->m_events[i].Type = event;
7097 mpt->m_events[i].Number = ++mpt->m_event_number;
7098 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
7099 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
7100 &eventreply->EventDataLength);
7101
7102 if (event_data_len > 0) {
7103 /*
7104 * Limit data to size in m_event entry
7105 */
7106 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
7107 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
7108 }
7109 for (j = 0; j < event_data_len; j++) {
7110 mpt->m_events[i].Data[j] =
7111 ddi_get32(mpt->m_acc_reply_frame_hdl,
7112 &(eventreply->EventData[j]));
7113 }
7114
7115 /*
7116 * check for index wrap-around
7117 */
7118 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
7119 i = 0;
7120 }
7121 mpt->m_event_index = (uint8_t)i;
7122
7123 /*
7124 * Set flag to send the event.
7125 */
7126 sendAEN = TRUE;
7127 }
7128 }
7129
7130 /*
7131 * Generate a system event if flag is set to let anyone who cares know
7132 * that an event has occurred.
7133 */
7134 if (sendAEN) {
7135 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
7136 "SAS", NULL, NULL, DDI_NOSLEEP);
7137 }
7138 }
7139
7140 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
7141 /*
7142 * handle sync events from ioc in interrupt
7143 * return value:
7144 * DDI_SUCCESS: The event is handled by this func
7145 * DDI_FAILURE: Event is not handled
7146 */
7147 static int
7148 mptsas_handle_event_sync(void *args)
7149 {
7150 m_replyh_arg_t *replyh_arg;
7151 pMpi2EventNotificationReply_t eventreply;
7152 uint32_t event, rfm;
7153 mptsas_t *mpt;
7154 uint_t iocstatus;
7155
7156 replyh_arg = (m_replyh_arg_t *)args;
7157 rfm = replyh_arg->rfm;
7158 mpt = replyh_arg->mpt;
7159
7160 ASSERT(mutex_owned(&mpt->m_mutex));
7161
7162 eventreply = (pMpi2EventNotificationReply_t)
7163 (mpt->m_reply_frame + (rfm -
7164 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7165 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7166
7167 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7168 &eventreply->IOCStatus)) != 0) {
7169 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7170 mptsas_log(mpt, CE_WARN,
7171 "mptsas_handle_event_sync: event 0x%x, "
7172 "IOCStatus=0x%x, "
7173 "IOCLogInfo=0x%x", event, iocstatus,
7174 ddi_get32(mpt->m_acc_reply_frame_hdl,
7175 &eventreply->IOCLogInfo));
7176 } else {
7177 mptsas_log(mpt, CE_WARN,
7178 "mptsas_handle_event_sync: event 0x%x, "
7179 "IOCStatus=0x%x, "
7180 "(IOCLogInfo=0x%x)", event, iocstatus,
7181 ddi_get32(mpt->m_acc_reply_frame_hdl,
7182 &eventreply->IOCLogInfo));
7183 }
7184 }
7185
7186 /*
7187 * figure out what kind of event we got and handle accordingly
7188 */
7189 switch (event) {
7190 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7191 {
7192 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
7193 uint8_t num_entries, expstatus, phy;
7194 uint8_t phystatus, physport, state, i;
7195 uint8_t start_phy_num, link_rate;
7196 uint16_t dev_handle, reason_code;
7197 uint16_t enc_handle, expd_handle;
7198 char string[80], curr[80], prev[80];
7199 mptsas_topo_change_list_t *topo_head = NULL;
7200 mptsas_topo_change_list_t *topo_tail = NULL;
7201 mptsas_topo_change_list_t *topo_node = NULL;
7202 mptsas_target_t *ptgt;
7203 mptsas_smp_t *psmp;
7204 uint8_t flags = 0, exp_flag;
7205 smhba_info_t *pSmhba = NULL;
7206
7207 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
7208
7209 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
7210 eventreply->EventData;
7211
7212 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7213 &sas_topo_change_list->EnclosureHandle);
7214 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7215 &sas_topo_change_list->ExpanderDevHandle);
7216 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7217 &sas_topo_change_list->NumEntries);
7218 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7219 &sas_topo_change_list->StartPhyNum);
7220 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7221 &sas_topo_change_list->ExpStatus);
7222 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
7223 &sas_topo_change_list->PhysicalPort);
7224
7225 string[0] = 0;
7226 if (expd_handle) {
7227 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
7228 switch (expstatus) {
7229 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7230 (void) sprintf(string, " added");
7231 /*
7232 * New expander device added
7233 */
7234 mpt->m_port_chng = 1;
7235 topo_node = kmem_zalloc(
7236 sizeof (mptsas_topo_change_list_t),
7237 KM_SLEEP);
7238 topo_node->mpt = mpt;
7239 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
7240 topo_node->un.physport = physport;
7241 topo_node->devhdl = expd_handle;
7242 topo_node->flags = flags;
7243 topo_node->object = NULL;
7244 if (topo_head == NULL) {
7245 topo_head = topo_tail = topo_node;
7246 } else {
7247 topo_tail->next = topo_node;
7248 topo_tail = topo_node;
7249 }
7250 break;
7251 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7252 (void) sprintf(string, " not responding, "
7253 "removed");
7254 psmp = refhash_linear_search(mpt->m_smp_targets,
7255 mptsas_smp_eval_devhdl, &expd_handle);
7256 if (psmp == NULL)
7257 break;
7258
7259 topo_node = kmem_zalloc(
7260 sizeof (mptsas_topo_change_list_t),
7261 KM_SLEEP);
7262 topo_node->mpt = mpt;
7263 topo_node->un.phymask =
7264 psmp->m_addr.mta_phymask;
7265 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
7266 topo_node->devhdl = expd_handle;
7267 topo_node->flags = flags;
7268 topo_node->object = NULL;
7269 if (topo_head == NULL) {
7270 topo_head = topo_tail = topo_node;
7271 } else {
7272 topo_tail->next = topo_node;
7273 topo_tail = topo_node;
7274 }
7275 break;
7276 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7277 break;
7278 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7279 (void) sprintf(string, " not responding, "
7280 "delaying removal");
7281 break;
7282 default:
7283 break;
7284 }
7285 } else {
7286 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
7287 }
7288
7289 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
7290 enc_handle, expd_handle, string));
7291 for (i = 0; i < num_entries; i++) {
7292 phy = i + start_phy_num;
7293 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7294 &sas_topo_change_list->PHY[i].PhyStatus);
7295 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7296 &sas_topo_change_list->PHY[i].AttachedDevHandle);
7297 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
7298 /*
7299 * Filter out processing of Phy Vacant Status unless
7300 * the reason code is "Not Responding". Process all
7301 * other combinations of Phy Status and Reason Codes.
7302 */
7303 if ((phystatus &
7304 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
7305 (reason_code !=
7306 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
7307 continue;
7308 }
7309 curr[0] = 0;
7310 prev[0] = 0;
7311 string[0] = 0;
7312 switch (reason_code) {
7313 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7314 {
7315 NDBG20(("mptsas%d phy %d physical_port %d "
7316 "dev_handle %d added", mpt->m_instance, phy,
7317 physport, dev_handle));
7318 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7319 &sas_topo_change_list->PHY[i].LinkRate);
7320 state = (link_rate &
7321 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7322 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7323 switch (state) {
7324 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7325 (void) sprintf(curr, "is disabled");
7326 break;
7327 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7328 (void) sprintf(curr, "is offline, "
7329 "failed speed negotiation");
7330 break;
7331 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7332 (void) sprintf(curr, "SATA OOB "
7333 "complete");
7334 break;
7335 case SMP_RESET_IN_PROGRESS:
7336 (void) sprintf(curr, "SMP reset in "
7337 "progress");
7338 break;
7339 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7340 (void) sprintf(curr, "is online at "
7341 "1.5 Gbps");
7342 break;
7343 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7344 (void) sprintf(curr, "is online at 3.0 "
7345 "Gbps");
7346 break;
7347 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7348 (void) sprintf(curr, "is online at 6.0 "
7349 "Gbps");
7350 break;
7351 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7352 (void) sprintf(curr,
7353 "is online at 12.0 Gbps");
7354 break;
7355 default:
7356 (void) sprintf(curr, "state is "
7357 "unknown");
7358 break;
7359 }
7360 /*
7361 * New target device added into the system.
7362 * Set association flag according to if an
7363 * expander is used or not.
7364 */
7365 exp_flag =
7366 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7367 if (flags ==
7368 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7369 flags = exp_flag;
7370 }
7371 topo_node = kmem_zalloc(
7372 sizeof (mptsas_topo_change_list_t),
7373 KM_SLEEP);
7374 topo_node->mpt = mpt;
7375 topo_node->event =
7376 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7377 if (expd_handle == 0) {
7378 /*
7379 * Per MPI 2, if expander dev handle
7380 * is 0, it's a directly attached
7381 * device. So driver use PHY to decide
7382 * which iport is associated
7383 */
7384 physport = phy;
7385 mpt->m_port_chng = 1;
7386 }
7387 topo_node->un.physport = physport;
7388 topo_node->devhdl = dev_handle;
7389 topo_node->flags = flags;
7390 topo_node->object = NULL;
7391 if (topo_head == NULL) {
7392 topo_head = topo_tail = topo_node;
7393 } else {
7394 topo_tail->next = topo_node;
7395 topo_tail = topo_node;
7396 }
7397 break;
7398 }
7399 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7400 {
7401 NDBG20(("mptsas%d phy %d physical_port %d "
7402 "dev_handle %d removed", mpt->m_instance,
7403 phy, physport, dev_handle));
7404 /*
7405 * Set association flag according to if an
7406 * expander is used or not.
7407 */
7408 exp_flag =
7409 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7410 if (flags ==
7411 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7412 flags = exp_flag;
7413 }
7414 /*
7415 * Target device is removed from the system
7416 * Before the device is really offline from
7417 * from system.
7418 */
7419 ptgt = refhash_linear_search(mpt->m_targets,
7420 mptsas_target_eval_devhdl, &dev_handle);
7421 /*
7422 * If ptgt is NULL here, it means that the
7423 * DevHandle is not in the hash table. This is
7424 * reasonable sometimes. For example, if a
7425 * disk was pulled, then added, then pulled
7426 * again, the disk will not have been put into
7427 * the hash table because the add event will
7428 * have an invalid phymask. BUT, this does not
7429 * mean that the DevHandle is invalid. The
7430 * controller will still have a valid DevHandle
7431 * that must be removed. To do this, use the
7432 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7433 */
7434 if (ptgt == NULL) {
7435 topo_node = kmem_zalloc(
7436 sizeof (mptsas_topo_change_list_t),
7437 KM_SLEEP);
7438 topo_node->mpt = mpt;
7439 topo_node->un.phymask = 0;
7440 topo_node->event =
7441 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7442 topo_node->devhdl = dev_handle;
7443 topo_node->flags = flags;
7444 topo_node->object = NULL;
7445 if (topo_head == NULL) {
7446 topo_head = topo_tail =
7447 topo_node;
7448 } else {
7449 topo_tail->next = topo_node;
7450 topo_tail = topo_node;
7451 }
7452 break;
7453 }
7454
7455 /*
7456 * Update DR flag immediately avoid I/O failure
7457 * before failover finish. Pay attention to the
7458 * mutex protect, we need grab m_tx_waitq_mutex
7459 * during set m_dr_flag because we won't add
7460 * the following command into waitq, instead,
7461 * we need return TRAN_BUSY in the tran_start
7462 * context.
7463 */
7464 mutex_enter(&mpt->m_tx_waitq_mutex);
7465 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7466 mutex_exit(&mpt->m_tx_waitq_mutex);
7467
7468 topo_node = kmem_zalloc(
7469 sizeof (mptsas_topo_change_list_t),
7470 KM_SLEEP);
7471 topo_node->mpt = mpt;
7472 topo_node->un.phymask =
7473 ptgt->m_addr.mta_phymask;
7474 topo_node->event =
7475 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7476 topo_node->devhdl = dev_handle;
7477 topo_node->flags = flags;
7478 topo_node->object = NULL;
7479 if (topo_head == NULL) {
7480 topo_head = topo_tail = topo_node;
7481 } else {
7482 topo_tail->next = topo_node;
7483 topo_tail = topo_node;
7484 }
7485 break;
7486 }
7487 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7488 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7489 &sas_topo_change_list->PHY[i].LinkRate);
7490 state = (link_rate &
7491 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7492 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7493 pSmhba = &mpt->m_phy_info[i].smhba_info;
7494 pSmhba->negotiated_link_rate = state;
7495 switch (state) {
7496 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7497 (void) sprintf(curr, "is disabled");
7498 mptsas_smhba_log_sysevent(mpt,
7499 ESC_SAS_PHY_EVENT,
7500 SAS_PHY_REMOVE,
7501 &mpt->m_phy_info[i].smhba_info);
7502 mpt->m_phy_info[i].smhba_info.
7503 negotiated_link_rate
7504 = 0x1;
7505 break;
7506 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7507 (void) sprintf(curr, "is offline, "
7508 "failed speed negotiation");
7509 mptsas_smhba_log_sysevent(mpt,
7510 ESC_SAS_PHY_EVENT,
7511 SAS_PHY_OFFLINE,
7512 &mpt->m_phy_info[i].smhba_info);
7513 break;
7514 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7515 (void) sprintf(curr, "SATA OOB "
7516 "complete");
7517 break;
7518 case SMP_RESET_IN_PROGRESS:
7519 (void) sprintf(curr, "SMP reset in "
7520 "progress");
7521 break;
7522 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7523 (void) sprintf(curr, "is online at "
7524 "1.5 Gbps");
7525 if ((expd_handle == 0) &&
7526 (enc_handle == 1)) {
7527 mpt->m_port_chng = 1;
7528 }
7529 mptsas_smhba_log_sysevent(mpt,
7530 ESC_SAS_PHY_EVENT,
7531 SAS_PHY_ONLINE,
7532 &mpt->m_phy_info[i].smhba_info);
7533 break;
7534 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7535 (void) sprintf(curr, "is online at 3.0 "
7536 "Gbps");
7537 if ((expd_handle == 0) &&
7538 (enc_handle == 1)) {
7539 mpt->m_port_chng = 1;
7540 }
7541 mptsas_smhba_log_sysevent(mpt,
7542 ESC_SAS_PHY_EVENT,
7543 SAS_PHY_ONLINE,
7544 &mpt->m_phy_info[i].smhba_info);
7545 break;
7546 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7547 (void) sprintf(curr, "is online at "
7548 "6.0 Gbps");
7549 if ((expd_handle == 0) &&
7550 (enc_handle == 1)) {
7551 mpt->m_port_chng = 1;
7552 }
7553 mptsas_smhba_log_sysevent(mpt,
7554 ESC_SAS_PHY_EVENT,
7555 SAS_PHY_ONLINE,
7556 &mpt->m_phy_info[i].smhba_info);
7557 break;
7558 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7559 (void) sprintf(curr, "is online at "
7560 "12.0 Gbps");
7561 if ((expd_handle == 0) &&
7562 (enc_handle == 1)) {
7563 mpt->m_port_chng = 1;
7564 }
7565 mptsas_smhba_log_sysevent(mpt,
7566 ESC_SAS_PHY_EVENT,
7567 SAS_PHY_ONLINE,
7568 &mpt->m_phy_info[i].smhba_info);
7569 break;
7570 default:
7571 (void) sprintf(curr, "state is "
7572 "unknown");
7573 break;
7574 }
7575
7576 state = (link_rate &
7577 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7578 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7579 switch (state) {
7580 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7581 (void) sprintf(prev, ", was disabled");
7582 break;
7583 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7584 (void) sprintf(prev, ", was offline, "
7585 "failed speed negotiation");
7586 break;
7587 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7588 (void) sprintf(prev, ", was SATA OOB "
7589 "complete");
7590 break;
7591 case SMP_RESET_IN_PROGRESS:
7592 (void) sprintf(prev, ", was SMP reset "
7593 "in progress");
7594 break;
7595 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7596 (void) sprintf(prev, ", was online at "
7597 "1.5 Gbps");
7598 break;
7599 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7600 (void) sprintf(prev, ", was online at "
7601 "3.0 Gbps");
7602 break;
7603 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7604 (void) sprintf(prev, ", was online at "
7605 "6.0 Gbps");
7606 break;
7607 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7608 (void) sprintf(prev, ", was online at "
7609 "12.0 Gbps");
7610 break;
7611 default:
7612 break;
7613 }
7614 (void) sprintf(&string[strlen(string)], "link "
7615 "changed, ");
7616 break;
7617 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7618 continue;
7619 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7620 (void) sprintf(&string[strlen(string)],
7621 "target not responding, delaying "
7622 "removal");
7623 break;
7624 }
7625 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7626 mpt->m_instance, phy, dev_handle, string, curr,
7627 prev));
7628 }
7629 if (topo_head != NULL) {
7630 /*
7631 * Launch DR taskq to handle topology change
7632 */
7633 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7634 mptsas_handle_dr, (void *)topo_head,
7635 DDI_NOSLEEP)) != DDI_SUCCESS) {
7636 while (topo_head != NULL) {
7637 topo_node = topo_head;
7638 topo_head = topo_head->next;
7639 kmem_free(topo_node,
7640 sizeof (mptsas_topo_change_list_t));
7641 }
7642 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7643 "for handle SAS DR event failed");
7644 }
7645 }
7646 break;
7647 }
7648 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7649 {
7650 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7651 mptsas_topo_change_list_t *topo_head = NULL;
7652 mptsas_topo_change_list_t *topo_tail = NULL;
7653 mptsas_topo_change_list_t *topo_node = NULL;
7654 mptsas_target_t *ptgt;
7655 uint8_t num_entries, i, reason;
7656 uint16_t volhandle, diskhandle;
7657
7658 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7659 eventreply->EventData;
7660 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7661 &irChangeList->NumElements);
7662
7663 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7664 mpt->m_instance));
7665
7666 for (i = 0; i < num_entries; i++) {
7667 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7668 &irChangeList->ConfigElement[i].ReasonCode);
7669 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7670 &irChangeList->ConfigElement[i].VolDevHandle);
7671 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7672 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7673
7674 switch (reason) {
7675 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7676 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7677 {
7678 NDBG20(("mptsas %d volume added\n",
7679 mpt->m_instance));
7680
7681 topo_node = kmem_zalloc(
7682 sizeof (mptsas_topo_change_list_t),
7683 KM_SLEEP);
7684
7685 topo_node->mpt = mpt;
7686 topo_node->event =
7687 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7688 topo_node->un.physport = 0xff;
7689 topo_node->devhdl = volhandle;
7690 topo_node->flags =
7691 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7692 topo_node->object = NULL;
7693 if (topo_head == NULL) {
7694 topo_head = topo_tail = topo_node;
7695 } else {
7696 topo_tail->next = topo_node;
7697 topo_tail = topo_node;
7698 }
7699 break;
7700 }
7701 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7702 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7703 {
7704 NDBG20(("mptsas %d volume deleted\n",
7705 mpt->m_instance));
7706 ptgt = refhash_linear_search(mpt->m_targets,
7707 mptsas_target_eval_devhdl, &volhandle);
7708 if (ptgt == NULL)
7709 break;
7710
7711 /*
7712 * Clear any flags related to volume
7713 */
7714 (void) mptsas_delete_volume(mpt, volhandle);
7715
7716 /*
7717 * Update DR flag immediately avoid I/O failure
7718 */
7719 mutex_enter(&mpt->m_tx_waitq_mutex);
7720 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7721 mutex_exit(&mpt->m_tx_waitq_mutex);
7722
7723 topo_node = kmem_zalloc(
7724 sizeof (mptsas_topo_change_list_t),
7725 KM_SLEEP);
7726 topo_node->mpt = mpt;
7727 topo_node->un.phymask =
7728 ptgt->m_addr.mta_phymask;
7729 topo_node->event =
7730 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7731 topo_node->devhdl = volhandle;
7732 topo_node->flags =
7733 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7734 topo_node->object = (void *)ptgt;
7735 if (topo_head == NULL) {
7736 topo_head = topo_tail = topo_node;
7737 } else {
7738 topo_tail->next = topo_node;
7739 topo_tail = topo_node;
7740 }
7741 break;
7742 }
7743 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7744 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7745 {
7746 ptgt = refhash_linear_search(mpt->m_targets,
7747 mptsas_target_eval_devhdl, &diskhandle);
7748 if (ptgt == NULL)
7749 break;
7750
7751 /*
7752 * Update DR flag immediately avoid I/O failure
7753 */
7754 mutex_enter(&mpt->m_tx_waitq_mutex);
7755 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7756 mutex_exit(&mpt->m_tx_waitq_mutex);
7757
7758 topo_node = kmem_zalloc(
7759 sizeof (mptsas_topo_change_list_t),
7760 KM_SLEEP);
7761 topo_node->mpt = mpt;
7762 topo_node->un.phymask =
7763 ptgt->m_addr.mta_phymask;
7764 topo_node->event =
7765 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7766 topo_node->devhdl = diskhandle;
7767 topo_node->flags =
7768 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7769 topo_node->object = (void *)ptgt;
7770 if (topo_head == NULL) {
7771 topo_head = topo_tail = topo_node;
7772 } else {
7773 topo_tail->next = topo_node;
7774 topo_tail = topo_node;
7775 }
7776 break;
7777 }
7778 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7779 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7780 {
7781 /*
7782 * The physical drive is released by a IR
7783 * volume. But we cannot get the the physport
7784 * or phynum from the event data, so we only
7785 * can get the physport/phynum after SAS
7786 * Device Page0 request for the devhdl.
7787 */
7788 topo_node = kmem_zalloc(
7789 sizeof (mptsas_topo_change_list_t),
7790 KM_SLEEP);
7791 topo_node->mpt = mpt;
7792 topo_node->un.phymask = 0;
7793 topo_node->event =
7794 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7795 topo_node->devhdl = diskhandle;
7796 topo_node->flags =
7797 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7798 topo_node->object = NULL;
7799 mpt->m_port_chng = 1;
7800 if (topo_head == NULL) {
7801 topo_head = topo_tail = topo_node;
7802 } else {
7803 topo_tail->next = topo_node;
7804 topo_tail = topo_node;
7805 }
7806 break;
7807 }
7808 default:
7809 break;
7810 }
7811 }
7812
7813 if (topo_head != NULL) {
7814 /*
7815 * Launch DR taskq to handle topology change
7816 */
7817 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7818 mptsas_handle_dr, (void *)topo_head,
7819 DDI_NOSLEEP)) != DDI_SUCCESS) {
7820 while (topo_head != NULL) {
7821 topo_node = topo_head;
7822 topo_head = topo_head->next;
7823 kmem_free(topo_node,
7824 sizeof (mptsas_topo_change_list_t));
7825 }
7826 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7827 "for handle SAS DR event failed");
7828 }
7829 }
7830 break;
7831 }
7832 default:
7833 return (DDI_FAILURE);
7834 }
7835
7836 return (DDI_SUCCESS);
7837 }
7838
7839 /*
7840 * handle events from ioc
7841 */
7842 static void
7843 mptsas_handle_event(void *args)
7844 {
7845 m_replyh_arg_t *replyh_arg;
7846 pMpi2EventNotificationReply_t eventreply;
7847 uint32_t event, iocloginfo, rfm;
7848 uint32_t status;
7849 uint8_t port;
7850 mptsas_t *mpt;
7851 uint_t iocstatus;
7852
7853 replyh_arg = (m_replyh_arg_t *)args;
7854 rfm = replyh_arg->rfm;
7855 mpt = replyh_arg->mpt;
7856
7857 mutex_enter(&mpt->m_mutex);
7858 /*
7859 * If HBA is being reset, drop incoming event.
7860 */
7861 if (mpt->m_in_reset) {
7862 NDBG20(("dropping event received prior to reset"));
7863 mutex_exit(&mpt->m_mutex);
7864 return;
7865 }
7866
7867 eventreply = (pMpi2EventNotificationReply_t)
7868 (mpt->m_reply_frame + (rfm -
7869 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7870 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7871
7872 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7873 &eventreply->IOCStatus)) != 0) {
7874 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7875 mptsas_log(mpt, CE_WARN,
7876 "mptsas_handle_event: IOCStatus=0x%x, "
7877 "IOCLogInfo=0x%x", iocstatus,
7878 ddi_get32(mpt->m_acc_reply_frame_hdl,
7879 &eventreply->IOCLogInfo));
7880 } else {
7881 mptsas_log(mpt, CE_WARN,
7882 "mptsas_handle_event: IOCStatus=0x%x, "
7883 "IOCLogInfo=0x%x", iocstatus,
7884 ddi_get32(mpt->m_acc_reply_frame_hdl,
7885 &eventreply->IOCLogInfo));
7886 }
7887 }
7888
7889 /*
7890 * figure out what kind of event we got and handle accordingly
7891 */
7892 switch (event) {
7893 case MPI2_EVENT_LOG_ENTRY_ADDED:
7894 break;
7895 case MPI2_EVENT_LOG_DATA:
7896 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7897 &eventreply->IOCLogInfo);
7898 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7899 iocloginfo));
7900 break;
7901 case MPI2_EVENT_STATE_CHANGE:
7902 NDBG20(("mptsas%d state change.", mpt->m_instance));
7903 break;
7904 case MPI2_EVENT_HARD_RESET_RECEIVED:
7905 NDBG20(("mptsas%d event change.", mpt->m_instance));
7906 break;
7907 case MPI2_EVENT_SAS_DISCOVERY:
7908 {
7909 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7910 char string[80];
7911 uint8_t rc;
7912
7913 sasdiscovery =
7914 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7915
7916 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7917 &sasdiscovery->ReasonCode);
7918 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7919 &sasdiscovery->PhysicalPort);
7920 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7921 &sasdiscovery->DiscoveryStatus);
7922
7923 string[0] = 0;
7924 switch (rc) {
7925 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7926 (void) sprintf(string, "STARTING");
7927 break;
7928 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7929 (void) sprintf(string, "COMPLETED");
7930 break;
7931 default:
7932 (void) sprintf(string, "UNKNOWN");
7933 break;
7934 }
7935
7936 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7937 port, status));
7938
7939 break;
7940 }
7941 case MPI2_EVENT_EVENT_CHANGE:
7942 NDBG20(("mptsas%d event change.", mpt->m_instance));
7943 break;
7944 case MPI2_EVENT_TASK_SET_FULL:
7945 {
7946 pMpi2EventDataTaskSetFull_t taskfull;
7947
7948 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7949
7950 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7951 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7952 &taskfull->CurrentDepth)));
7953 break;
7954 }
7955 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7956 {
7957 /*
7958 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7959 * in mptsas_handle_event_sync() of interrupt context
7960 */
7961 break;
7962 }
7963 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7964 {
7965 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7966 uint8_t rc;
7967 uint16_t enchdl;
7968 char string[80];
7969 mptsas_enclosure_t *mep;
7970
7971 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7972 eventreply->EventData;
7973
7974 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7975 &encstatus->ReasonCode);
7976 enchdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7977 &encstatus->EnclosureHandle);
7978
7979 switch (rc) {
7980 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7981 (void) sprintf(string, "added");
7982 break;
7983 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7984 mep = mptsas_enc_lookup(mpt, enchdl);
7985 if (mep != NULL) {
7986 list_remove(&mpt->m_enclosures, mep);
7987 mptsas_enc_free(mep);
7988 mep = NULL;
7989 }
7990 (void) sprintf(string, ", not responding");
7991 break;
7992 default:
7993 break;
7994 }
7995 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7996 "%x%s\n", mpt->m_instance,
7997 ddi_get16(mpt->m_acc_reply_frame_hdl,
7998 &encstatus->EnclosureHandle), string));
7999
8000 /*
8001 * No matter what has happened, update all of our device state
8002 * for enclosures, by retriggering an evaluation.
8003 */
8004 mpt->m_done_traverse_enc = 0;
8005 mptsas_update_hashtab(mpt);
8006 break;
8007 }
8008
8009 /*
8010 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
8011 * mptsas_handle_event_sync,in here just send ack message.
8012 */
8013 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
8014 {
8015 pMpi2EventDataSasDeviceStatusChange_t statuschange;
8016 uint8_t rc;
8017 uint16_t devhdl;
8018 uint64_t wwn = 0;
8019 uint32_t wwn_lo, wwn_hi;
8020
8021 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
8022 eventreply->EventData;
8023 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
8024 &statuschange->ReasonCode);
8025 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
8026 (uint32_t *)(void *)&statuschange->SASAddress);
8027 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
8028 (uint32_t *)(void *)&statuschange->SASAddress + 1);
8029 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
8030 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
8031 &statuschange->DevHandle);
8032
8033 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
8034 wwn));
8035
8036 switch (rc) {
8037 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
8038 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
8039 ddi_get8(mpt->m_acc_reply_frame_hdl,
8040 &statuschange->ASC),
8041 ddi_get8(mpt->m_acc_reply_frame_hdl,
8042 &statuschange->ASCQ)));
8043 break;
8044
8045 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
8046 NDBG20(("Device not supported"));
8047 break;
8048
8049 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
8050 NDBG20(("IOC internally generated the Target Reset "
8051 "for devhdl:%x", devhdl));
8052 break;
8053
8054 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8055 NDBG20(("IOC's internally generated Target Reset "
8056 "completed for devhdl:%x", devhdl));
8057 break;
8058
8059 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
8060 NDBG20(("IOC internally generated Abort Task"));
8061 break;
8062
8063 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8064 NDBG20(("IOC's internally generated Abort Task "
8065 "completed"));
8066 break;
8067
8068 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8069 NDBG20(("IOC internally generated Abort Task Set"));
8070 break;
8071
8072 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8073 NDBG20(("IOC internally generated Clear Task Set"));
8074 break;
8075
8076 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
8077 NDBG20(("IOC internally generated Query Task"));
8078 break;
8079
8080 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
8081 NDBG20(("Device sent an Asynchronous Notification"));
8082 break;
8083
8084 default:
8085 break;
8086 }
8087 break;
8088 }
8089 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
8090 {
8091 /*
8092 * IR TOPOLOGY CHANGE LIST Event has already been handled
8093 * in mpt_handle_event_sync() of interrupt context
8094 */
8095 break;
8096 }
8097 case MPI2_EVENT_IR_OPERATION_STATUS:
8098 {
8099 Mpi2EventDataIrOperationStatus_t *irOpStatus;
8100 char reason_str[80];
8101 uint8_t rc, percent;
8102 uint16_t handle;
8103
8104 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
8105 eventreply->EventData;
8106 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
8107 &irOpStatus->RAIDOperation);
8108 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
8109 &irOpStatus->PercentComplete);
8110 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8111 &irOpStatus->VolDevHandle);
8112
8113 switch (rc) {
8114 case MPI2_EVENT_IR_RAIDOP_RESYNC:
8115 (void) sprintf(reason_str, "resync");
8116 break;
8117 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8118 (void) sprintf(reason_str, "online capacity "
8119 "expansion");
8120 break;
8121 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8122 (void) sprintf(reason_str, "consistency check");
8123 break;
8124 default:
8125 (void) sprintf(reason_str, "unknown reason %x",
8126 rc);
8127 }
8128
8129 NDBG20(("mptsas%d raid operational status: (%s)"
8130 "\thandle(0x%04x), percent complete(%d)\n",
8131 mpt->m_instance, reason_str, handle, percent));
8132 break;
8133 }
8134 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
8135 {
8136 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
8137 uint8_t phy_num;
8138 uint8_t primitive;
8139
8140 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
8141 eventreply->EventData;
8142
8143 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
8144 &sas_broadcast->PhyNum);
8145 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
8146 &sas_broadcast->Primitive);
8147
8148 switch (primitive) {
8149 case MPI2_EVENT_PRIMITIVE_CHANGE:
8150 mptsas_smhba_log_sysevent(mpt,
8151 ESC_SAS_HBA_PORT_BROADCAST,
8152 SAS_PORT_BROADCAST_CHANGE,
8153 &mpt->m_phy_info[phy_num].smhba_info);
8154 break;
8155 case MPI2_EVENT_PRIMITIVE_SES:
8156 mptsas_smhba_log_sysevent(mpt,
8157 ESC_SAS_HBA_PORT_BROADCAST,
8158 SAS_PORT_BROADCAST_SES,
8159 &mpt->m_phy_info[phy_num].smhba_info);
8160 break;
8161 case MPI2_EVENT_PRIMITIVE_EXPANDER:
8162 mptsas_smhba_log_sysevent(mpt,
8163 ESC_SAS_HBA_PORT_BROADCAST,
8164 SAS_PORT_BROADCAST_D01_4,
8165 &mpt->m_phy_info[phy_num].smhba_info);
8166 break;
8167 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
8168 mptsas_smhba_log_sysevent(mpt,
8169 ESC_SAS_HBA_PORT_BROADCAST,
8170 SAS_PORT_BROADCAST_D04_7,
8171 &mpt->m_phy_info[phy_num].smhba_info);
8172 break;
8173 case MPI2_EVENT_PRIMITIVE_RESERVED3:
8174 mptsas_smhba_log_sysevent(mpt,
8175 ESC_SAS_HBA_PORT_BROADCAST,
8176 SAS_PORT_BROADCAST_D16_7,
8177 &mpt->m_phy_info[phy_num].smhba_info);
8178 break;
8179 case MPI2_EVENT_PRIMITIVE_RESERVED4:
8180 mptsas_smhba_log_sysevent(mpt,
8181 ESC_SAS_HBA_PORT_BROADCAST,
8182 SAS_PORT_BROADCAST_D29_7,
8183 &mpt->m_phy_info[phy_num].smhba_info);
8184 break;
8185 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
8186 mptsas_smhba_log_sysevent(mpt,
8187 ESC_SAS_HBA_PORT_BROADCAST,
8188 SAS_PORT_BROADCAST_D24_0,
8189 &mpt->m_phy_info[phy_num].smhba_info);
8190 break;
8191 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
8192 mptsas_smhba_log_sysevent(mpt,
8193 ESC_SAS_HBA_PORT_BROADCAST,
8194 SAS_PORT_BROADCAST_D27_4,
8195 &mpt->m_phy_info[phy_num].smhba_info);
8196 break;
8197 default:
8198 NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
8199 " %x received",
8200 mpt->m_instance, primitive));
8201 break;
8202 }
8203 NDBG16(("mptsas%d sas broadcast primitive: "
8204 "\tprimitive(0x%04x), phy(%d) complete\n",
8205 mpt->m_instance, primitive, phy_num));
8206 break;
8207 }
8208 case MPI2_EVENT_IR_VOLUME:
8209 {
8210 Mpi2EventDataIrVolume_t *irVolume;
8211 uint16_t devhandle;
8212 uint32_t state;
8213 int config, vol;
8214 uint8_t found = FALSE;
8215
8216 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
8217 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8218 &irVolume->NewValue);
8219 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8220 &irVolume->VolDevHandle);
8221
8222 NDBG20(("EVENT_IR_VOLUME event is received"));
8223
8224 /*
8225 * Get latest RAID info and then find the DevHandle for this
8226 * event in the configuration. If the DevHandle is not found
8227 * just exit the event.
8228 */
8229 (void) mptsas_get_raid_info(mpt);
8230 for (config = 0; (config < mpt->m_num_raid_configs) &&
8231 (!found); config++) {
8232 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
8233 if (mpt->m_raidconfig[config].m_raidvol[vol].
8234 m_raidhandle == devhandle) {
8235 found = TRUE;
8236 break;
8237 }
8238 }
8239 }
8240 if (!found) {
8241 break;
8242 }
8243
8244 switch (irVolume->ReasonCode) {
8245 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
8246 {
8247 uint32_t i;
8248 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
8249 state;
8250
8251 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
8252 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
8253 ", auto-config of hot-swap drives is %s"
8254 ", write caching is %s"
8255 ", hot-spare pool mask is %02x",
8256 vol, state &
8257 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
8258 ? "disabled" : "enabled",
8259 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
8260 ? "controlled by member disks" :
8261 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
8262 ? "disabled" :
8263 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
8264 ? "enabled" :
8265 "incorrectly set",
8266 (state >> 16) & 0xff);
8267 break;
8268 }
8269 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
8270 {
8271 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
8272 (uint8_t)state;
8273
8274 mptsas_log(mpt, CE_NOTE,
8275 "Volume %d is now %s", vol,
8276 state == MPI2_RAID_VOL_STATE_OPTIMAL
8277 ? "optimal" :
8278 state == MPI2_RAID_VOL_STATE_DEGRADED
8279 ? "degraded" :
8280 state == MPI2_RAID_VOL_STATE_ONLINE
8281 ? "online" :
8282 state == MPI2_RAID_VOL_STATE_INITIALIZING
8283 ? "initializing" :
8284 state == MPI2_RAID_VOL_STATE_FAILED
8285 ? "failed" :
8286 state == MPI2_RAID_VOL_STATE_MISSING
8287 ? "missing" :
8288 "state unknown");
8289 break;
8290 }
8291 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
8292 {
8293 mpt->m_raidconfig[config].m_raidvol[vol].
8294 m_statusflags = state;
8295
8296 mptsas_log(mpt, CE_NOTE,
8297 " Volume %d is now %s%s%s%s%s%s%s%s%s",
8298 vol,
8299 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
8300 ? ", enabled" : ", disabled",
8301 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
8302 ? ", quiesced" : "",
8303 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
8304 ? ", inactive" : ", active",
8305 state &
8306 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
8307 ? ", bad block table is full" : "",
8308 state &
8309 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
8310 ? ", resync in progress" : "",
8311 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8312 ? ", background initialization in progress" : "",
8313 state &
8314 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8315 ? ", capacity expansion in progress" : "",
8316 state &
8317 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8318 ? ", consistency check in progress" : "",
8319 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
8320 ? ", data scrub in progress" : "");
8321 break;
8322 }
8323 default:
8324 break;
8325 }
8326 break;
8327 }
8328 case MPI2_EVENT_IR_PHYSICAL_DISK:
8329 {
8330 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
8331 uint16_t devhandle, enchandle, slot;
8332 uint32_t status, state;
8333 uint8_t physdisknum, reason;
8334
8335 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
8336 eventreply->EventData;
8337 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
8338 &irPhysDisk->PhysDiskNum);
8339 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8340 &irPhysDisk->PhysDiskDevHandle);
8341 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8342 &irPhysDisk->EnclosureHandle);
8343 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
8344 &irPhysDisk->Slot);
8345 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8346 &irPhysDisk->NewValue);
8347 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8348 &irPhysDisk->ReasonCode);
8349
8350 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8351
8352 switch (reason) {
8353 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8354 mptsas_log(mpt, CE_NOTE,
8355 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8356 "for enclosure with handle 0x%x is now in hot "
8357 "spare pool %d",
8358 physdisknum, devhandle, slot, enchandle,
8359 (state >> 16) & 0xff);
8360 break;
8361
8362 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8363 status = state;
8364 mptsas_log(mpt, CE_NOTE,
8365 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8366 "for enclosure with handle 0x%x is now "
8367 "%s%s%s%s%s", physdisknum, devhandle, slot,
8368 enchandle,
8369 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8370 ? ", inactive" : ", active",
8371 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8372 ? ", out of sync" : "",
8373 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8374 ? ", quiesced" : "",
8375 status &
8376 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8377 ? ", write cache enabled" : "",
8378 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8379 ? ", capacity expansion target" : "");
8380 break;
8381
8382 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8383 mptsas_log(mpt, CE_NOTE,
8384 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8385 "for enclosure with handle 0x%x is now %s",
8386 physdisknum, devhandle, slot, enchandle,
8387 state == MPI2_RAID_PD_STATE_OPTIMAL
8388 ? "optimal" :
8389 state == MPI2_RAID_PD_STATE_REBUILDING
8390 ? "rebuilding" :
8391 state == MPI2_RAID_PD_STATE_DEGRADED
8392 ? "degraded" :
8393 state == MPI2_RAID_PD_STATE_HOT_SPARE
8394 ? "a hot spare" :
8395 state == MPI2_RAID_PD_STATE_ONLINE
8396 ? "online" :
8397 state == MPI2_RAID_PD_STATE_OFFLINE
8398 ? "offline" :
8399 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8400 ? "not compatible" :
8401 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8402 ? "not configured" :
8403 "state unknown");
8404 break;
8405 }
8406 break;
8407 }
8408 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
8409 {
8410 pMpi26EventDataActiveCableExcept_t actcable;
8411 uint32_t power;
8412 uint8_t reason, id;
8413
8414 actcable = (pMpi26EventDataActiveCableExcept_t)
8415 eventreply->EventData;
8416 power = ddi_get32(mpt->m_acc_reply_frame_hdl,
8417 &actcable->ActiveCablePowerRequirement);
8418 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8419 &actcable->ReasonCode);
8420 id = ddi_get8(mpt->m_acc_reply_frame_hdl,
8421 &actcable->ReceptacleID);
8422
8423 /*
8424 * It'd be nice if this weren't just logging to the system but
8425 * were telling FMA about the active cable problem and FMA was
8426 * aware of the cable topology and state.
8427 */
8428 switch (reason) {
8429 case MPI26_EVENT_ACTIVE_CABLE_PRESENT:
8430 /* Don't log anything if it's fine */
8431 break;
8432 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8433 mptsas_log(mpt, CE_WARN, "An active cable (id %u) does "
8434 "not have sufficient power to be enabled. "
8435 "Devices connected to this cable will not be "
8436 "visible to the system.", id);
8437 if (power == UINT32_MAX) {
8438 mptsas_log(mpt, CE_CONT, "The cable's power "
8439 "requirements are unknown.\n");
8440 } else {
8441 mptsas_log(mpt, CE_CONT, "The cable requires "
8442 "%u mW of power to function.\n", power);
8443 }
8444 break;
8445 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
8446 mptsas_log(mpt, CE_WARN, "An active cable (id %u) is "
8447 "degraded and not running at its full speed. "
8448 "Some devices might not appear.", id);
8449 break;
8450 default:
8451 break;
8452 }
8453 break;
8454 }
8455 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
8456 case MPI2_EVENT_PCIE_ENUMERATION:
8457 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
8458 case MPI2_EVENT_PCIE_LINK_COUNTER:
8459 mptsas_log(mpt, CE_NOTE, "Unhandled mpt_sas PCIe device "
8460 "event received (0x%x)", event);
8461 break;
8462 default:
8463 NDBG20(("mptsas%d: unknown event %x received",
8464 mpt->m_instance, event));
8465 break;
8466 }
8467
8468 /*
8469 * Return the reply frame to the free queue.
8470 */
8471 ddi_put32(mpt->m_acc_free_queue_hdl,
8472 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8473 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8474 DDI_DMA_SYNC_FORDEV);
8475 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8476 mpt->m_free_index = 0;
8477 }
8478 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8479 mpt->m_free_index);
8480 mutex_exit(&mpt->m_mutex);
8481 }
8482
8483 /*
8484 * invoked from timeout() to restart qfull cmds with throttle == 0
8485 */
8486 static void
8487 mptsas_restart_cmd(void *arg)
8488 {
8489 mptsas_t *mpt = arg;
8490 mptsas_target_t *ptgt = NULL;
8491
8492 mutex_enter(&mpt->m_mutex);
8493
8494 mpt->m_restart_cmd_timeid = 0;
8495
8496 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8497 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8498 if (ptgt->m_reset_delay == 0) {
8499 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8500 mptsas_set_throttle(mpt, ptgt,
8501 MAX_THROTTLE);
8502 }
8503 }
8504 }
8505 mptsas_restart_hba(mpt);
8506 mutex_exit(&mpt->m_mutex);
8507 }
8508
/*
 * Remove a completed command from the active slot table and fix up the
 * associated bookkeeping: clear the slot, decrement the HBA-wide and
 * per-target command counts, restore throttle after an untagged command
 * drains, and unlink the command from the target's active/timeout queue.
 * TM commands are left alone (handled in their own routines); IOC
 * commands are returned to the command pool.
 */
void
mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		slot;
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(cmd != NULL);
	ASSERT(cmd->cmd_queued == FALSE);

	/*
	 * Task Management cmds are removed in their own routines.  Also,
	 * we don't want to modify timeout based on TM cmds.
	 */
	if (cmd->cmd_flags & CFLAG_TM_CMD) {
		return;
	}

	slot = cmd->cmd_slot;

	/*
	 * remove the cmd.
	 */
	if (cmd == slots->m_slot[slot]) {
		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
		    "0x%x", (void *)cmd, cmd->cmd_flags));
		slots->m_slot[slot] = NULL;
		mpt->m_ncmds--;

		/*
		 * only decrement per target ncmds if command
		 * has a target associated with it.
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds--;
			/*
			 * reset throttle if we just ran an untagged command
			 * to a tagged target
			 */
			if ((ptgt->m_t_ncmds == 0) &&
			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}

			/*
			 * Remove this command from the active queue.
			 * (cmd_active_expiration == 0 means the command was
			 * never linked in, so there is nothing to remove.)
			 */
			if (cmd->cmd_active_expiration != 0) {
				TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
				    cmd_active_link);
				cmd->cmd_active_expiration = 0;
			}
		}
	}

	/*
	 * This is all we need to do for ioc commands.
	 */
	if (cmd->cmd_flags & CFLAG_CMDIOC) {
		mptsas_return_to_pool(mpt, cmd);
		return;
	}

	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
}
8574
8575 /*
8576 * accept all cmds on the tx_waitq if any and then
8577 * start a fresh request from the top of the device queue.
8578 *
8579 * since there are always cmds queued on the tx_waitq, and rare cmds on
8580 * the instance waitq, so this function should not be invoked in the ISR,
8581 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
8582 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
8583 */
8584 static void
8585 mptsas_restart_hba(mptsas_t *mpt)
8586 {
8587 ASSERT(mutex_owned(&mpt->m_mutex));
8588
8589 mutex_enter(&mpt->m_tx_waitq_mutex);
8590 if (mpt->m_tx_waitq) {
8591 mptsas_accept_tx_waitq(mpt);
8592 }
8593 mutex_exit(&mpt->m_tx_waitq_mutex);
8594 mptsas_restart_waitq(mpt);
8595 }
8596
/*
 * start a fresh request from the top of the device queue
 */
static void
mptsas_restart_waitq(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd, *next_cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * If there is a reset delay, don't start any cmds.  Otherwise, start
	 * as many cmds as possible.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	cmd = mpt->m_waitq;

	while (cmd != NULL) {
		/* Save the link now; cmd may be deleted from the waitq. */
		next_cmd = cmd->cmd_linkp;
		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * passthru command get slot need
				 * set CFLAG_PREPARED.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_passthru(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_CONFIG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the config page request and delete it
				 * from the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_config_page_access(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the FW Diag request and delete it from
				 * the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_diag(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}

		ptgt = cmd->cmd_tgt_addr;
		/* A fully drained target gets its full throttle back. */
		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
		    (ptgt->m_t_ncmds == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		/*
		 * Dispatch only if a slot is free, the target is not in a
		 * reset delay, and the target's throttle is not exceeded.
		 */
		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
		    (ptgt && (ptgt->m_reset_delay == 0)) &&
		    (ptgt && (ptgt->m_t_ncmds <
		    ptgt->m_t_throttle))) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				mptsas_waitq_delete(mpt, cmd);
				(void) mptsas_start_cmd(mpt, cmd);
			}
		}
		cmd = next_cmd;
	}
}
8677 /*
8678 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
8679 * Accept all those queued cmds before new cmd is accept so that the
8680 * cmds are sent in order.
8681 */
8682 static void
8683 mptsas_accept_tx_waitq(mptsas_t *mpt)
8684 {
8685 mptsas_cmd_t *cmd;
8686
8687 ASSERT(mutex_owned(&mpt->m_mutex));
8688 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8689
8690 /*
8691 * A Bus Reset could occur at any time and flush the tx_waitq,
8692 * so we cannot count on the tx_waitq to contain even one cmd.
8693 * And when the m_tx_waitq_mutex is released and run
8694 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8695 */
8696 cmd = mpt->m_tx_waitq;
8697 for (;;) {
8698 if ((cmd = mpt->m_tx_waitq) == NULL) {
8699 mpt->m_tx_draining = 0;
8700 break;
8701 }
8702 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8703 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8704 }
8705 cmd->cmd_linkp = NULL;
8706 mutex_exit(&mpt->m_tx_waitq_mutex);
8707 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8708 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8709 "to accept cmd on queue");
8710 mutex_enter(&mpt->m_tx_waitq_mutex);
8711 }
8712 }
8713
8714
/*
 * mpt tag type lookup
 *
 * Indexed by the FLAG_TAGMASK bits of cmd_pkt_flags (shifted right by
 * 12 in mptsas_start_cmd()); maps packet tag flags to SCSI queue-tag
 * message codes.  Index 0 and 3 are unused (0).
 */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8720
/*
 * Build the SCSI IO request frame for a command that already owns a
 * slot (its SMID), program the sense buffer and SGL, post the request
 * descriptor to the controller, and arm the command timeout by linking
 * the command into the target's active-command queue.
 *
 * Returns DDI_SUCCESS when the request was issued; DDI_FAILURE when the
 * command was re-queued (untagged cmd to a busy tagged target) or an FM
 * handle check failed after posting.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	caddr_t			mem, arsbuf;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint8_t			ars_size;
	uint64_t		request_desc;
	uint32_t		ars_dmaaddrlow;
	mptsas_cmd_t		*c;

	NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
	    cmd->cmd_flags));

	/*
	 * Set SMID and increment index.  Rollover to 1 instead of 0 if index
	 * is at the max.  0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Re-queue at the head so it runs once drained. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "invalid tag type");
			break;
		}
	} else {
		/*
		 * Untagged commands (other than REQUEST SENSE) limit the
		 * target to one outstanding command.
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Locate this SMID's request frame within the frame pool. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;
	if (cmd->cmd_extrqslen != 0) {
		/*
		 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
		 * Calculate the DMA address with the same offset.
		 */
		arsbuf = cmd->cmd_arq_buf;
		ars_size = cmd->cmd_extrqslen;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
		    0xffffffffu;
	} else {
		/* Use the per-SMID slice of the shared sense buffer pool. */
		arsbuf = mpt->m_req_sense + (mpt->m_req_sense_size * (SMID-1));
		cmd->cmd_arq_buf = arsbuf;
		ars_size = mpt->m_req_sense_size;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    (mpt->m_req_sense_size * (SMID-1))) &
		    0xffffffffu;
	}
	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	bzero(arsbuf, ars_size);

	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	/* Copy the CDB into the request frame. */
	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	if (mptsas_use_fastpath &&
	    ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
		io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		request_desc = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	} else {
		request_desc = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	}
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data phase: a single empty end-of-list SGL element. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
	ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
	    SMID, (void *)io_request, (void *)cmd));

	/* Sync the request frame and sense buffer out for the device. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	pkt->pkt_start = gethrtime();

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc |= (SMID << 16);
	request_desc |= (uint64_t)ptgt->m_devhdl << 48;
	MPTSAS_START_CMD(mpt, request_desc);

	/*
	 * Start timeout.
	 */
	cmd->cmd_active_expiration = pkt->pkt_start +
	    (hrtime_t)pkt->pkt_time * (hrtime_t)NANOSEC;

#ifdef MPTSAS_TEST
	/*
	 * Force timeouts to happen immediately.
	 */
	if (mptsas_test_timeouts)
		cmd->cmd_active_expiration = gethrtime();
#endif
	/*
	 * Link into the target's active queue, which is kept ordered so
	 * the latest expiration sits at the head.
	 */
	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
	if (c == NULL ||
	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
		/*
		 * Common case is that this is the last pending expiration
		 * (or queue is empty).  Insert at head of the queue.
		 */
		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
	} else {
		/*
		 * Queue is not empty and first element expires later than
		 * this command.  Search for element expiring sooner.
		 */
		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
			if (c->cmd_active_expiration <
			    cmd->cmd_active_expiration) {
				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
				break;
			}
		}
		if (c == NULL) {
			/*
			 * No element found expiring sooner, append to
			 * non-empty queue.
			 */
			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
			    cmd_active_link);
		}
	}

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8935
8936 /*
8937 * Select a helper thread to handle current doneq
8938 */
8939 static void
8940 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8941 {
8942 uint64_t t, i;
8943 uint32_t min = 0xffffffff;
8944 mptsas_doneq_thread_list_t *item;
8945
8946 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8947 item = &mpt->m_doneq_thread_id[i];
8948 /*
8949 * If the completed command on help thread[i] less than
8950 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8951 * pick a thread which has least completed command.
8952 */
8953
8954 mutex_enter(&item->mutex);
8955 if (item->len < mpt->m_doneq_thread_threshold) {
8956 t = i;
8957 mutex_exit(&item->mutex);
8958 break;
8959 }
8960 if (item->len < min) {
8961 min = item->len;
8962 t = i;
8963 }
8964 mutex_exit(&item->mutex);
8965 }
8966 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8967 mptsas_doneq_mv(mpt, t);
8968 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8969 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8970 }
8971
8972 /*
8973 * move the current global doneq to the doneq of thead[t]
8974 */
8975 static void
8976 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8977 {
8978 mptsas_cmd_t *cmd;
8979 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8980
8981 ASSERT(mutex_owned(&item->mutex));
8982 while ((cmd = mpt->m_doneq) != NULL) {
8983 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8984 mpt->m_donetail = &mpt->m_doneq;
8985 }
8986 cmd->cmd_linkp = NULL;
8987 *item->donetail = cmd;
8988 item->donetail = &cmd->cmd_linkp;
8989 mpt->m_doneq_len--;
8990 item->len++;
8991 }
8992 }
8993
/*
 * Check every access and DMA handle used by the controller and this
 * command.  On any FM-detected fault, report DDI_SERVICE_UNAFFECTED
 * against the HBA and fail the packet with CMD_TRAN_ERR so the target
 * driver can retry.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-command data DMA handle (only present for data commands). */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Extra SGL frames, if this command chained beyond one frame. */
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
9056
9057 /*
9058 * These routines manipulate the queue of commands that
9059 * are waiting for their completion routines to be called.
9060 * The queue is usually in FIFO order but on an MP system
9061 * it's possible for the completion routines to get out
9062 * of order. If that's a problem you need to add a global
9063 * mutex around the code that calls the completion routine
9064 * in the interrupt handler.
9065 */
9066 static void
9067 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
9068 {
9069 struct scsi_pkt *pkt = CMD2PKT(cmd);
9070
9071 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
9072
9073 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
9074 cmd->cmd_linkp = NULL;
9075 cmd->cmd_flags |= CFLAG_FINISHED;
9076 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
9077
9078 mptsas_fma_check(mpt, cmd);
9079
9080 /*
9081 * only add scsi pkts that have completion routines to
9082 * the doneq. no intr cmds do not have callbacks.
9083 */
9084 if (pkt && (pkt->pkt_comp)) {
9085 *mpt->m_donetail = cmd;
9086 mpt->m_donetail = &cmd->cmd_linkp;
9087 mpt->m_doneq_len++;
9088 }
9089 }
9090
9091 static mptsas_cmd_t *
9092 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
9093 {
9094 mptsas_cmd_t *cmd;
9095 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
9096
9097 /* pop one off the done queue */
9098 if ((cmd = item->doneq) != NULL) {
9099 /* if the queue is now empty fix the tail pointer */
9100 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
9101 if ((item->doneq = cmd->cmd_linkp) == NULL) {
9102 item->donetail = &item->doneq;
9103 }
9104 cmd->cmd_linkp = NULL;
9105 item->len--;
9106 }
9107 return (cmd);
9108 }
9109
/*
 * Run the completion routines of every command on the global doneq.
 * m_mutex is dropped while the callbacks run; the m_in_callback flag
 * prevents re-entering this path from within a completion routine.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* Detach the whole queue so it can be walked unlocked. */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
9141
9142 /*
9143 * These routines manipulate the target's queue of pending requests
9144 */
9145 void
9146 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
9147 {
9148 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
9149 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9150 cmd->cmd_queued = TRUE;
9151 if (ptgt)
9152 ptgt->m_t_nwait++;
9153 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
9154 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
9155 mpt->m_waitqtail = &cmd->cmd_linkp;
9156 }
9157 mpt->m_waitq = cmd;
9158 } else {
9159 cmd->cmd_linkp = NULL;
9160 *(mpt->m_waitqtail) = cmd;
9161 mpt->m_waitqtail = &cmd->cmd_linkp;
9162 }
9163 }
9164
/*
 * Remove and return the command at the head of the wait queue (NULL if
 * the queue is empty), maintaining the per-target pending count.
 */
static mptsas_cmd_t *
mptsas_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt;
	NDBG7(("mptsas_waitq_rm"));

	/* Macro unlinks the head of m_waitq into cmd (may be NULL). */
	MPTSAS_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
	if (cmd) {
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt) {
			ptgt->m_t_nwait--;
			ASSERT(ptgt->m_t_nwait >= 0);
		}
	}
	return (cmd);
}
9184
9185 /*
9186 * remove specified cmd from the middle of the wait queue.
9187 */
9188 static void
9189 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9190 {
9191 mptsas_cmd_t *prevp = mpt->m_waitq;
9192 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9193
9194 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9195 (void *)mpt, (void *)cmd));
9196 if (ptgt) {
9197 ptgt->m_t_nwait--;
9198 ASSERT(ptgt->m_t_nwait >= 0);
9199 }
9200
9201 if (prevp == cmd) {
9202 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
9203 mpt->m_waitqtail = &mpt->m_waitq;
9204
9205 cmd->cmd_linkp = NULL;
9206 cmd->cmd_queued = FALSE;
9207 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9208 (void *)mpt, (void *)cmd));
9209 return;
9210 }
9211
9212 while (prevp != NULL) {
9213 if (prevp->cmd_linkp == cmd) {
9214 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9215 mpt->m_waitqtail = &prevp->cmd_linkp;
9216
9217 cmd->cmd_linkp = NULL;
9218 cmd->cmd_queued = FALSE;
9219 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9220 (void *)mpt, (void *)cmd));
9221 return;
9222 }
9223 prevp = prevp->cmd_linkp;
9224 }
9225 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
9226 }
9227
/*
 * Remove and return the command at the head of the tx_waitq (NULL if
 * the queue is empty).
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	/* Macro unlinks the head of m_tx_waitq into cmd (may be NULL). */
	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
9240
9241 /*
9242 * remove specified cmd from the middle of the tx_waitq.
9243 */
9244 static void
9245 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9246 {
9247 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
9248
9249 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9250 (void *)mpt, (void *)cmd));
9251
9252 if (prevp == cmd) {
9253 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
9254 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
9255
9256 cmd->cmd_linkp = NULL;
9257 cmd->cmd_queued = FALSE;
9258 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9259 (void *)mpt, (void *)cmd));
9260 return;
9261 }
9262
9263 while (prevp != NULL) {
9264 if (prevp->cmd_linkp == cmd) {
9265 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9266 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
9267
9268 cmd->cmd_linkp = NULL;
9269 cmd->cmd_queued = FALSE;
9270 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9271 (void *)mpt, (void *)cmd));
9272 return;
9273 }
9274 prevp = prevp->cmd_linkp;
9275 }
9276 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
9277 }
9278
9279 /*
9280 * device and bus reset handling
9281 *
9282 * Notes:
9283 * - RESET_ALL: reset the controller
9284 * - RESET_TARGET: reset the target specified in scsi_address
9285 */
9286 static int
9287 mptsas_scsi_reset(struct scsi_address *ap, int level)
9288 {
9289 mptsas_t *mpt = ADDR2MPT(ap);
9290 int rval;
9291 mptsas_tgt_private_t *tgt_private;
9292 mptsas_target_t *ptgt = NULL;
9293
9294 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
9295 ptgt = tgt_private->t_private;
9296 if (ptgt == NULL) {
9297 return (FALSE);
9298 }
9299 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
9300 level));
9301
9302 mutex_enter(&mpt->m_mutex);
9303 /*
9304 * if we are not in panic set up a reset delay for this target
9305 */
9306 if (!ddi_in_panic()) {
9307 mptsas_setup_bus_reset_delay(mpt);
9308 } else {
9309 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
9310 }
9311 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
9312 mutex_exit(&mpt->m_mutex);
9313
9314 /*
9315 * The transport layer expect to only see TRUE and
9316 * FALSE. Therefore, we will adjust the return value
9317 * if mptsas_do_scsi_reset returns FAILED.
9318 */
9319 if (rval == FAILED)
9320 rval = FALSE;
9321 return (rval);
9322 }
9323
9324 static int
9325 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
9326 {
9327 int rval = FALSE;
9328 uint8_t config, disk;
9329
9330 ASSERT(mutex_owned(&mpt->m_mutex));
9331
9332 if (mptsas_debug_resets) {
9333 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
9334 devhdl);
9335 }
9336
9337 /*
9338 * Issue a Target Reset message to the target specified but not to a
9339 * disk making up a raid volume. Just look through the RAID config
9340 * Phys Disk list of DevHandles. If the target's DevHandle is in this
9341 * list, then don't reset this target.
9342 */
9343 for (config = 0; config < mpt->m_num_raid_configs; config++) {
9344 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
9345 if (devhdl == mpt->m_raidconfig[config].
9346 m_physdisk_devhdl[disk]) {
9347 return (TRUE);
9348 }
9349 }
9350 }
9351
9352 rval = mptsas_ioc_task_management(mpt,
9353 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
9354
9355 mptsas_doneq_empty(mpt);
9356 return (rval);
9357 }
9358
/*
 * Register or cancel a bus-reset notification callback for this
 * address, delegating to the common SCSA helper which manages the
 * m_reset_notify_listf list under m_mutex.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
9370
9371 static int
9372 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9373 {
9374 dev_info_t *lun_dip = NULL;
9375
9376 ASSERT(sd != NULL);
9377 ASSERT(name != NULL);
9378 lun_dip = sd->sd_dev;
9379 ASSERT(lun_dip != NULL);
9380
9381 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9382 return (1);
9383 } else {
9384 return (0);
9385 }
9386 }
9387
/*
 * Bus-address lookup: the bus address is the same string as the device
 * name here, so simply delegate to mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9393
9394 void
9395 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9396 {
9397
9398 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9399
9400 /*
9401 * if the bus is draining/quiesced, no changes to the throttles
9402 * are allowed. Not allowing change of throttles during draining
9403 * limits error recovery but will reduce draining time
9404 *
9405 * all throttles should have been set to HOLD_THROTTLE
9406 */
9407 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9408 return;
9409 }
9410
9411 if (what == HOLD_THROTTLE) {
9412 ptgt->m_t_throttle = what;
9413 } else if (ptgt->m_reset_delay == 0) {
9414 if (what == MAX_THROTTLE)
9415 ptgt->m_t_throttle = mpt->m_max_tune_throttle;
9416 else
9417 ptgt->m_t_throttle = what;
9418 }
9419 }
9420
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target.  For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;
	hrtime_t	timestamp;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	timestamp = gethrtime();

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				if (cmd->cmd_active_expiration <= timestamp) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/*
				 * cmd is already unlinked, so the tx_waitq
				 * lock can be dropped while completing it;
				 * next_cmd was captured before the drop.
				 */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/*
				 * As above: drop the tx_waitq lock around
				 * completion of the already-unlinked cmd.
				 */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}

#ifdef MPTSAS_FAULTINJECTION
	mptsas_fminj_move_tgt_to_doneq(mpt, target, reason, stat);
#endif
}
9572
9573 /*
9574 * Clean up hba state, abort all outstanding command and commands in waitq
9575 * reset timeout of all targets.
9576 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t *slots = mpt->m_active;
	mptsas_cmd_t *cmd;
	int slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame. Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags &
			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
				/* Wake any thread sleeping on this command. */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		/* Complete the orphaned command back to its initiator. */
		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.  Passthrough, config and FW diag commands
	 * have a thread waiting on them, so wake that thread instead
	 * of routing the command through the done queue.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq.  The tx_waitq mutex is dropped around
	 * each completion, matching the pattern used by
	 * mptsas_flush_target() above.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources. The thread
	 * passing through here could be launched from either (dr)
	 * or (event) taskqs so only wait on the 'other' queue since
	 * waiting on 'this' queue is a deadlock condition.
	 */
	mutex_exit(&mpt->m_mutex);
	if (!taskq_member((taskq_t *)mpt->m_event_taskq, curthread))
		ddi_taskq_wait(mpt->m_event_taskq);
	if (!taskq_member((taskq_t *)mpt->m_dr_taskq, curthread))
		ddi_taskq_wait(mpt->m_dr_taskq);

	mutex_enter(&mpt->m_mutex);
}
9669
9670 /*
9671 * set pkt_reason and OR in pkt_statistics flag
9672 */
9673 static void
9674 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9675 uint_t stat)
9676 {
9677 #ifndef __lock_lint
9678 _NOTE(ARGUNUSED(mpt))
9679 #endif
9680
9681 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9682 (void *)cmd, reason, stat));
9683
9684 if (cmd) {
9685 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9686 cmd->cmd_pkt->pkt_reason = reason;
9687 }
9688 cmd->cmd_pkt->pkt_statistics |= stat;
9689 }
9690 }
9691
9692 static void
9693 mptsas_start_watch_reset_delay()
9694 {
9695 NDBG22(("mptsas_start_watch_reset_delay"));
9696
9697 mutex_enter(&mptsas_global_mutex);
9698 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9699 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9700 drv_usectohz((clock_t)
9701 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9702 ASSERT(mptsas_reset_watch != NULL);
9703 }
9704 mutex_exit(&mptsas_global_mutex);
9705 }
9706
9707 static void
9708 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9709 {
9710 mptsas_target_t *ptgt = NULL;
9711
9712 ASSERT(MUTEX_HELD(&mpt->m_mutex));
9713
9714 NDBG22(("mptsas_setup_bus_reset_delay"));
9715 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9716 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9717 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9718 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9719 }
9720
9721 mptsas_start_watch_reset_delay();
9722 }
9723
9724 /*
9725 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9726 * mpt instance for active reset delays
9727 */
9728 static void
9729 mptsas_watch_reset_delay(void *arg)
9730 {
9731 #ifndef __lock_lint
9732 _NOTE(ARGUNUSED(arg))
9733 #endif
9734
9735 mptsas_t *mpt;
9736 int not_done = 0;
9737
9738 NDBG22(("mptsas_watch_reset_delay"));
9739
9740 mutex_enter(&mptsas_global_mutex);
9741 mptsas_reset_watch = 0;
9742 mutex_exit(&mptsas_global_mutex);
9743 rw_enter(&mptsas_global_rwlock, RW_READER);
9744 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9745 if (mpt->m_tran == 0) {
9746 continue;
9747 }
9748 mutex_enter(&mpt->m_mutex);
9749 not_done += mptsas_watch_reset_delay_subr(mpt);
9750 mutex_exit(&mpt->m_mutex);
9751 }
9752 rw_exit(&mptsas_global_rwlock);
9753
9754 if (not_done) {
9755 mptsas_start_watch_reset_delay();
9756 }
9757 }
9758
9759 static int
9760 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9761 {
9762 int done = 0;
9763 int restart = 0;
9764 mptsas_target_t *ptgt = NULL;
9765
9766 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9767
9768 ASSERT(mutex_owned(&mpt->m_mutex));
9769
9770 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9771 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9772 if (ptgt->m_reset_delay != 0) {
9773 ptgt->m_reset_delay -=
9774 MPTSAS_WATCH_RESET_DELAY_TICK;
9775 if (ptgt->m_reset_delay <= 0) {
9776 ptgt->m_reset_delay = 0;
9777 mptsas_set_throttle(mpt, ptgt,
9778 MAX_THROTTLE);
9779 restart++;
9780 } else {
9781 done = -1;
9782 }
9783 }
9784 }
9785
9786 if (restart > 0) {
9787 mptsas_restart_hba(mpt);
9788 }
9789 return (done);
9790 }
9791
9792 #ifdef MPTSAS_TEST
9793 static void
9794 mptsas_test_reset(mptsas_t *mpt, int target)
9795 {
9796 mptsas_target_t *ptgt = NULL;
9797
9798 if (mptsas_rtest == target) {
9799 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9800 mptsas_rtest = -1;
9801 }
9802 if (mptsas_rtest == -1) {
9803 NDBG22(("mptsas_test_reset success"));
9804 }
9805 }
9806 }
9807 #endif
9808
9809 /*
9810 * abort handling:
9811 *
9812 * Notes:
9813 * - if pkt is not NULL, abort just that command
9814 * - if pkt is NULL, abort all outstanding commands for target
9815 */
9816 static int
9817 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9818 {
9819 mptsas_t *mpt = ADDR2MPT(ap);
9820 int rval;
9821 mptsas_tgt_private_t *tgt_private;
9822 int target, lun;
9823
9824 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9825 tran_tgt_private;
9826 ASSERT(tgt_private != NULL);
9827 target = tgt_private->t_private->m_devhdl;
9828 lun = tgt_private->t_lun;
9829
9830 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9831
9832 mutex_enter(&mpt->m_mutex);
9833 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9834 mutex_exit(&mpt->m_mutex);
9835 return (rval);
9836 }
9837
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t *sp = NULL;
	mptsas_slots_t *slots = mpt->m_active;
	int rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap. If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

#ifdef MPTSAS_FAULTINJECTION
		/* Command already on the list. */
		if (((pkt->pkt_flags & FLAG_PKT_TIMEOUT) != 0) &&
		    (sp->cmd_active_expiration != 0)) {
			mptsas_fminj_move_cmd_to_doneq(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			rval = TRUE;
			goto done;
		}
#endif

		/*
		 * A command still sitting on a wait queue never reached
		 * the hardware, so it can be completed locally without
		 * involving the firmware.
		 */
		if (sp->cmd_queued) {
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command, but only if it
		 * still occupies its slot in the active table.
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Run completion callbacks for anything queued above. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9928
9929 /*
9930 * capability handling:
9931 * (*tran_getcap). Get the capability named, and return its value.
9932 */
9933 static int
9934 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9935 {
9936 mptsas_t *mpt = ADDR2MPT(ap);
9937 int ckey;
9938 int rval = FALSE;
9939
9940 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9941 ap->a_target, cap, tgtonly));
9942
9943 mutex_enter(&mpt->m_mutex);
9944
9945 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9946 mutex_exit(&mpt->m_mutex);
9947 return (UNDEFINED);
9948 }
9949
9950 switch (ckey) {
9951 case SCSI_CAP_DMA_MAX:
9952 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9953 break;
9954 case SCSI_CAP_ARQ:
9955 rval = TRUE;
9956 break;
9957 case SCSI_CAP_MSG_OUT:
9958 case SCSI_CAP_PARITY:
9959 case SCSI_CAP_UNTAGGED_QING:
9960 rval = TRUE;
9961 break;
9962 case SCSI_CAP_TAGGED_QING:
9963 rval = TRUE;
9964 break;
9965 case SCSI_CAP_RESET_NOTIFICATION:
9966 rval = TRUE;
9967 break;
9968 case SCSI_CAP_LINKED_CMDS:
9969 rval = FALSE;
9970 break;
9971 case SCSI_CAP_QFULL_RETRIES:
9972 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9973 tran_tgt_private))->t_private->m_qfull_retries;
9974 break;
9975 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9976 rval = drv_hztousec(((mptsas_tgt_private_t *)
9977 (ap->a_hba_tran->tran_tgt_private))->
9978 t_private->m_qfull_retry_interval) / 1000;
9979 break;
9980 case SCSI_CAP_CDB_LEN:
9981 rval = CDB_GROUP4;
9982 break;
9983 case SCSI_CAP_INTERCONNECT_TYPE:
9984 rval = INTERCONNECT_SAS;
9985 break;
9986 case SCSI_CAP_TRAN_LAYER_RETRIES:
9987 if (mpt->m_ioc_capabilities &
9988 MPI2_IOCFACTS_CAPABILITY_TLR)
9989 rval = TRUE;
9990 else
9991 rval = FALSE;
9992 break;
9993 default:
9994 rval = UNDEFINED;
9995 break;
9996 }
9997
9998 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9999
10000 mutex_exit(&mpt->m_mutex);
10001 return (rval);
10002 }
10003
10004 /*
10005 * (*tran_setcap). Set the capability named to the value given.
10006 */
10007 static int
10008 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
10009 {
10010 mptsas_t *mpt = ADDR2MPT(ap);
10011 int ckey;
10012 int rval = FALSE;
10013
10014 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
10015 ap->a_target, cap, value, tgtonly));
10016
10017 if (!tgtonly) {
10018 return (rval);
10019 }
10020
10021 mutex_enter(&mpt->m_mutex);
10022
10023 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
10024 mutex_exit(&mpt->m_mutex);
10025 return (UNDEFINED);
10026 }
10027
10028 switch (ckey) {
10029 case SCSI_CAP_DMA_MAX:
10030 case SCSI_CAP_MSG_OUT:
10031 case SCSI_CAP_PARITY:
10032 case SCSI_CAP_INITIATOR_ID:
10033 case SCSI_CAP_LINKED_CMDS:
10034 case SCSI_CAP_UNTAGGED_QING:
10035 case SCSI_CAP_RESET_NOTIFICATION:
10036 /*
10037 * None of these are settable via
10038 * the capability interface.
10039 */
10040 break;
10041 case SCSI_CAP_ARQ:
10042 /*
10043 * We cannot turn off arq so return false if asked to
10044 */
10045 if (value) {
10046 rval = TRUE;
10047 } else {
10048 rval = FALSE;
10049 }
10050 break;
10051 case SCSI_CAP_TAGGED_QING:
10052 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
10053 (ap->a_hba_tran->tran_tgt_private))->t_private,
10054 MAX_THROTTLE);
10055 rval = TRUE;
10056 break;
10057 case SCSI_CAP_QFULL_RETRIES:
10058 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
10059 t_private->m_qfull_retries = (uchar_t)value;
10060 rval = TRUE;
10061 break;
10062 case SCSI_CAP_QFULL_RETRY_INTERVAL:
10063 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
10064 t_private->m_qfull_retry_interval =
10065 drv_usectohz(value * 1000);
10066 rval = TRUE;
10067 break;
10068 default:
10069 rval = UNDEFINED;
10070 break;
10071 }
10072 mutex_exit(&mpt->m_mutex);
10073 return (rval);
10074 }
10075
10076 /*
10077 * Utility routine for mptsas_ifsetcap/ifgetcap
10078 */
10079 /*ARGSUSED*/
10080 static int
10081 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
10082 {
10083 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
10084
10085 if (!cap)
10086 return (FALSE);
10087
10088 *cidxp = scsi_hba_lookup_capstr(cap);
10089 return (TRUE);
10090 }
10091
10092 static int
10093 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
10094 {
10095 mptsas_slots_t *old_active = mpt->m_active;
10096 mptsas_slots_t *new_active;
10097 size_t size;
10098
10099 /*
10100 * if there are active commands, then we cannot
10101 * change size of active slots array.
10102 */
10103 ASSERT(mpt->m_ncmds == 0);
10104
10105 size = MPTSAS_SLOTS_SIZE(mpt);
10106 new_active = kmem_zalloc(size, flag);
10107 if (new_active == NULL) {
10108 NDBG1(("new active alloc failed"));
10109 return (-1);
10110 }
10111 /*
10112 * Since SMID 0 is reserved and the TM slot is reserved, the
10113 * number of slots that can be used at any one time is
10114 * m_max_requests - 2.
10115 */
10116 new_active->m_n_normal = (mpt->m_max_requests - 2);
10117 new_active->m_size = size;
10118 new_active->m_rotor = 1;
10119 if (old_active)
10120 mptsas_free_active_slots(mpt);
10121 mpt->m_active = new_active;
10122
10123 return (0);
10124 }
10125
10126 static void
10127 mptsas_free_active_slots(mptsas_t *mpt)
10128 {
10129 mptsas_slots_t *active = mpt->m_active;
10130 size_t size;
10131
10132 if (active == NULL)
10133 return;
10134 size = active->m_size;
10135 kmem_free(active, size);
10136 mpt->m_active = NULL;
10137 }
10138
10139 /*
10140 * Error logging, printing, and debug print routines.
10141 */
10142 static char *mptsas_label = "mpt_sas";
10143
/*PRINTFLIKE3*/
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t *dev;
	va_list ap;

	/*
	 * Format a driver message into the shared mptsas_log_buf
	 * (serialized by mptsas_log_mutex) and emit it via scsi_log().
	 * `mpt' may be NULL (e.g. before attach completes), in which
	 * case the message is logged without a devinfo node.
	 *
	 * NOTE(review): vsprintf() is unbounded here; callers must keep
	 * formatted messages shorter than mptsas_log_buf (size not
	 * visible in this file) — confirm, or switch to vsnprintf().
	 */
	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	/*
	 * Informational levels get a trailing newline; the '!' prefix
	 * is the cmn_err(9F) convention for log-only (non-console)
	 * output.
	 */
	if (level == CE_CONT || level == CE_NOTE) {
		scsi_log(dev, mptsas_label, level, "!%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "!%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
10171
10172 #ifdef MPTSAS_DEBUG
10173 /*
10174 * Use a circular buffer to log messages to private memory.
10175 * Increment idx atomically to minimize risk to miss lines.
10176 * It's fast and does not hold up the proceedings too much.
10177 */
10178 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
10179 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
10180 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
10181 static uint32_t mptsas_dbglog_idx = 0;
10182
10183 /*PRINTFLIKE1*/
10184 void
10185 mptsas_debug_log(char *fmt, ...)
10186 {
10187 va_list ap;
10188 uint32_t idx;
10189
10190 idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
10191 (mptsas_dbglog_linecnt - 1);
10192
10193 va_start(ap, fmt);
10194 (void) vsnprintf(mptsas_dbglog_bufs[idx],
10195 mptsas_dbglog_linelen, fmt, ap);
10196 va_end(ap);
10197 }
10198
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t *dev = 0;
	va_list ap;

	/*
	 * Debug-build console printf: formats into the shared
	 * mptsas_log_buf under mptsas_log_mutex, then emits via
	 * prom_printf() when PROM_PRINTF is defined, otherwise via
	 * scsi_log() at CE_CONT.
	 *
	 * NOTE(review): vsprintf() is unbounded; see the same concern
	 * in mptsas_log().
	 */
	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
10219 #endif
10220
10221 /*
10222 * timeout handling
10223 */
10224 static void
10225 mptsas_watch(void *arg)
10226 {
10227 #ifndef __lock_lint
10228 _NOTE(ARGUNUSED(arg))
10229 #endif
10230
10231 mptsas_t *mpt;
10232 uint32_t doorbell;
10233
10234 #ifdef MPTSAS_FAULTINJECTION
10235 struct mptsas_active_cmdq finj_cmds;
10236
10237 TAILQ_INIT(&finj_cmds);
10238 #endif
10239
10240 NDBG30(("mptsas_watch"));
10241
10242 rw_enter(&mptsas_global_rwlock, RW_READER);
10243 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
10244
10245 mutex_enter(&mpt->m_mutex);
10246
10247 /* Skip device if not powered on */
10248 if (mpt->m_options & MPTSAS_OPT_PM) {
10249 if (mpt->m_power_level == PM_LEVEL_D0) {
10250 (void) pm_busy_component(mpt->m_dip, 0);
10251 mpt->m_busy = 1;
10252 } else {
10253 mutex_exit(&mpt->m_mutex);
10254 continue;
10255 }
10256 }
10257
10258 /*
10259 * Check if controller is in a FAULT state. If so, reset it.
10260 */
10261 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
10262 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
10263 doorbell &= MPI2_DOORBELL_DATA_MASK;
10264 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
10265 "code: %04x", doorbell);
10266 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
10267 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10268 mptsas_log(mpt, CE_WARN, "Reset failed"
10269 "after fault was detected");
10270 }
10271 }
10272
10273 /*
10274 * For now, always call mptsas_watchsubr.
10275 */
10276 mptsas_watchsubr(mpt);
10277
10278 if (mpt->m_options & MPTSAS_OPT_PM) {
10279 mpt->m_busy = 0;
10280 (void) pm_idle_component(mpt->m_dip, 0);
10281 }
10282
10283 #ifdef MPTSAS_FAULTINJECTION
10284 mptsas_fminj_watchsubr(mpt, &finj_cmds);
10285 #endif
10286
10287 mutex_exit(&mpt->m_mutex);
10288 }
10289 rw_exit(&mptsas_global_rwlock);
10290
10291 mutex_enter(&mptsas_global_mutex);
10292 if (mptsas_timeouts_enabled)
10293 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
10294 mutex_exit(&mptsas_global_mutex);
10295
10296 #ifdef MPTSAS_FAULTINJECTION
10297 /* Complete all completed commands. */
10298 if (!TAILQ_EMPTY(&finj_cmds)) {
10299 mptsas_cmd_t *cmd;
10300
10301 while ((cmd = TAILQ_FIRST(&finj_cmds)) != NULL) {
10302 TAILQ_REMOVE(&finj_cmds, cmd, cmd_active_link);
10303 struct scsi_pkt *pkt = cmd->cmd_pkt;
10304
10305 if (pkt->pkt_comp != NULL) {
10306 (*pkt->pkt_comp)(pkt);
10307 }
10308 }
10309 }
10310 #endif
10311 }
10312
static void
mptsas_watchsubr_tgt(mptsas_t *mpt, mptsas_target_t *ptgt, hrtime_t timestamp)
{
	mptsas_cmd_t *cmd;

	/*
	 * Per-target watchdog work: recover from qfull draining and
	 * detect command timeouts.  Called with m_mutex held.
	 */

	/*
	 * If we were draining due to a qfull condition,
	 * go back to full throttle.
	 */
	if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		mptsas_restart_hba(mpt);
	}

	/*
	 * NOTE(review): the code below treats the queue tail as the
	 * earliest-expiring command and the head as the latest —
	 * assumes m_active_cmdq is kept ordered by expiration at
	 * insertion time (insertion code not visible here); confirm.
	 */
	cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
	if (cmd == NULL)
		return;

	if (cmd->cmd_active_expiration <= timestamp) {
		/*
		 * Earliest command timeout expired. Drain throttle.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

		/*
		 * Check for remaining commands.
		 */
		cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
		if (cmd->cmd_active_expiration > timestamp) {
			/*
			 * Wait for remaining commands to complete or
			 * time out.
			 */
			NDBG23(("command timed out, pending drain"));
			return;
		}

		/*
		 * All command timeouts expired.
		 */
		mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
		    "expired with %d commands on target %d lun %d.",
		    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
		    ptgt->m_devhdl, Lun(cmd));

		mptsas_cmd_timeout(mpt, ptgt);
	} else if (cmd->cmd_active_expiration <=
	    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
		/* Timeout due within the next watchdog tick: pre-drain. */
		NDBG23(("pending timeout"));
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	}
}
10367
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int i;
	mptsas_cmd_t *cmd;
	mptsas_target_t *ptgt = NULL;
	hrtime_t timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	/* Per-target timeout/throttle checks for attached targets. */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
	}

	/* ... and for targets still on the temporary hash. */
	for (ptgt = refhash_first(mpt->m_tmp_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_tmp_targets, ptgt)) {
		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
	}
}
10427
10428 /*
10429 * timeout recovery
10430 */
10431 static void
10432 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
10433 {
10434 uint16_t devhdl;
10435 uint64_t sas_wwn;
10436 uint8_t phy;
10437 char wwn_str[MPTSAS_WWN_STRLEN];
10438
10439 devhdl = ptgt->m_devhdl;
10440 sas_wwn = ptgt->m_addr.mta_wwn;
10441 phy = ptgt->m_phynum;
10442 if (sas_wwn == 0) {
10443 (void) sprintf(wwn_str, "p%x", phy);
10444 } else {
10445 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
10446 }
10447
10448 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10449 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10450 "target %d %s, enclosure %u", devhdl, wwn_str,
10451 ptgt->m_enclosure);
10452
10453 /*
10454 * Abort all outstanding commands on the device.
10455 */
10456 NDBG29(("mptsas_cmd_timeout: device reset"));
10457 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10458 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10459 "recovery failed!", devhdl);
10460 }
10461 }
10462
10463 /*
10464 * Device / Hotplug control
10465 */
10466 static int
10467 mptsas_scsi_quiesce(dev_info_t *dip)
10468 {
10469 mptsas_t *mpt;
10470 scsi_hba_tran_t *tran;
10471
10472 tran = ddi_get_driver_private(dip);
10473 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10474 return (-1);
10475
10476 return (mptsas_quiesce_bus(mpt));
10477 }
10478
10479 static int
10480 mptsas_scsi_unquiesce(dev_info_t *dip)
10481 {
10482 mptsas_t *mpt;
10483 scsi_hba_tran_t *tran;
10484
10485 tran = ddi_get_driver_private(dip);
10486 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10487 return (-1);
10488
10489 return (mptsas_unquiesce_bus(mpt));
10490 }
10491
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t *ptgt = NULL;

	/*
	 * Quiesce the bus: hold all target throttles, then wait for
	 * outstanding commands to drain.  Returns 0 on success, -1 if
	 * the wait was interrupted by a signal.
	 */
	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/*
		 * mptsas_ncmds_checkdrain() periodically re-holds the
		 * throttles and signals m_cv once m_ncmds reaches 0.
		 */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted (cv_wait_sig
			 * returned 0 on a signal): undo the holds,
			 * restart I/O and cancel the drain timeout if
			 * it has not fired yet.
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				/* untimeout may block; call unlocked. */
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10544
10545 static int
10546 mptsas_unquiesce_bus(mptsas_t *mpt)
10547 {
10548 mptsas_target_t *ptgt = NULL;
10549
10550 NDBG28(("mptsas_unquiesce_bus"));
10551 mutex_enter(&mpt->m_mutex);
10552 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10553 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10554 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10555 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10556 }
10557 mptsas_restart_hba(mpt);
10558 mutex_exit(&mpt->m_mutex);
10559 return (0);
10560 }
10561
10562 static void
10563 mptsas_ncmds_checkdrain(void *arg)
10564 {
10565 mptsas_t *mpt = arg;
10566 mptsas_target_t *ptgt = NULL;
10567
10568 mutex_enter(&mpt->m_mutex);
10569 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10570 mpt->m_quiesce_timeid = 0;
10571 if (mpt->m_ncmds == 0) {
10572 /* Command queue has been drained */
10573 cv_signal(&mpt->m_cv);
10574 } else {
10575 /*
10576 * The throttle may have been reset because
10577 * of a SCSI bus reset
10578 */
10579 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10580 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10581 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10582 }
10583
10584 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10585 mpt, (MPTSAS_QUIESCE_TIMEOUT *
10586 drv_usectohz(1000000)));
10587 }
10588 }
10589 mutex_exit(&mpt->m_mutex);
10590 }
10591
10592 /*ARGSUSED*/
10593 static void
10594 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10595 {
10596 int i;
10597 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10598 char buf[128];
10599
10600 buf[0] = '\0';
10601 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10602 Tgt(cmd), Lun(cmd)));
10603 (void) sprintf(&buf[0], "\tcdb=[");
10604 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10605 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10606 }
10607 (void) sprintf(&buf[strlen(buf)], " ]");
10608 NDBG25(("?%s\n", buf));
10609 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10610 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10611 cmd->cmd_pkt->pkt_state));
10612 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10613 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10614 }
10615
static void
mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2SGESimple64_t sgep)
{
	uint32_t sge_flags;
	uint32_t data_size, dataout_size;
	ddi_dma_cookie_t data_cookie;
	ddi_dma_cookie_t dataout_cookie;

	/*
	 * Build the MPI2 scatter/gather list for a passthrough
	 * request: an optional host-to-IOC (data-out) simple element
	 * followed by the final element carrying the end-of-list
	 * flags.  All fields are written through the DMA access
	 * handle so device endianness is handled by ddi_put32().
	 */
	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	if (dataout_size) {
		/* Data-out element first; advance sgep past it. */
		sge_flags = dataout_size |
		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress
		    >> 32));
		sgep++;
	}
	/* Final element: last/end-of-buffer/end-of-list flags. */
	sge_flags = data_size;
	sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
	    MPI2_SGE_FLAGS_SHIFT);
	/* Direction of the final element follows the request. */
	if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
		    MPI2_SGE_FLAGS_SHIFT);
	} else {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
		    MPI2_SGE_FLAGS_SHIFT);
	}
	ddi_put32(acc_hdl, &sgep->FlagsLength,
	    sge_flags);
	ddi_put32(acc_hdl, &sgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress &
	    0xffffffffull));
	ddi_put32(acc_hdl, &sgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
}
10668
10669 static void
10670 mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10671 pMpi2IeeeSgeSimple64_t ieeesgep)
10672 {
10673 uint8_t sge_flags;
10674 uint32_t data_size, dataout_size;
10675 ddi_dma_cookie_t data_cookie;
10676 ddi_dma_cookie_t dataout_cookie;
10677
10678 data_size = pt->data_size;
10679 dataout_size = pt->dataout_size;
10680 data_cookie = pt->data_cookie;
10681 dataout_cookie = pt->dataout_cookie;
10682
10683 sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
10684 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
10685 if (dataout_size) {
10686 ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
10687 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10688 (uint32_t)(dataout_cookie.dmac_laddress &
10689 0xffffffffull));
10690 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10691 (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10692 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10693 ieeesgep++;
10694 }
10695 sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
10696 ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
10697 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10698 (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10699 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10700 (uint32_t)(data_cookie.dmac_laddress >> 32));
10701 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10702 }
10703
10704 static void
10705 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10706 {
10707 caddr_t memp;
10708 pMPI2RequestHeader_t request_hdrp;
10709 struct scsi_pkt *pkt = cmd->cmd_pkt;
10710 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10711 uint32_t request_size;
10712 uint32_t i;
10713 uint64_t request_desc = 0;
10714 uint8_t desc_type;
10715 uint16_t SMID;
10716 uint8_t *request, function;
10717 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10718 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10719
10720 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10721
10722 request = pt->request;
10723 request_size = pt->request_size;
10724
10725 SMID = cmd->cmd_slot;
10726
10727 /*
10728 * Store the passthrough message in memory location
10729 * corresponding to our slot number
10730 */
10731 memp = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
10732 request_hdrp = (pMPI2RequestHeader_t)memp;
10733 bzero(memp, mpt->m_req_frame_size);
10734
10735 for (i = 0; i < request_size; i++) {
10736 bcopy(request + i, memp + i, 1);
10737 }
10738
10739 NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10740 "size=%d, in %d, out %d, SMID %d", request_hdrp->Function,
10741 request_hdrp->MsgFlags, request_size,
10742 pt->data_size, pt->dataout_size, SMID));
10743
10744 /*
10745 * Add an SGE, even if the length is zero.
10746 */
10747 if (mpt->m_MPI25 && pt->simple == 0) {
10748 mptsas_passthru_ieee_sge(acc_hdl, pt,
10749 (pMpi2IeeeSgeSimple64_t)
10750 ((uint8_t *)request_hdrp + pt->sgl_offset));
10751 } else {
10752 mptsas_passthru_sge(acc_hdl, pt,
10753 (pMpi2SGESimple64_t)
10754 ((uint8_t *)request_hdrp + pt->sgl_offset));
10755 }
10756
10757 function = request_hdrp->Function;
10758 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10759 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10760 pMpi2SCSIIORequest_t scsi_io_req;
10761 caddr_t arsbuf;
10762 uint8_t ars_size;
10763 uint32_t ars_dmaaddrlow;
10764
10765 NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10766 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10767
10768 if (cmd->cmd_extrqslen != 0) {
10769 /*
10770 * Mapping of the buffer was done in
10771 * mptsas_do_passthru().
10772 * Calculate the DMA address with the same offset.
10773 */
10774 arsbuf = cmd->cmd_arq_buf;
10775 ars_size = cmd->cmd_extrqslen;
10776 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10777 ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
10778 0xffffffffu;
10779 } else {
10780 arsbuf = mpt->m_req_sense +
10781 (mpt->m_req_sense_size * (SMID-1));
10782 cmd->cmd_arq_buf = arsbuf;
10783 ars_size = mpt->m_req_sense_size;
10784 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10785 (mpt->m_req_sense_size * (SMID-1))) &
10786 0xffffffffu;
10787 }
10788 bzero(arsbuf, ars_size);
10789
10790 ddi_put8(acc_hdl, &scsi_io_req->SenseBufferLength, ars_size);
10791 ddi_put32(acc_hdl, &scsi_io_req->SenseBufferLowAddress,
10792 ars_dmaaddrlow);
10793
10794 /*
10795 * Put SGE for data and data_out buffer at the end of
10796 * scsi_io_request message header.(64 bytes in total)
10797 * Set SGLOffset0 value
10798 */
10799 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10800 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10801
10802 /*
10803 * Setup descriptor info. RAID passthrough must use the
10804 * default request descriptor which is already set, so if this
10805 * is a SCSI IO request, change the descriptor to SCSI IO.
10806 */
10807 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10808 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10809 request_desc = ((uint64_t)ddi_get16(acc_hdl,
10810 &scsi_io_req->DevHandle) << 48);
10811 }
10812 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
10813 DDI_DMA_SYNC_FORDEV);
10814 }
10815
10816 /*
10817 * We must wait till the message has been completed before
10818 * beginning the next message so we wait for this one to
10819 * finish.
10820 */
10821 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10822 request_desc |= (SMID << 16) + desc_type;
10823 cmd->cmd_rfm = NULL;
10824 MPTSAS_START_CMD(mpt, request_desc);
10825 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10826 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10827 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10828 }
10829 }
10830
10831 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10832 static mptsas_pre_f mpi_pre_ioc_facts;
10833 static mptsas_pre_f mpi_pre_port_facts;
10834 static mptsas_pre_f mpi_pre_fw_download;
10835 static mptsas_pre_f mpi_pre_fw_25_download;
10836 static mptsas_pre_f mpi_pre_fw_upload;
10837 static mptsas_pre_f mpi_pre_fw_25_upload;
10838 static mptsas_pre_f mpi_pre_sata_passthrough;
10839 static mptsas_pre_f mpi_pre_smp_passthrough;
10840 static mptsas_pre_f mpi_pre_config;
10841 static mptsas_pre_f mpi_pre_sas_io_unit_control;
10842 static mptsas_pre_f mpi_pre_scsi_io_req;
10843
10844 /*
10845 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10846 */
10847 static void
10848 mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10849 {
10850 pMpi2FWDownloadTCSGE_t tcsge;
10851 pMpi2FWDownloadRequest req;
10852
10853 /*
10854 * If SAS3, call separate function.
10855 */
10856 if (mpt->m_MPI25) {
10857 mpi_pre_fw_25_download(mpt, pt);
10858 return;
10859 }
10860
10861 /*
10862 * User requests should come in with the Transaction
10863 * context element where the SGL will go. Putting the
10864 * SGL after that seems to work, but don't really know
10865 * why. Other drivers tend to create an extra SGL and
10866 * refer to the TCE through that.
10867 */
10868 req = (pMpi2FWDownloadRequest)pt->request;
10869 tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10870 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10871 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10872 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10873 }
10874
10875 pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10876 sizeof (*tcsge);
10877 if (pt->request_size != pt->sgl_offset) {
10878 NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10879 "0x%x, should be 0x%x, dataoutsz 0x%x",
10880 (int)pt->request_size, (int)pt->sgl_offset,
10881 (int)pt->dataout_size));
10882 }
10883 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10884 NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10885 "0x%x, should be 0x%x", pt->data_size,
10886 (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10887 }
10888 }
10889
10890 /*
10891 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10892 */
10893 static void
10894 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10895 {
10896 pMpi2FWDownloadTCSGE_t tcsge;
10897 pMpi2FWDownloadRequest req2;
10898 pMpi25FWDownloadRequest req25;
10899
10900 /*
10901 * User requests should come in with the Transaction
10902 * context element where the SGL will go. The new firmware
10903 * Doesn't use TCE and has space in the main request for
10904 * this information. So move to the right place.
10905 */
10906 req2 = (pMpi2FWDownloadRequest)pt->request;
10907 req25 = (pMpi25FWDownloadRequest)pt->request;
10908 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10909 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10910 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10911 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10912 }
10913 req25->ImageOffset = tcsge->ImageOffset;
10914 req25->ImageSize = tcsge->ImageSize;
10915
10916 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10917 if (pt->request_size != pt->sgl_offset) {
10918 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10919 "0x%x, should be 0x%x, dataoutsz 0x%x",
10920 pt->request_size, pt->sgl_offset,
10921 pt->dataout_size));
10922 }
10923 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10924 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10925 "0x%x, should be 0x%x", pt->data_size,
10926 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10927 }
10928 }
10929
10930 /*
10931 * Prepare the pt for a SAS2 FW_UPLOAD request.
10932 */
10933 static void
10934 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10935 {
10936 pMpi2FWUploadTCSGE_t tcsge;
10937 pMpi2FWUploadRequest_t req;
10938
10939 /*
10940 * If SAS3, call separate function.
10941 */
10942 if (mpt->m_MPI25) {
10943 mpi_pre_fw_25_upload(mpt, pt);
10944 return;
10945 }
10946
10947 /*
10948 * User requests should come in with the Transaction
10949 * context element where the SGL will go. Putting the
10950 * SGL after that seems to work, but don't really know
10951 * why. Other drivers tend to create an extra SGL and
10952 * refer to the TCE through that.
10953 */
10954 req = (pMpi2FWUploadRequest_t)pt->request;
10955 tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10956 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10957 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10958 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10959 }
10960
10961 pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10962 sizeof (*tcsge);
10963 if (pt->request_size != pt->sgl_offset) {
10964 NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10965 "0x%x, should be 0x%x, dataoutsz 0x%x",
10966 pt->request_size, pt->sgl_offset,
10967 pt->dataout_size));
10968 }
10969 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10970 NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10971 "0x%x, should be 0x%x", pt->data_size,
10972 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10973 }
10974 }
10975
10976 /*
 * Prepare the pt for a SAS3 FW_UPLOAD request.
10978 */
10979 static void
10980 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10981 {
10982 pMpi2FWUploadTCSGE_t tcsge;
10983 pMpi2FWUploadRequest_t req2;
10984 pMpi25FWUploadRequest_t req25;
10985
10986 /*
10987 * User requests should come in with the Transaction
10988 * context element where the SGL will go. The new firmware
10989 * Doesn't use TCE and has space in the main request for
10990 * this information. So move to the right place.
10991 */
10992 req2 = (pMpi2FWUploadRequest_t)pt->request;
10993 req25 = (pMpi25FWUploadRequest_t)pt->request;
10994 tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10995 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10996 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10997 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10998 }
10999 req25->ImageOffset = tcsge->ImageOffset;
11000 req25->ImageSize = tcsge->ImageSize;
11001
11002 pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
11003 if (pt->request_size != pt->sgl_offset) {
11004 NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
11005 "0x%x, should be 0x%x, dataoutsz 0x%x",
11006 pt->request_size, pt->sgl_offset,
11007 pt->dataout_size));
11008 }
11009 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
11010 NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
11011 "0x%x, should be 0x%x", pt->data_size,
11012 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
11013 }
11014 }
11015
11016 /*
11017 * Prepare the pt for an IOC_FACTS request.
11018 */
11019 static void
11020 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
11021 {
11022 #ifndef __lock_lint
11023 _NOTE(ARGUNUSED(mpt))
11024 #endif
11025 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST)) {
11026 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
11027 "0x%x, should be 0x%x, dataoutsz 0x%x",
11028 pt->request_size,
11029 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
11030 pt->dataout_size));
11031 }
11032 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY)) {
11033 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
11034 "0x%x, should be 0x%x", pt->data_size,
11035 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
11036 }
11037 pt->sgl_offset = (uint16_t)pt->request_size;
11038 }
11039
11040 /*
11041 * Prepare the pt for a PORT_FACTS request.
11042 */
11043 static void
11044 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
11045 {
11046 #ifndef __lock_lint
11047 _NOTE(ARGUNUSED(mpt))
11048 #endif
11049 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST)) {
11050 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
11051 "0x%x, should be 0x%x, dataoutsz 0x%x",
11052 pt->request_size,
11053 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
11054 pt->dataout_size));
11055 }
11056 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY)) {
11057 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
11058 "0x%x, should be 0x%x", pt->data_size,
11059 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
11060 }
11061 pt->sgl_offset = (uint16_t)pt->request_size;
11062 }
11063
11064 /*
11065 * Prepare pt for a SATA_PASSTHROUGH request.
11066 */
11067 static void
11068 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
11069 {
11070 #ifndef __lock_lint
11071 _NOTE(ARGUNUSED(mpt))
11072 #endif
11073 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
11074 if (pt->request_size != pt->sgl_offset) {
11075 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
11076 "0x%x, should be 0x%x, dataoutsz 0x%x",
11077 pt->request_size, pt->sgl_offset,
11078 pt->dataout_size));
11079 }
11080 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY)) {
11081 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
11082 "0x%x, should be 0x%x", pt->data_size,
11083 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
11084 }
11085 }
11086
11087 static void
11088 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
11089 {
11090 #ifndef __lock_lint
11091 _NOTE(ARGUNUSED(mpt))
11092 #endif
11093 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
11094 if (pt->request_size != pt->sgl_offset) {
11095 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
11096 "0x%x, should be 0x%x, dataoutsz 0x%x",
11097 pt->request_size, pt->sgl_offset,
11098 pt->dataout_size));
11099 }
11100 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY)) {
11101 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
11102 "0x%x, should be 0x%x", pt->data_size,
11103 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
11104 }
11105 }
11106
11107 /*
11108 * Prepare pt for a CONFIG request.
11109 */
11110 static void
11111 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
11112 {
11113 #ifndef __lock_lint
11114 _NOTE(ARGUNUSED(mpt))
11115 #endif
11116 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
11117 if (pt->request_size != pt->sgl_offset) {
11118 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
11119 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11120 pt->sgl_offset, pt->dataout_size));
11121 }
11122 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY)) {
11123 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11124 "should be 0x%x", pt->data_size,
11125 (int)sizeof (MPI2_CONFIG_REPLY)));
11126 }
11127 pt->simple = 1;
11128 }
11129
11130 /*
11131 * Prepare pt for a SCSI_IO_REQ request.
11132 */
11133 static void
11134 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
11135 {
11136 #ifndef __lock_lint
11137 _NOTE(ARGUNUSED(mpt))
11138 #endif
11139 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
11140 if (pt->request_size != pt->sgl_offset) {
11141 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
11142 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11143 pt->sgl_offset,
11144 pt->dataout_size));
11145 }
11146 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY)) {
11147 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11148 "should be 0x%x", pt->data_size,
11149 (int)sizeof (MPI2_SCSI_IO_REPLY)));
11150 }
11151 }
11152
11153 /*
11154 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
11155 */
static void
mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(mpt))
#endif
	/*
	 * SAS_IO_UNIT_CONTROL has no SGL; point the (empty) SGL at the
	 * end of the request.  No size validation is performed here.
	 */
	pt->sgl_offset = (uint16_t)pt->request_size;
}
11164
11165 /*
11166 * A set of functions to prepare an mptsas_cmd for the various
11167 * supported requests.
11168 */
static struct mptsas_func {
	U8		Function;	/* MPI2_FUNCTION_* code */
	char		*Name;		/* name used in debug output */
	mptsas_pre_f	*f_pre;		/* request preparation routine */
} mptsas_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS", mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS", mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD", mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD", mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
	    mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
	    mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
	    mpi_pre_scsi_io_req},
	{ MPI2_FUNCTION_CONFIG, "CONFIG", mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
	    mpi_pre_sas_io_unit_control },
	{ 0xFF, NULL, NULL } /* list end */
};
11189
11190 static void
11191 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
11192 {
11193 pMPI2RequestHeader_t hdr;
11194 struct mptsas_func *f;
11195
11196 hdr = (pMPI2RequestHeader_t)pt->request;
11197
11198 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
11199 if (hdr->Function == f->Function) {
11200 f->f_pre(mpt, pt);
11201 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
11202 " sgl_offset 0x%x", f->Name,
11203 pt->sgl_offset));
11204 return;
11205 }
11206 }
11207 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
11208 " returning req_size 0x%x for sgl_offset",
11209 hdr->Function, pt->request_size));
11210 pt->sgl_offset = (uint16_t)pt->request_size;
11211 }
11212
11213
/*
 * Execute one user-level MPI2 passthrough request against the IOC.
 *
 * request/reply/data/dataout are user-space addresses; `mode' is the
 * ioctl mode used for ddi_copyin()/ddi_copyout().  Entered and exited
 * with mpt->m_mutex held, though the mutex is dropped around the user
 * copy operations.  Returns 0 on success or an errno value.  Cleanup is
 * driven by the pt_flags bits accumulated as resources are acquired.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				rvalue;
	int				i, status = 0, pt_flags = 0, rv = 0;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Point reply_msg at the zeroed local reply; it is redirected to
	 * the reply frame later if the IOC returns an ADDRESS reply.
	 */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	NDBG27(("mptsas_do_passthru: mode 0x%x, size 0x%x, Func 0x%x",
	    mode, request_size, request_msg->Function));
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task management requests are handled synchronously here rather
	 * than being queued through a command slot.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/*
	 * Allocate a DMA buffer for the data(-in) transfer.  For WRITE
	 * passthroughs the caller's `data' buffer seeds this DMA buffer
	 * (NOTE(review): the warning text says "read data" here —
	 * presumably a leftover; the copy direction is user -> kernel).
	 */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	} else {
		bzero(&data_dma_state, sizeof (data_dma_state));
	}

	/*
	 * Allocate and fill the data-out DMA buffer (always host->IOC).
	 */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	} else {
		bzero(&dataout_dma_state, sizeof (dataout_dma_state));
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * Describe the passthrough for mptsas_start_passthru(); the prep
	 * routine computes pt.sgl_offset (and pt.simple for CONFIG).
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.simple = 0;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;
	mptsas_prep_sgl_offset(mpt, &pt);

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	pkt->pkt_start = gethrtime();
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * For SCSI IO requests the CDB lives in the copied-in request;
	 * derive the CDB length from its group code and reserve extended
	 * request-sense space for whatever exceeds the reply structure.
	 */
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		uint8_t com, cdb_group_id;
		boolean_t ret;

		pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
		com = pkt->pkt_cdbp[0];
		cdb_group_id = CDB_GROUPID(com);
		switch (cdb_group_id) {
		case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
		case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
		case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
		case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
		case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
		default:
			NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
			    "CDBGROUP 0x%x requested!", cdb_group_id));
			break;
		}

		reply_len = sizeof (MPI2_SCSI_IO_REPLY);
		sense_len = reply_size - reply_len;
		ret = mptsas_cmdarqsize(mpt, cmd, sense_len, KM_SLEEP);
		VERIFY(ret == B_TRUE);
	} else {
		reply_len = reply_size;
		sense_len = 0;
	}

	NDBG27(("mptsas_do_passthru: %s, dsz 0x%x, dosz 0x%x, replen 0x%x, "
	    "snslen 0x%x",
	    (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE)?"Write":"Read",
	    data_size, dataout_size, reply_len, sense_len));

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the interrupt path marks the command finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	NDBG27(("mptsas_do_passthru: Cmd complete, flags 0x%x, rfm 0x%x "
	    "pktreason 0x%x", cmd->cmd_flags, cmd->cmd_rfm,
	    pkt->pkt_reason));

	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/*
	 * Copy the reply (and, for SCSI IO, trailing sense data) out to
	 * the caller.  The mutex is dropped for the copyout; the command
	 * still owns its slot so the frame cannot be reused underneath us.
	 */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = cmd->cmd_extrqslen ?
			    min(sense_len, cmd->cmd_extrqslen) :
			    min(sense_len, cmd->cmd_rqslen);
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/*
		 * NOTE(review): sense data is read from offset 64 within
		 * the request frame — presumably where sense is deposited
		 * for passthrough commands; confirm against the sense
		 * buffer setup in mptsas_start_passthru().
		 */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/*
	 * For read (and read half of bidirectional) transfers, sync the
	 * DMA buffer for the CPU and copy it out to the caller.
	 */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * Release resources in the reverse order they were acquired;
	 * removing a prepared command from its slot implies it no longer
	 * needs the explicit return-to-pool below.
	 */
	if (cmd) {
		if (cmd->cmd_extrqslen != 0) {
			rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
			    cmd->cmd_extrqsidx + 1);
		}
		if (cmd->cmd_flags & CFLAG_PREPARED) {
			mptsas_remove_cmd(mpt, cmd);
			pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
		}
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out passthrough leaves the IOC in an unknown state. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);
	NDBG27(("mptsas_do_passthru: Done status 0x%x", status));

	return (status);
}
11575
11576 static int
11577 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11578 {
11579 /*
11580 * If timeout is 0, set timeout to default of 60 seconds.
11581 */
11582 if (data->Timeout == 0) {
11583 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11584 }
11585
11586 if (((data->DataSize == 0) &&
11587 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
11588 ((data->DataSize != 0) &&
11589 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
11590 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
11591 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
11592 (data->DataOutSize != 0))))) {
11593 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
11594 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
11595 } else {
11596 data->DataOutSize = 0;
11597 }
11598 /*
11599 * Send passthru request messages
11600 */
11601 return (mptsas_do_passthru(mpt,
11602 (uint8_t *)((uintptr_t)data->PtrRequest),
11603 (uint8_t *)((uintptr_t)data->PtrReply),
11604 (uint8_t *)((uintptr_t)data->PtrData),
11605 data->RequestSize, data->ReplySize,
11606 data->DataSize, data->DataDirection,
11607 (uint8_t *)((uintptr_t)data->PtrDataOut),
11608 data->DataOutSize, data->Timeout, mode));
11609 } else {
11610 return (EINVAL);
11611 }
11612 }
11613
11614 static uint8_t
11615 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11616 {
11617 uint8_t index;
11618
11619 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11620 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11621 return (index);
11622 }
11623 }
11624
11625 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11626 }
11627
/*
 * Build and fire a firmware diagnostic request (buffer POST or RELEASE)
 * using the request frame that belongs to cmd's slot.
 *
 * cmd->cmd_pkt->pkt_ha_private must point at a mptsas_diag_request_t naming
 * the operation (diag->function) and the target buffer (diag->pBuffer).
 * Must be called with m_mutex held (asserted below).  Completion is
 * delivered asynchronously via the normal reply path; this routine only
 * starts the request.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t pDiag_post_msg;
	pMpi2DiagReleaseRequest_t pDiag_release_msg;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	mptsas_diag_request_t *diag = pkt->pkt_ha_private;
	uint32_t i;
	uint64_t request_desc;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 * All fields are written through the request-frame access handle so
	 * they land in device endianness.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* Request frame for this slot; zeroed before use. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the caller-supplied product-specific words, 32 bits
		 * at a time. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE: only function and buffer type are needed. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* Slot number lives in bits 16+ of the default request descriptor. */
	request_desc = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	/* No reply frame yet; the reply path fills cmd_rfm on completion. */
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc);
	/* FMA: flag (but don't fail) any DMA/access handle error. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
11700
/*
 * Post a firmware diagnostic buffer to the IOC so the firmware can begin
 * filling it.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE and sets *return_code to a
 * MPTSAS_FW_DIAG_ERROR_* code for the ioctl caller.  On success the buffer
 * is marked valid_data and owned_by_firmware.  Must be called with m_mutex
 * held: the command is issued via mptsas_start_diag() (or queued on the
 * wait queue) and this thread then blocks on m_fw_diag_cv until the reply
 * path marks the command CFLAG_FINISHED.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t diag;
	int status, slot_num, post_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagBufferPostReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	/* From here on the command must be returned to the pool on exit. */
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* Describe the POST operation for mptsas_start_diag(). */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No free slot: queue it; the drain path will start it. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block (m_mutex is dropped inside cv_wait) until completion. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		/* cmd_rfm is the bus address; convert to a kernel pointer. */
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * mptsas_remove_cmd() also returns the command to the pool, so the
	 * explicit mptsas_return_to_pool() below is skipped in that case.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11851
/*
 * Ask the firmware to release a previously posted diagnostic buffer.
 *
 * diag_type distinguishes an explicit RELEASE from a release done as part
 * of UNREGISTER; for the latter the buffer's unique ID is invalidated on
 * success.  Returns DDI_SUCCESS/DDI_FAILURE and sets *return_code to a
 * MPTSAS_FW_DIAG_ERROR_* code.  Must be called with m_mutex held: like
 * mptsas_post_fw_diag_buffer(), it issues the request and then waits on
 * m_fw_diag_cv for completion.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t diag;
	int status, slot_num, rel_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagReleaseReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	/* From here on the command must be returned to the pool on exit. */
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* Describe the RELEASE operation for mptsas_start_diag(). */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No free slot: queue it; the drain path will start it. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block (m_mutex is dropped inside cv_wait) until completion. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		/* cmd_rfm is the bus address; convert to a kernel pointer. */
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.  owned_by_firmware was cleared
		 * above and may have been re-set by the reply handler.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * mptsas_remove_cmd() also returns the command to the pool, so the
	 * explicit mptsas_return_to_pool() below is skipped in that case.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
12011
/*
 * Service the "register" FW diagnostic ioctl: allocate a DMA buffer of the
 * requested size, record the registration under the caller's unique ID, and
 * post the buffer to the firmware.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE; on failure *return_code carries a
 * MPTSAS_FW_DIAG_ERROR_* code and any DMA buffer allocated here is freed.
 * Must be called with m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t *pBuffer;
	uint8_t extended_type, buffer_type, i;
	uint32_t buffer_size;
	uint32_t unique_id;
	int status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type (it indexes m_fw_diag_buffer_list).
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID. The unique ID
	 * should not be found. If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 * (The unique_id test restates the invariant established by the
	 * check above: the slot is currently unregistered.)
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled. The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 * Product-specific words only apply to TRACE buffers.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	/* FMA check on the newly allocated DMA handle. */
	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
12118
12119 static int
12120 mptsas_diag_unregister(mptsas_t *mpt,
12121 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
12122 {
12123 mptsas_fw_diagnostic_buffer_t *pBuffer;
12124 uint8_t i;
12125 uint32_t unique_id;
12126 int status;
12127
12128 ASSERT(mutex_owned(&mpt->m_mutex));
12129
12130 unique_id = diag_unregister->UniqueId;
12131
12132 /*
12133 * Get the current buffer and look up the unique ID. The unique ID
12134 * should be there.
12135 */
12136 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12137 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12138 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12139 return (DDI_FAILURE);
12140 }
12141
12142 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12143
12144 /*
12145 * Try to release the buffer from FW before freeing it. If release
12146 * fails, don't free the DMA buffer in case FW tries to access it
12147 * later. If buffer is not owned by firmware, can't release it.
12148 */
12149 if (!pBuffer->owned_by_firmware) {
12150 status = DDI_SUCCESS;
12151 } else {
12152 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
12153 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
12154 }
12155
12156 /*
12157 * At this point, return the current status no matter what happens with
12158 * the DMA buffer.
12159 */
12160 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
12161 if (status == DDI_SUCCESS) {
12162 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
12163 DDI_SUCCESS) {
12164 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
12165 "in mptsas_diag_unregister.");
12166 ddi_fm_service_impact(mpt->m_dip,
12167 DDI_SERVICE_UNAFFECTED);
12168 }
12169 mptsas_dma_free(&pBuffer->buffer_data);
12170 }
12171
12172 return (status);
12173 }
12174
12175 static int
12176 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
12177 uint32_t *return_code)
12178 {
12179 mptsas_fw_diagnostic_buffer_t *pBuffer;
12180 uint8_t i;
12181 uint32_t unique_id;
12182
12183 ASSERT(mutex_owned(&mpt->m_mutex));
12184
12185 unique_id = diag_query->UniqueId;
12186
12187 /*
12188 * If ID is valid, query on ID.
12189 * If ID is invalid, query on buffer type.
12190 */
12191 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
12192 i = diag_query->BufferType;
12193 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
12194 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12195 return (DDI_FAILURE);
12196 }
12197 } else {
12198 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12199 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12200 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12201 return (DDI_FAILURE);
12202 }
12203 }
12204
12205 /*
12206 * Fill query structure with the diag buffer info.
12207 */
12208 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12209 diag_query->BufferType = pBuffer->buffer_type;
12210 diag_query->ExtendedType = pBuffer->extended_type;
12211 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
12212 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
12213 i++) {
12214 diag_query->ProductSpecific[i] =
12215 pBuffer->product_specific[i];
12216 }
12217 }
12218 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
12219 diag_query->DriverAddedBufferSize = 0;
12220 diag_query->UniqueId = pBuffer->unique_id;
12221 diag_query->ApplicationFlags = 0;
12222 diag_query->DiagnosticFlags = 0;
12223
12224 /*
12225 * Set/Clear application flags
12226 */
12227 if (pBuffer->immediate) {
12228 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12229 } else {
12230 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12231 }
12232 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
12233 diag_query->ApplicationFlags |=
12234 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12235 } else {
12236 diag_query->ApplicationFlags &=
12237 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12238 }
12239 if (pBuffer->owned_by_firmware) {
12240 diag_query->ApplicationFlags |=
12241 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12242 } else {
12243 diag_query->ApplicationFlags &=
12244 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12245 }
12246
12247 return (DDI_SUCCESS);
12248 }
12249
12250 static int
12251 mptsas_diag_read_buffer(mptsas_t *mpt,
12252 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
12253 uint32_t *return_code, int ioctl_mode)
12254 {
12255 mptsas_fw_diagnostic_buffer_t *pBuffer;
12256 uint8_t i, *pData;
12257 uint32_t unique_id, byte;
12258 int status;
12259
12260 ASSERT(mutex_owned(&mpt->m_mutex));
12261
12262 unique_id = diag_read_buffer->UniqueId;
12263
12264 /*
12265 * Get the current buffer and look up the unique ID. The unique ID
12266 * should be there.
12267 */
12268 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12269 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12270 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12271 return (DDI_FAILURE);
12272 }
12273
12274 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12275
12276 /*
12277 * Make sure requested read is within limits
12278 */
12279 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
12280 pBuffer->buffer_data.size) {
12281 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12282 return (DDI_FAILURE);
12283 }
12284
12285 /*
12286 * Copy the requested data from DMA to the diag_read_buffer. The DMA
12287 * buffer that was allocated is one contiguous buffer.
12288 */
12289 pData = (uint8_t *)(pBuffer->buffer_data.memp +
12290 diag_read_buffer->StartingOffset);
12291 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
12292 DDI_DMA_SYNC_FORCPU);
12293 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
12294 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
12295 != 0) {
12296 return (DDI_FAILURE);
12297 }
12298 }
12299 diag_read_buffer->Status = 0;
12300
12301 /*
12302 * Set or clear the Force Release flag.
12303 */
12304 if (pBuffer->force_release) {
12305 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12306 } else {
12307 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12308 }
12309
12310 /*
12311 * If buffer is to be reregistered, make sure it's not already owned by
12312 * firmware first.
12313 */
12314 status = DDI_SUCCESS;
12315 if (!pBuffer->owned_by_firmware) {
12316 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
12317 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
12318 return_code);
12319 }
12320 }
12321
12322 return (status);
12323 }
12324
12325 static int
12326 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
12327 uint32_t *return_code)
12328 {
12329 mptsas_fw_diagnostic_buffer_t *pBuffer;
12330 uint8_t i;
12331 uint32_t unique_id;
12332 int status;
12333
12334 ASSERT(mutex_owned(&mpt->m_mutex));
12335
12336 unique_id = diag_release->UniqueId;
12337
12338 /*
12339 * Get the current buffer and look up the unique ID. The unique ID
12340 * should be there.
12341 */
12342 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12343 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12344 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12345 return (DDI_FAILURE);
12346 }
12347
12348 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12349
12350 /*
12351 * If buffer is not owned by firmware, it's already been released.
12352 */
12353 if (!pBuffer->owned_by_firmware) {
12354 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
12355 return (DDI_FAILURE);
12356 }
12357
12358 /*
12359 * Release the buffer.
12360 */
12361 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
12362 MPTSAS_FW_DIAG_TYPE_RELEASE);
12363 return (status);
12364 }
12365
12366 static int
12367 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
12368 uint32_t length, uint32_t *return_code, int ioctl_mode)
12369 {
12370 mptsas_fw_diag_register_t diag_register;
12371 mptsas_fw_diag_unregister_t diag_unregister;
12372 mptsas_fw_diag_query_t diag_query;
12373 mptsas_diag_read_buffer_t diag_read_buffer;
12374 mptsas_fw_diag_release_t diag_release;
12375 int status = DDI_SUCCESS;
12376 uint32_t original_return_code, read_buf_len;
12377
12378 ASSERT(mutex_owned(&mpt->m_mutex));
12379
12380 original_return_code = *return_code;
12381 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
12382
12383 switch (action) {
12384 case MPTSAS_FW_DIAG_TYPE_REGISTER:
12385 if (!length) {
12386 *return_code =
12387 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12388 status = DDI_FAILURE;
12389 break;
12390 }
12391 if (ddi_copyin(diag_action, &diag_register,
12392 sizeof (diag_register), ioctl_mode) != 0) {
12393 return (DDI_FAILURE);
12394 }
12395 status = mptsas_diag_register(mpt, &diag_register,
12396 return_code);
12397 break;
12398
12399 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
12400 if (length < sizeof (diag_unregister)) {
12401 *return_code =
12402 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12403 status = DDI_FAILURE;
12404 break;
12405 }
12406 if (ddi_copyin(diag_action, &diag_unregister,
12407 sizeof (diag_unregister), ioctl_mode) != 0) {
12408 return (DDI_FAILURE);
12409 }
12410 status = mptsas_diag_unregister(mpt, &diag_unregister,
12411 return_code);
12412 break;
12413
12414 case MPTSAS_FW_DIAG_TYPE_QUERY:
12415 if (length < sizeof (diag_query)) {
12416 *return_code =
12417 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12418 status = DDI_FAILURE;
12419 break;
12420 }
12421 if (ddi_copyin(diag_action, &diag_query,
12422 sizeof (diag_query), ioctl_mode) != 0) {
12423 return (DDI_FAILURE);
12424 }
12425 status = mptsas_diag_query(mpt, &diag_query,
12426 return_code);
12427 if (status == DDI_SUCCESS) {
12428 if (ddi_copyout(&diag_query, diag_action,
12429 sizeof (diag_query), ioctl_mode) != 0) {
12430 return (DDI_FAILURE);
12431 }
12432 }
12433 break;
12434
12435 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
12436 if (ddi_copyin(diag_action, &diag_read_buffer,
12437 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
12438 return (DDI_FAILURE);
12439 }
12440 read_buf_len = sizeof (diag_read_buffer) -
12441 sizeof (diag_read_buffer.DataBuffer) +
12442 diag_read_buffer.BytesToRead;
12443 if (length < read_buf_len) {
12444 *return_code =
12445 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12446 status = DDI_FAILURE;
12447 break;
12448 }
12449 status = mptsas_diag_read_buffer(mpt,
12450 &diag_read_buffer, diag_action +
12451 sizeof (diag_read_buffer) - 4, return_code,
12452 ioctl_mode);
12453 if (status == DDI_SUCCESS) {
12454 if (ddi_copyout(&diag_read_buffer, diag_action,
12455 sizeof (diag_read_buffer) - 4, ioctl_mode)
12456 != 0) {
12457 return (DDI_FAILURE);
12458 }
12459 }
12460 break;
12461
12462 case MPTSAS_FW_DIAG_TYPE_RELEASE:
12463 if (length < sizeof (diag_release)) {
12464 *return_code =
12465 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12466 status = DDI_FAILURE;
12467 break;
12468 }
12469 if (ddi_copyin(diag_action, &diag_release,
12470 sizeof (diag_release), ioctl_mode) != 0) {
12471 return (DDI_FAILURE);
12472 }
12473 status = mptsas_diag_release(mpt, &diag_release,
12474 return_code);
12475 break;
12476
12477 default:
12478 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12479 status = DDI_FAILURE;
12480 break;
12481 }
12482
12483 if ((status == DDI_FAILURE) &&
12484 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
12485 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
12486 status = DDI_SUCCESS;
12487 }
12488
12489 return (status);
12490 }
12491
12492 static int
12493 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
12494 {
12495 int status;
12496 mptsas_diag_action_t driver_data;
12497
12498 ASSERT(mutex_owned(&mpt->m_mutex));
12499
12500 /*
12501 * Copy the user data to a driver data buffer.
12502 */
12503 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
12504 mode) == 0) {
12505 /*
12506 * Send diag action request if Action is valid
12507 */
12508 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
12509 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
12510 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
12511 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
12512 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
12513 status = mptsas_do_diag_action(mpt, driver_data.Action,
12514 (void *)(uintptr_t)driver_data.PtrDiagAction,
12515 driver_data.Length, &driver_data.ReturnCode,
12516 mode);
12517 if (status == DDI_SUCCESS) {
12518 if (ddi_copyout(&driver_data.ReturnCode,
12519 &user_data->ReturnCode,
12520 sizeof (user_data->ReturnCode), mode)
12521 != 0) {
12522 status = EFAULT;
12523 } else {
12524 status = 0;
12525 }
12526 } else {
12527 status = EIO;
12528 }
12529 } else {
12530 status = EINVAL;
12531 }
12532 } else {
12533 status = EFAULT;
12534 }
12535
12536 return (status);
12537 }
12538
12539 /*
12540 * This routine handles the "event query" ioctl.
12541 */
12542 static int
12543 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
12544 int *rval)
12545 {
12546 int status;
12547 mptsas_event_query_t driverdata;
12548 uint8_t i;
12549
12550 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
12551
12552 mutex_enter(&mpt->m_mutex);
12553 for (i = 0; i < 4; i++) {
12554 driverdata.Types[i] = mpt->m_event_mask[i];
12555 }
12556 mutex_exit(&mpt->m_mutex);
12557
12558 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
12559 status = EFAULT;
12560 } else {
12561 *rval = MPTIOCTL_STATUS_GOOD;
12562 status = 0;
12563 }
12564
12565 return (status);
12566 }
12567
12568 /*
12569 * This routine handles the "event enable" ioctl.
12570 */
12571 static int
12572 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
12573 int *rval)
12574 {
12575 int status;
12576 mptsas_event_enable_t driverdata;
12577 uint8_t i;
12578
12579 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12580 mutex_enter(&mpt->m_mutex);
12581 for (i = 0; i < 4; i++) {
12582 mpt->m_event_mask[i] = driverdata.Types[i];
12583 }
12584 mutex_exit(&mpt->m_mutex);
12585
12586 *rval = MPTIOCTL_STATUS_GOOD;
12587 status = 0;
12588 } else {
12589 status = EFAULT;
12590 }
12591 return (status);
12592 }
12593
12594 /*
12595 * This routine handles the "event report" ioctl.
12596 */
12597 static int
12598 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
12599 int *rval)
12600 {
12601 int status;
12602 mptsas_event_report_t driverdata;
12603
12604 mutex_enter(&mpt->m_mutex);
12605
12606 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
12607 mode) == 0) {
12608 if (driverdata.Size >= sizeof (mpt->m_events)) {
12609 if (ddi_copyout(mpt->m_events, data->Events,
12610 sizeof (mpt->m_events), mode) != 0) {
12611 status = EFAULT;
12612 } else {
12613 if (driverdata.Size > sizeof (mpt->m_events)) {
12614 driverdata.Size =
12615 sizeof (mpt->m_events);
12616 if (ddi_copyout(&driverdata.Size,
12617 &data->Size,
12618 sizeof (driverdata.Size),
12619 mode) != 0) {
12620 status = EFAULT;
12621 } else {
12622 *rval = MPTIOCTL_STATUS_GOOD;
12623 status = 0;
12624 }
12625 } else {
12626 *rval = MPTIOCTL_STATUS_GOOD;
12627 status = 0;
12628 }
12629 }
12630 } else {
12631 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12632 status = 0;
12633 }
12634 } else {
12635 status = EFAULT;
12636 }
12637
12638 mutex_exit(&mpt->m_mutex);
12639 return (status);
12640 }
12641
/*
 * Fill in the PCI bus/device/function fields of *adapter_data by decoding
 * the first DWORD of the device node's "reg" property, and record the
 * firmware version cached in mpt->m_fwversion.
 *
 * Note the asymmetry: only the fallback path writes PCIDeviceHwId and
 * MpiPortNumber (to all-FFs).  On the success path those fields are left
 * untouched here -- presumably the caller (mptsas_read_adapter_data) fills
 * them in afterwards.
 */
static void
mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
{
	int *reg_data;
	uint_t reglen;

	/*
	 * Lookup the 'reg' property and extract the other data
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
	    DDI_PROP_SUCCESS) {
		/*
		 * Extract the PCI data from the 'reg' property first DWORD.
		 * The entry looks like the following:
		 * First DWORD:
		 *	Bits 0 - 7 8-bit Register number
		 *	Bits 8 - 10 3-bit Function number
		 *	Bits 11 - 15 5-bit Device number
		 *	Bits 16 - 23 8-bit Bus number
		 *	Bits 24 - 25 2-bit Address Space type identifier
		 *
		 */
		adapter_data->PciInformation.u.bits.BusNumber =
		    (reg_data[0] & 0x00FF0000) >> 16;
		adapter_data->PciInformation.u.bits.DeviceNumber =
		    (reg_data[0] & 0x0000F800) >> 11;
		adapter_data->PciInformation.u.bits.FunctionNumber =
		    (reg_data[0] & 0x00000700) >> 8;
		/* Property memory is allocated by the lookup; release it. */
		ddi_prop_free((void *)reg_data);
	} else {
		/*
		 * If we can't determine the PCI data then we fill in FF's for
		 * the data to indicate this.
		 */
		adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
		adapter_data->MpiPortNumber = 0xFFFFFFFF;
		adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
	}

	/*
	 * Saved in the mpt->m_fwversion
	 */
	adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
}
12687
12688 static void
12689 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12690 {
12691 char *driver_verstr = MPTSAS_MOD_STRING;
12692
12693 mptsas_lookup_pci_data(mpt, adapter_data);
12694 adapter_data->AdapterType = mpt->m_MPI25 ?
12695 MPTIOCTL_ADAPTER_TYPE_SAS3 :
12696 MPTIOCTL_ADAPTER_TYPE_SAS2;
12697 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12698 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12699 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12700 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12701 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12702 adapter_data->BiosVersion = 0;
12703 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12704 }
12705
12706 static void
12707 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12708 {
12709 int *reg_data, i;
12710 uint_t reglen;
12711
12712 /*
12713 * Lookup the 'reg' property and extract the other data
12714 */
12715 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12716 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12717 DDI_PROP_SUCCESS) {
12718 /*
12719 * Extract the PCI data from the 'reg' property first DWORD.
12720 * The entry looks like the following:
12721 * First DWORD:
12722 * Bits 8 - 10 3-bit Function number
12723 * Bits 11 - 15 5-bit Device number
12724 * Bits 16 - 23 8-bit Bus number
12725 */
12726 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12727 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12728 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12729 ddi_prop_free((void *)reg_data);
12730 } else {
12731 /*
12732 * If we can't determine the PCI info then we fill in FF's for
12733 * the data to indicate this.
12734 */
12735 pci_info->BusNumber = 0xFFFFFFFF;
12736 pci_info->DeviceNumber = 0xFF;
12737 pci_info->FunctionNumber = 0xFF;
12738 }
12739
12740 /*
12741 * Now get the interrupt vector and the pci header. The vector can
12742 * only be 0 right now. The header is the first 256 bytes of config
12743 * space.
12744 */
12745 pci_info->InterruptVector = 0;
12746 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12747 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12748 i);
12749 }
12750 }
12751
/*
 * MPTIOCTL_REG_ACCESS handler: read or write one 32-bit MPT memory-mapped
 * register on behalf of a user-level tool.
 *
 * data points at a user-space mptsas_reg_access_t; mode carries the
 * ddi_copyin()/ddi_copyout() flags.  Returns 0 on success, EINVAL for
 * unsupported commands, EFAULT on a copyin/copyout failure.
 *
 * Note that RegOffset is added to a (uint32_t *), so it is an offset in
 * 32-bit words, not bytes.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int	status = 0;
	mptsas_reg_access_t	driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
		/*
		 * IO access is not supported.
		 */
		case REG_IO_READ:
		case REG_IO_WRITE:
			mptsas_log(mpt, CE_WARN, "IO access is not "
			    "supported. Use memory access.");
			status = EINVAL;
			break;

		case REG_MEM_READ:
			/* Read the register and copy it back to the caller. */
			driverdata.RegData = ddi_get32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset);
			if (ddi_copyout(&driverdata.RegData,
			    &data->RegData,
			    sizeof (driverdata.RegData), mode) != 0) {
				mptsas_log(mpt, CE_WARN, "Register "
				    "Read Failed");
				status = EFAULT;
			}
			break;

		case REG_MEM_WRITE:
			/* Write the caller-supplied value to the register. */
			ddi_put32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset,
			    driverdata.RegData);
			break;

		default:
			status = EINVAL;
			break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
12802
/*
 * MPTIOCTL_LED_CONTROL handler: get or set one per-slot enclosure LED.
 *
 * data points at a user-space mptsas_led_control_t; mode carries both
 * the open flags (FREAD/FWRITE) and the copy flags.  Returns 0 on
 * success, EFAULT on copy failures, EINVAL for malformed requests,
 * EACCES when the open mode does not permit the command, ENOENT when
 * the enclosure or slot does not exist.
 */
static int
led_control(mptsas_t *mpt, intptr_t data, int mode)
{
	int	ret = 0;
	mptsas_led_control_t lc;
	mptsas_enclosure_t *mep;
	uint16_t slotidx;

	if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
		return (EFAULT);
	}

	/*
	 * Validate the request: command must be GET or SET, the LED id must
	 * be within [MPTSAS_LEDCTL_LED_MIN, MPTSAS_LEDCTL_LED_MAX], and a
	 * SET may only carry a status of 0 or 1.
	 */
	if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
	    lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
	    lc.Led < MPTSAS_LEDCTL_LED_MIN ||
	    lc.Led > MPTSAS_LEDCTL_LED_MAX ||
	    (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
	    lc.LedStatus != 1)) {
		return (EINVAL);
	}

	/* SET requires write access to the device node, GET requires read. */
	if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
	    (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
		return (EACCES);

	/* Locate the required enclosure */
	mutex_enter(&mpt->m_mutex);
	mep = mptsas_enc_lookup(mpt, lc.Enclosure);
	if (mep == NULL) {
		mutex_exit(&mpt->m_mutex);
		return (ENOENT);
	}

	if (lc.Slot < mep->me_fslot) {
		mutex_exit(&mpt->m_mutex);
		return (ENOENT);
	}

	/*
	 * Slots on the enclosure are maintained in array where me_fslot is
	 * entry zero. We normalize the requested slot.
	 */
	slotidx = lc.Slot - mep->me_fslot;
	if (slotidx >= mep->me_nslots) {
		mutex_exit(&mpt->m_mutex);
		return (ENOENT);
	}

	if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
		/*
		 * Update our internal LED state.  LED ids are 1-based,
		 * hence the (Led - 1) bit position.
		 */
		mep->me_slotleds[slotidx] &= ~(1 << (lc.Led - 1));
		mep->me_slotleds[slotidx] |= lc.LedStatus << (lc.Led - 1);

		/* Flush it to the controller. */
		ret = mptsas_flush_led_status(mpt, mep, slotidx);
		mutex_exit(&mpt->m_mutex);
		return (ret);
	}

	/* Return our internal LED state. */
	lc.LedStatus = (mep->me_slotleds[slotidx] >> (lc.Led - 1)) & 1;
	mutex_exit(&mpt->m_mutex);

	if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
		return (EFAULT);
	}

	return (0);
}
12872
/*
 * MPTIOCTL_GET_DISK_INFO handler: report the number of known targets
 * and, optionally, a per-target array of instance/enclosure/slot/WWN.
 *
 * The STRUCT_* macros handle the 32-bit vs. 64-bit user data models.
 * The target hash is walked twice: once (under m_mutex) to count, and
 * again to fill the array; if the count grows between the passes we
 * give up with EAGAIN rather than overrun the allocation.
 *
 * Returns 0 on success, EACCES when not open for reading, EFAULT on
 * copy failures, ENOSPC when the caller's buffer is too small, EAGAIN
 * on the race described above.  The updated DiskCount is copied back
 * to the caller even on the ENOSPC path.
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t i = 0;
	uint16_t count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* i may be smaller than count if targets disappeared meanwhile. */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12959
/*
 * Character-device ioctl entry point, shared by the HBA node and its
 * iport children.  Requires sys_config privilege.  After ensuring the
 * device is powered up (D0), iport opens are routed to scsi_hba_ioctl()
 * (with extra OK2RM LED bookkeeping for online/offline), while HBA
 * opens dispatch the MPTIOCTL_* commands to their helpers or fall back
 * to scsi_hba_ioctl().
 *
 * *rval carries the MPTIOCTL_STATUS_* result for the MPT ioctls; the
 * function return value is the usual errno-style status.
 *
 * NOTE(review): pm_busy_component() is taken here when MPTSAS_OPT_PM is
 * set, but no matching pm_idle_component() is visible on the normal
 * return path in this function — verify the idle call is made elsewhere.
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;
	struct devctl_iocdata *dcp = NULL;
	char *addr = NULL;
	mptsas_target_t *ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	/* Only privileged callers may reach the hardware. */
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "raise power request failed");
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	if (iport_flag) {
		/* iport nodes are handled by the generic SCSI HBA layer. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		if (status != 0) {
			goto out;
		}
		/*
		 * The following code control the OK2RM LED, it doesn't affect
		 * the ioctl return status.
		 */
		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
			if (ndi_dc_allochdl((void *)data, &dcp) !=
			    NDI_SUCCESS) {
				goto out;
			}
			addr = ndi_dc_getaddr(dcp);
			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
			if (ptgt == NULL) {
				NDBG14(("mptsas_ioctl led control: tgt %s not "
				    "found", addr));
				ndi_dc_freehdl(dcp);
				goto out;
			}
			ndi_dc_freehdl(dcp);
		}
		goto out;
	}
	switch (cmd) {
	case MPTIOCTL_GET_DISK_INFO:
		status = get_disk_info(mpt, data, mode);
		break;
	case MPTIOCTL_LED_CONTROL:
		status = led_control(mpt, data, mode);
		break;
	case MPTIOCTL_UPDATE_FLASH:
		if (ddi_copyin((void *)data, &flashdata,
		    sizeof (struct mptsas_update_flash), mode)) {
			status = EFAULT;
			break;
		}

		mutex_enter(&mpt->m_mutex);
		if (mptsas_update_flash(mpt,
		    (caddr_t)(long)flashdata.PtrBuffer,
		    flashdata.ImageSize, flashdata.ImageType, mode)) {
			status = EFAULT;
		}

		/*
		 * Reset the chip to start using the new
		 * firmware.  Reset if failed also.
		 */
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_PASS_THRU:
		/*
		 * The user has requested to pass through a command to
		 * be executed by the MPT firmware.  Call our routine
		 * which does this.  Only allow one passthru IOCTL at
		 * one time. Other threads will block on
		 * m_passthru_mutex, which is of adaptive variant.
		 */
		if (ddi_copyin((void *)data, &passthru_data,
		    sizeof (mptsas_pass_thru_t), mode)) {
			status = EFAULT;
			break;
		}
		mutex_enter(&mpt->m_passthru_mutex);
		mutex_enter(&mpt->m_mutex);
		status = mptsas_pass_thru(mpt, &passthru_data, mode);
		mutex_exit(&mpt->m_mutex);
		mutex_exit(&mpt->m_passthru_mutex);

		break;
	case MPTIOCTL_GET_ADAPTER_DATA:
		/*
		 * The user has requested to read adapter data.  Call
		 * our routine which does this.
		 */
		bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
		if (ddi_copyin((void *)data, (void *)&adapter_data,
		    sizeof (mptsas_adapter_data_t), mode)) {
			status = EFAULT;
			break;
		}
		if (adapter_data.StructureLength >=
		    sizeof (mptsas_adapter_data_t)) {
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (mptsas_adapter_data_t);
			mutex_enter(&mpt->m_mutex);
			mptsas_read_adapter_data(mpt, &adapter_data);
			mutex_exit(&mpt->m_mutex);
		} else {
			/*
			 * Caller's struct is too small: report the required
			 * length only and flag the short length in *rval.
			 */
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (adapter_data.StructureLength);
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
		}
		if (ddi_copyout((void *)(&adapter_data), (void *)data,
		    copylen, mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_GET_PCI_INFO:
		/*
		 * The user has requested to read pci info.  Call
		 * our routine which does this.
		 */
		bzero(&pci_info, sizeof (mptsas_pci_info_t));
		mutex_enter(&mpt->m_mutex);
		mptsas_read_pci_info(mpt, &pci_info);
		mutex_exit(&mpt->m_mutex);
		if (ddi_copyout((void *)(&pci_info), (void *)data,
		    sizeof (mptsas_pci_info_t), mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_RESET_ADAPTER:
		mutex_enter(&mpt->m_mutex);
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
			    "failed");
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_DIAG_ACTION:
		/*
		 * The user has done a diag buffer action.  Call our
		 * routine which does this.  Only allow one diag action
		 * at one time.
		 */
		mutex_enter(&mpt->m_mutex);
		if (mpt->m_diag_action_in_progress) {
			mutex_exit(&mpt->m_mutex);
			return (EBUSY);
		}
		mpt->m_diag_action_in_progress = 1;
		status = mptsas_diag_action(mpt,
		    (mptsas_diag_action_t *)data, mode);
		mpt->m_diag_action_in_progress = 0;
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_EVENT_QUERY:
		/*
		 * The user has done an event query. Call our routine
		 * which does this.
		 */
		status = mptsas_event_query(mpt,
		    (mptsas_event_query_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_ENABLE:
		/*
		 * The user has done an event enable. Call our routine
		 * which does this.
		 */
		status = mptsas_event_enable(mpt,
		    (mptsas_event_enable_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_REPORT:
		/*
		 * The user has done an event report. Call our routine
		 * which does this.
		 */
		status = mptsas_event_report(mpt,
		    (mptsas_event_report_t *)data, mode, rval);
		break;
	case MPTIOCTL_REG_ACCESS:
		/*
		 * The user has requested register access.  Call our
		 * routine which does this.
		 */
		status = mptsas_reg_access(mpt,
		    (mptsas_reg_access_t *)data, mode);
		break;
	default:
		/* Unknown commands go to the generic SCSI HBA layer. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
		    rval);
		break;
	}

out:
	return (status);
}
13206
/*
 * Reset and reinitialize the IOC after a firmware update, an explicit
 * RESET_ADAPTER ioctl, or an error-recovery event.  The sequence is:
 * mark the HBA as in-reset, hold all target throttles, disable
 * interrupts, flush every outstanding/queued command, reinitialize the
 * chip, then restore throttles and restart I/O.
 *
 * Must be called with mpt->m_mutex held (asserted below).  Returns
 * DDI_SUCCESS or DDI_FAILURE; on failure an FM ereport is posted and
 * service is marked lost, but the queues are still restarted.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int		rval = DDI_SUCCESS;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
13285
/*
 * Bring the IOC to an operational state.  Called both at attach time
 * (first_time == TRUE) and during error recovery / post-flash restarts
 * (first_time == FALSE).  The sequence is: validate flash, reset the
 * chip, (re)read IOC facts, allocate DMA resources on first init, run
 * the IOC init handshake, prime the reply free and post queues, enable
 * ports and event notification, and finally FM-check every DMA and
 * access handle.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * A message-unit reset on a restart keeps the existing DMA setup,
	 * so skip straight to the IOC init handshake.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (first_time) {
		if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
			goto fail;
		}
		/*
		 * Allocate request message frames, reply free queue, reply
		 * descriptor post queue, and reply message frames using
		 * latest IOC facts.
		 */
		if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_alloc_request_frames failed");
			goto fail;
		}
		if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_alloc_sense_bufs failed");
			goto fail;
		}
		if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_alloc_free_queue failed!");
			goto fail;
		}
		if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_alloc_post_queue failed!");
			goto fail;
		}
		if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_alloc_reply_frames failed!");
			goto fail;
		}
	}
mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.  Only the low 32 bits of the frame address go into
	 * each queue entry.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 * (All-ones marks a descriptor slot as unused.)
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_ioc_enable_event_notification failed");
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
13480
13481 static int
13482 mptsas_get_pci_cap(mptsas_t *mpt)
13483 {
13484 ushort_t caps_ptr, cap, cap_count;
13485
13486 if (mpt->m_config_handle == NULL)
13487 return (FALSE);
13488 /*
13489 * Check if capabilities list is supported and if so,
13490 * get initial capabilities pointer and clear bits 0,1.
13491 */
13492 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13493 & PCI_STAT_CAP) {
13494 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13495 PCI_CONF_CAP_PTR), 4);
13496 } else {
13497 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13498 }
13499
13500 /*
13501 * Walk capabilities if supported.
13502 */
13503 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13504
13505 /*
13506 * Check that we haven't exceeded the maximum number of
13507 * capabilities and that the pointer is in a valid range.
13508 */
13509 if (++cap_count > 48) {
13510 mptsas_log(mpt, CE_WARN,
13511 "too many device capabilities");
13512 break;
13513 }
13514 if (caps_ptr < 64) {
13515 mptsas_log(mpt, CE_WARN,
13516 "capabilities pointer 0x%x out of range",
13517 caps_ptr);
13518 break;
13519 }
13520
13521 /*
13522 * Get next capability and check that it is valid.
13523 * For now, we only support power management.
13524 */
13525 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13526 switch (cap) {
13527 case PCI_CAP_ID_PM:
13528 mptsas_log(mpt, CE_NOTE,
13529 "power management supported");
13530 mpt->m_options |= MPTSAS_OPT_PM;
13531
13532 /* Save PMCSR offset */
13533 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13534 break;
13535 /*
13536 * The following capabilities are valid. Any others
13537 * will cause a message to be logged.
13538 */
13539 case PCI_CAP_ID_VPD:
13540 case PCI_CAP_ID_MSI:
13541 case PCI_CAP_ID_PCIX:
13542 case PCI_CAP_ID_PCI_E:
13543 case PCI_CAP_ID_MSI_X:
13544 break;
13545 default:
13546 mptsas_log(mpt, CE_NOTE,
13547 "unrecognized capability 0x%x", cap);
13548 break;
13549 }
13550
13551 /*
13552 * Get next capabilities pointer and clear bits 0,1.
13553 */
13554 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13555 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13556 }
13557 return (TRUE);
13558 }
13559
13560 static int
13561 mptsas_init_pm(mptsas_t *mpt)
13562 {
13563 char pmc_name[16];
13564 char *pmc[] = {
13565 NULL,
13566 "0=Off (PCI D3 State)",
13567 "3=On (PCI D0 State)",
13568 NULL
13569 };
13570 uint16_t pmcsr_stat;
13571
13572 if (mptsas_get_pci_cap(mpt) == FALSE) {
13573 return (DDI_FAILURE);
13574 }
13575 /*
13576 * If PCI's capability does not support PM, then don't need
13577 * to registe the pm-components
13578 */
13579 if (!(mpt->m_options & MPTSAS_OPT_PM))
13580 return (DDI_SUCCESS);
13581 /*
13582 * If power management is supported by this chip, create
13583 * pm-components property for the power management framework
13584 */
13585 (void) sprintf(pmc_name, "NAME=mpt_sas%d", mpt->m_instance);
13586 pmc[0] = pmc_name;
13587 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13588 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13589 mpt->m_options &= ~MPTSAS_OPT_PM;
13590 mptsas_log(mpt, CE_WARN,
13591 "pm-component property creation failed");
13592 return (DDI_FAILURE);
13593 }
13594
13595 /*
13596 * Power on device.
13597 */
13598 (void) pm_busy_component(mpt->m_dip, 0);
13599 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13600 mpt->m_pmcsr_offset);
13601 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13602 mptsas_log(mpt, CE_WARN, "power up the device");
13603 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13604 PCI_PMCSR_D0);
13605 }
13606 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13607 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13608 return (DDI_FAILURE);
13609 }
13610 mpt->m_power_level = PM_LEVEL_D0;
13611 /*
13612 * Set pm idle delay.
13613 */
13614 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13615 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13616
13617 return (DDI_SUCCESS);
13618 }
13619
13620 static int
13621 mptsas_register_intrs(mptsas_t *mpt)
13622 {
13623 dev_info_t *dip;
13624 int intr_types;
13625
13626 dip = mpt->m_dip;
13627
13628 /* Get supported interrupt types */
13629 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13630 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13631 "failed");
13632 return (FALSE);
13633 }
13634
13635 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13636
13637 /*
13638 * Try MSI, but fall back to FIXED
13639 */
13640 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13641 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13642 NDBG0(("Using MSI interrupt type"));
13643 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13644 return (TRUE);
13645 }
13646 }
13647 if (intr_types & DDI_INTR_TYPE_FIXED) {
13648 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13649 NDBG0(("Using FIXED interrupt type"));
13650 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13651 return (TRUE);
13652 } else {
13653 NDBG0(("FIXED interrupt registration failed"));
13654 return (FALSE);
13655 }
13656 }
13657
13658 return (FALSE);
13659 }
13660
/*
 * Tear down all registered interrupts; thin wrapper kept for symmetry
 * with mptsas_register_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
13666
/*
 * mptsas_add_intrs:
 *
 * Register FIXED or MSI interrupts: query counts, allocate handles
 * (capped at one vector for MSI since the driver has a single interrupt
 * routine), check the priority, attach mptsas_intr() to each vector,
 * and enable delivery.  Returns DDI_SUCCESS or DDI_FAILURE; on failure
 * all allocated handles and the handle array are released.
 *
 * NOTE(review): the error paths after ddi_intr_add_handler() free the
 * handles without first calling ddi_intr_remove_handler() on vectors
 * whose handler was already added — confirm this is acceptable to the
 * DDI interrupt framework.
 */
static int
mptsas_add_intrs(mptsas_t *mpt, int intr_type)
{
	dev_info_t	*dip = mpt->m_dip;
	int		avail, actual, count = 0;
	int		i, flag, ret;

	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count <= 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
		    "ret %d count %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
		    "ret %d avail %d", ret, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
		    "navail() returned %d", count, avail);
	}

	/* Mpt only have one interrupt routine */
	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
		count = 1;
	}

	/* Allocate an array of interrupt handles */
	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);

	flag = DDI_INTR_ALLOC_NORMAL;

	/* call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d",
		    ret);
		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/* use interrupt count returned or abort? */
	if (actual < count) {
		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d",
		    count, actual);
	}

	mpt->m_intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
		    "Hi level interrupt not supported");

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
			    "failed %d", ret);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(mpt->m_htable[i]);
			}

			kmem_free(mpt->m_htable, mpt->m_intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
	    != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * Enable interrupts
	 */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_enable(mpt->m_htable[i]);
		}
	}
	return (DDI_SUCCESS);
}
13808
/*
 * mptsas_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 *
 * Tears down interrupt state in the reverse order it was set up:
 * disable every vector (block-disable when the capability allows it),
 * then remove each handler, free each interrupt, and finally free the
 * interrupt handle table.
 */
static void
mptsas_rem_intrs(mptsas_t *mpt)
{
	int	i;

	NDBG6(("mptsas_rem_intrs"));

	/* Disable all interrupts */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_disable(mpt->m_htable[i]);
		}
	}

	/* Remove each handler, then free the interrupt itself. */
	for (i = 0; i < mpt->m_intr_cnt; i++) {
		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
		(void) ddi_intr_free(mpt->m_htable[i]);
	}

	/* Release the interrupt handle table. */
	kmem_free(mpt->m_htable, mpt->m_intr_size);
}
13839
/*
 * The IO fault service error handling callback function
 *
 * Registered via ddi_fm_handler_register() in mptsas_fm_init().  Posts
 * the PCI ereport for the framework-supplied error and hands the
 * framework back the status it supplied.
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
13854
/*
 * mptsas_fm_init - initialize fma capabilities and register with IO
 * fault services.
 *
 * Everything registered here must be mirrored by mptsas_fm_fini().
 */
static void
mptsas_fm_init(mptsas_t *mpt)
{
	/*
	 * Need to change iblock to priority for new MSI intr
	 */
	ddi_iblock_cookie_t	fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (mpt->m_fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * mpt->m_fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */
		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_setup(mpt->m_dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_register(mpt->m_dip,
			    mptsas_fm_error_cb, (void *) mpt);
		}
	}
}
13899
13900 /*
13901 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13902 * fault services.
13903 *
13904 */
13905 static void
13906 mptsas_fm_fini(mptsas_t *mpt)
13907 {
13908 /* Only unregister FMA capabilities if registered */
13909 if (mpt->m_fm_capabilities) {
13910
13911 /*
13912 * Un-register error callback if error callback capable.
13913 */
13914
13915 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13916 ddi_fm_handler_unregister(mpt->m_dip);
13917 }
13918
13919 /*
13920 * Release any resources allocated by pci_ereport_setup()
13921 */
13922
13923 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13924 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13925 pci_ereport_teardown(mpt->m_dip);
13926 }
13927
13928 /* Unregister from IO Fault Services */
13929 ddi_fm_fini(mpt->m_dip);
13930
13931 /* Adjust access and dma attributes for FMA */
13932 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13933 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13934 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13935
13936 }
13937 }
13938
13939 int
13940 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13941 {
13942 ddi_fm_error_t de;
13943
13944 if (handle == NULL)
13945 return (DDI_FAILURE);
13946 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13947 return (de.fme_status);
13948 }
13949
13950 int
13951 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13952 {
13953 ddi_fm_error_t de;
13954
13955 if (handle == NULL)
13956 return (DDI_FAILURE);
13957 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13958 return (de.fme_status);
13959 }
13960
13961 void
13962 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13963 {
13964 uint64_t ena;
13965 char buf[FM_MAX_CLASS];
13966
13967 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13968 ena = fm_ena_generate(0, FM_ENA_FMT1);
13969 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13970 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13971 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13972 }
13973 }
13974
13975 static int
13976 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13977 uint16_t *dev_handle, mptsas_target_t **pptgt)
13978 {
13979 int rval;
13980 uint32_t dev_info;
13981 uint64_t sas_wwn;
13982 mptsas_phymask_t phymask;
13983 uint8_t physport, phynum, config, disk;
13984 uint64_t devicename;
13985 uint16_t pdev_hdl;
13986 mptsas_target_t *tmp_tgt = NULL;
13987 uint16_t bay_num, enclosure, io_flags;
13988
13989 ASSERT(*pptgt == NULL);
13990
13991 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13992 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13993 &bay_num, &enclosure, &io_flags);
13994 if (rval != DDI_SUCCESS) {
13995 rval = DEV_INFO_FAIL_PAGE0;
13996 return (rval);
13997 }
13998
13999 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
14000 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14001 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
14002 rval = DEV_INFO_WRONG_DEVICE_TYPE;
14003 return (rval);
14004 }
14005
14006 /*
14007 * Check if the dev handle is for a Phys Disk. If so, set return value
14008 * and exit. Don't add Phys Disks to hash.
14009 */
14010 for (config = 0; config < mpt->m_num_raid_configs; config++) {
14011 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
14012 if (*dev_handle == mpt->m_raidconfig[config].
14013 m_physdisk_devhdl[disk]) {
14014 rval = DEV_INFO_PHYS_DISK;
14015 return (rval);
14016 }
14017 }
14018 }
14019
14020 /*
14021 * Get SATA Device Name from SAS device page0 for
14022 * sata device, if device name doesn't exist, set mta_wwn to
14023 * 0 for direct attached SATA. For the device behind the expander
14024 * we still can use STP address assigned by expander.
14025 */
14026 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14027 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14028 /* alloc a temporary target to send the cmd to */
14029 tmp_tgt = mptsas_tgt_alloc(mpt->m_tmp_targets, *dev_handle,
14030 0, dev_info, 0, 0);
14031 mutex_exit(&mpt->m_mutex);
14032
14033 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
14034
14035 if (devicename == -1) {
14036 mutex_enter(&mpt->m_mutex);
14037 refhash_remove(mpt->m_tmp_targets, tmp_tgt);
14038 rval = DEV_INFO_FAIL_GUID;
14039 return (rval);
14040 }
14041
14042 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
14043 sas_wwn = devicename;
14044 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
14045 sas_wwn = 0;
14046 }
14047
14048 mutex_enter(&mpt->m_mutex);
14049 refhash_remove(mpt->m_tmp_targets, tmp_tgt);
14050 }
14051
14052 phymask = mptsas_physport_to_phymask(mpt, physport);
14053 *pptgt = mptsas_tgt_alloc(mpt->m_targets, *dev_handle, sas_wwn,
14054 dev_info, phymask, phynum);
14055 if (*pptgt == NULL) {
14056 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
14057 "structure!");
14058 rval = DEV_INFO_FAIL_ALLOC;
14059 return (rval);
14060 }
14061 (*pptgt)->m_io_flags = io_flags;
14062 (*pptgt)->m_enclosure = enclosure;
14063 (*pptgt)->m_slot_num = bay_num;
14064 return (DEV_INFO_SUCCESS);
14065 }
14066
/*
 * mptsas_get_sata_guid --
 *
 * Fetch a worldwide-unique GUID for a SATA target by issuing an INQUIRY
 * for VPD page 0x83 (device identification) and decoding the first
 * designator.  Returns the WWN if the designator begins with nibble 0x5
 * (NAA format), 0 if the drive exposes no NAA-format GUID, or
 * (uint64_t)-1 if the INQUIRY itself fails.  Retries up to 3 times,
 * one second apart, while the page data is not yet ready.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

/* Label shares a name with the retry counter; labels live in their own */
/* namespace, so this is legal C. */
inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		/* -1 converts to UINT64_MAX; callers test devicename == -1 */
		sata_guid = -1;
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* Association field (bits 5:4 of byte 1) must be 0 (logical unit) */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	/* High nibble 0x5 marks an NAA-format identifier */
	if ((dblk[4] & 0xf0) == 0x50) {
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* ASCII here: presumably a vendor string, no NAA WWN */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));	/* one second */
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
14116
/*
 * mptsas_inquiry --
 *
 * Issue an INQUIRY (optionally an EVPD page) to a target/LUN and copy
 * up to len bytes of the response into buf.  If reallen is non-NULL it
 * receives the number of bytes actually transferred (len - residual).
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
    unsigned char *buf, int len, int *reallen, uchar_t evpd)
{
	uchar_t			cdb[CDB_GROUP0];
	struct scsi_address	ap;
	struct buf		*data_bp = NULL;
	int			resid = 0;
	int			ret = DDI_FAILURE;

	/* The allocation length occupies two CDB bytes (3 and 4) below. */
	ASSERT(len <= 0xffff);

	ap.a_target = MPTSAS_INVALID_DEVHDL;
	ap.a_lun = (uchar_t)(lun);
	ap.a_hba_tran = mpt->m_tran;

	data_bp = scsi_alloc_consistent_buf(&ap,
	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
	if (data_bp == NULL) {
		return (ret);
	}
	bzero(cdb, CDB_GROUP0);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd;
	cdb[2] = page;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
	    &resid);
	if (ret == DDI_SUCCESS) {
		if (reallen) {
			*reallen = len - resid;
		}
		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
	}
	if (data_bp) {
		scsi_free_consistent_buf(data_bp);
	}
	return (ret);
}
14159
14160 static int
14161 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
14162 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
14163 int *resid)
14164 {
14165 struct scsi_pkt *pktp = NULL;
14166 scsi_hba_tran_t *tran_clone = NULL;
14167 mptsas_tgt_private_t *tgt_private = NULL;
14168 int ret = DDI_FAILURE;
14169
14170 /*
14171 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
14172 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
14173 * to simulate the cmds from sd
14174 */
14175 tran_clone = kmem_alloc(
14176 sizeof (scsi_hba_tran_t), KM_SLEEP);
14177 if (tran_clone == NULL) {
14178 goto out;
14179 }
14180 bcopy((caddr_t)mpt->m_tran,
14181 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
14182 tgt_private = kmem_alloc(
14183 sizeof (mptsas_tgt_private_t), KM_SLEEP);
14184 if (tgt_private == NULL) {
14185 goto out;
14186 }
14187 tgt_private->t_lun = ap->a_lun;
14188 tgt_private->t_private = ptgt;
14189 tran_clone->tran_tgt_private = tgt_private;
14190 ap->a_hba_tran = tran_clone;
14191
14192 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
14193 data_bp, cdblen, sizeof (struct scsi_arq_status),
14194 0, PKT_CONSISTENT, NULL, NULL);
14195 if (pktp == NULL) {
14196 goto out;
14197 }
14198 bcopy(cdb, pktp->pkt_cdbp, cdblen);
14199 pktp->pkt_flags = FLAG_NOPARITY;
14200 pktp->pkt_time = mptsas_scsi_pkt_time;
14201 if (scsi_poll(pktp) < 0) {
14202 goto out;
14203 }
14204 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
14205 goto out;
14206 }
14207 if (resid != NULL) {
14208 *resid = pktp->pkt_resid;
14209 }
14210
14211 ret = DDI_SUCCESS;
14212 out:
14213 if (pktp) {
14214 scsi_destroy_pkt(pktp);
14215 }
14216 if (tran_clone) {
14217 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
14218 }
14219 if (tgt_private) {
14220 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
14221 }
14222 return (ret);
14223 }
/*
 * mptsas_parse_address --
 *
 * Parse a unit address of the form "w<WWN>,<LUN>" or "p<PHY>,<LUN>"
 * (all fields hexadecimal).  For the 'w' form *wwid is set; for the
 * 'p' form *phy is set.  *lun is set on success in both cases.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char	*cp = NULL;
	char	*ptr = NULL;
	size_t	s = 0;
	char	*wwid_str = NULL;
	char	*lun_str = NULL;
	long	lunnum;
	long	phyid = -1;
	int	rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	/* The WWN/PHY part is terminated by a comma. */
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/*
	 * NOTE(review): strchr(ptr, '\0') always finds the terminating
	 * NUL, so this check cannot fail; it only locates end-of-string.
	 */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	/* 'p' addresses carry a PHY number; 'w' addresses carry a WWN. */
	if (name[0] == 'p') {
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	/* The LUN field is hexadecimal as well. */
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
14286
14287 /*
14288 * mptsas_parse_smp_name() is to parse sas wwn string
14289 * which format is "wWWN"
14290 */
14291 static int
14292 mptsas_parse_smp_name(char *name, uint64_t *wwn)
14293 {
14294 char *ptr = name;
14295
14296 if (*ptr != 'w') {
14297 return (DDI_FAILURE);
14298 }
14299
14300 ptr++;
14301 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
14302 return (DDI_FAILURE);
14303 }
14304 return (DDI_SUCCESS);
14305 }
14306
/*
 * mptsas_bus_config --
 *
 * Nexus bus_config(9E) entry point for the iport.  BUS_CONFIG_ONE
 * decodes the child's unit address (smp, wWWN or pPHY form) and
 * configures just that device; BUS_CONFIG_DRIVER and BUS_CONFIG_ALL
 * enumerate everything under the iport.  The vHCI and the iport are
 * both held (ndi_devi_enter) for the duration.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (which carry a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	default:
		ret = NDI_FAILURE;
		break;
	}

	if ((ret == NDI_SUCCESS) && bconfig) {
		/*
		 * Let the framework attach the node(s), using the
		 * LUN-canonicalized name if one was built above.
		 */
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
14427
14428 static int
14429 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
14430 mptsas_target_t *ptgt)
14431 {
14432 int rval = DDI_FAILURE;
14433 struct scsi_inquiry *sd_inq = NULL;
14434 mptsas_t *mpt = DIP2MPT(pdip);
14435
14436 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14437
14438 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
14439 SUN_INQSIZE, 0, (uchar_t)0);
14440
14441 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14442 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
14443 } else {
14444 rval = DDI_FAILURE;
14445 }
14446
14447 kmem_free(sd_inq, SUN_INQSIZE);
14448 return (rval);
14449 }
14450
/*
 * mptsas_config_one_addr --
 *
 * Configure a single LUN addressed by SAS WWN under the given iport.
 * Returns DDI_SUCCESS with *lundip set when the child already exists or
 * was created, DDI_FAILURE otherwise.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * The phymask exists if the port is active, otherwise
	 * nothing to do.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
		return (DDI_FAILURE);

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another scenario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	/*
	 * If this is a RAID, configure the volumes
	 */
	if (mpt->m_num_raid_configs > 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
14522
/*
 * mptsas_config_one_phy --
 *
 * Configure a single LUN addressed by PHY number (used for devices
 * without a usable WWN).  Returns DDI_SUCCESS with *lundip set when the
 * child already exists or was created, DDI_FAILURE otherwise.
 */
static int
mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	mptsas_phymask_t phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * The phymask exists if the port is active, otherwise
	 * nothing to do.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
		return (DDI_FAILURE);
	/*
	 * Get the physical port associated to the iport
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}

	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun().
	 */

	*lundip = mptsas_find_child_phy(pdip, phy);
	if (*lundip != NULL) {
		return (DDI_SUCCESS);
	}

	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
14569
/*
 * mptsas_retrieve_lundata --
 *
 * Decode the lun_cnt'th entry of a SCSI REPORT LUNS response (buf
 * points at the full response, header included).  On success *lun_num
 * receives the 16-bit LUN and *lun_addr_type the 2-bit addressing
 * method.  Returns DDI_FAILURE for addressing methods that are not
 * handled (extended addressing).
 */
static int
mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
    uint8_t *lun_addr_type)
{
	uint32_t	lun_idx = 0;

	ASSERT(lun_num != NULL);
	ASSERT(lun_addr_type != NULL);

	/* (lun_cnt + 1) skips one address-size worth of response header. */
	lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	/* determine report luns addressing type */
	switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
	/*
	 * Vendors in the field have been found to be concatenating
	 * bus/target/lun to equal the complete lun value instead
	 * of switching to flat space addressing
	 */
	/* 00b - peripheral device addressing method */
	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
		/* FALLTHRU */
	/* 10b - logical unit addressing method */
	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
		/* FALLTHRU */
	/* 01b - flat space addressing method */
	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
		/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
		*lun_addr_type = (buf[lun_idx] &
		    MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
		*lun_num = (buf[lun_idx] & 0x3F) << 8;
		*lun_num |= buf[lun_idx + 1];
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}
14605
/*
 * mptsas_config_luns --
 *
 * Enumerate the LUNs of one target.  SATA devices (with or without a
 * Device Name) don't support multi-LUN, so for those this only checks
 * that the single child node exists.  For SAS devices a REPORT LUNS
 * command is issued (growing the buffer and reissuing until the whole
 * list fits, with up to 3 retries on transport failure), every reported
 * LUN is probed/configured, and previously-known LUNs that are no
 * longer reported are offlined.
 */
static int
mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	struct buf		*repluns_bp = NULL;
	struct scsi_address	ap;
	uchar_t			cdb[CDB_GROUP5];
	int			ret = DDI_FAILURE;
	int			retry = 0;
	int			lun_list_len = 0;
	uint16_t		lun_num = 0;
	uint8_t			lun_addr_type = 0;
	uint32_t		lun_cnt = 0;
	uint32_t		lun_total = 0;
	dev_info_t		*cdip = NULL;
	uint16_t		*saved_repluns = NULL;
	char			*buffer = NULL;
	int			buf_len = 128;
	mptsas_t		*mpt = DIP2MPT(pdip);
	uint64_t		sas_wwn = 0;
	uint8_t			phy = 0xFF;
	uint32_t		dev_info = 0;

	/* Snapshot the target identity under the lock. */
	mutex_enter(&mpt->m_mutex);
	sas_wwn = ptgt->m_addr.mta_wwn;
	phy = ptgt->m_phynum;
	dev_info = ptgt->m_deviceinfo;
	mutex_exit(&mpt->m_mutex);

	if (sas_wwn == 0) {
		/*
		 * It's a SATA without Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_phy(pdip, phy)) {
			return (DDI_SUCCESS);
		} else {
			/*
			 * need configure and create node
			 */
			return (DDI_FAILURE);
		}
	}

	/*
	 * WWN (SAS address or Device Name exist)
	 */
	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		/*
		 * SATA device with Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
			return (DDI_SUCCESS);
		} else {
			return (DDI_FAILURE);
		}
	}

	/*
	 * REPORT LUNS loop: start with a 128-byte buffer; if the device
	 * reports a longer list, free the buffer and reissue with the
	 * exact size.  Allocation/transport failures count against the
	 * 3-attempt retry budget; the grow path does not.
	 */
	do {
		ap.a_target = MPTSAS_INVALID_DEVHDL;
		ap.a_lun = 0;
		ap.a_hba_tran = mpt->m_tran;
		repluns_bp = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
		if (repluns_bp == NULL) {
			retry++;
			continue;
		}
		bzero(cdb, CDB_GROUP5);
		cdb[0] = SCMD_REPORT_LUNS;
		/* Allocation length, big-endian, in CDB bytes 6-9. */
		cdb[6] = (buf_len & 0xff000000) >> 24;
		cdb[7] = (buf_len & 0x00ff0000) >> 16;
		cdb[8] = (buf_len & 0x0000ff00) >> 8;
		cdb[9] = (buf_len & 0x000000ff);

		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
		    repluns_bp, NULL);
		if (ret != DDI_SUCCESS) {
			scsi_free_consistent_buf(repluns_bp);
			retry++;
			continue;
		}
		/* First 4 bytes of the response: LUN list length in bytes. */
		lun_list_len = BE_32(*(int *)((void *)(
		    repluns_bp->b_un.b_addr)));
		if (buf_len >= lun_list_len + 8) {
			ret = DDI_SUCCESS;
			break;
		}
		scsi_free_consistent_buf(repluns_bp);
		buf_len = lun_list_len + 8;

	} while (retry < 3);

	if (ret != DDI_SUCCESS)
		return (ret);
	buffer = (char *)repluns_bp->b_un.b_addr;
	/*
	 * find out the number of luns returned by the SCSI ReportLun call
	 * and allocate buffer space
	 */
	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
	/* NOTE(review): unreachable -- KM_SLEEP never returns NULL. */
	if (saved_repluns == NULL) {
		scsi_free_consistent_buf(repluns_bp);
		return (DDI_FAILURE);
	}
	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
			continue;
		}
		saved_repluns[lun_cnt] = lun_num;
		/* Configure the LUN only if no child node exists for it. */
		if ((cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num)) !=
		    NULL) {
			ret = DDI_SUCCESS;
		} else {
			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
			    ptgt);
		}
		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
			/* LUN is back; clear any stale "device gone" mark. */
			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    MPTSAS_DEV_GONE);
		}
	}
	/* Offline any previously-known LUNs that were not reported. */
	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
	scsi_free_consistent_buf(repluns_bp);
	return (DDI_SUCCESS);
}
14736
14737 static int
14738 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14739 {
14740 int rval = DDI_FAILURE;
14741 struct scsi_inquiry *sd_inq = NULL;
14742 mptsas_t *mpt = DIP2MPT(pdip);
14743 mptsas_target_t *ptgt = NULL;
14744
14745 mutex_enter(&mpt->m_mutex);
14746 ptgt = refhash_linear_search(mpt->m_targets,
14747 mptsas_target_eval_devhdl, &target);
14748 mutex_exit(&mpt->m_mutex);
14749 if (ptgt == NULL) {
14750 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14751 "not found.", target);
14752 return (rval);
14753 }
14754
14755 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14756 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14757 SUN_INQSIZE, 0, (uchar_t)0);
14758
14759 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14760 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14761 0);
14762 } else {
14763 rval = DDI_FAILURE;
14764 }
14765
14766 kmem_free(sd_inq, SUN_INQSIZE);
14767 return (rval);
14768 }
14769
/*
 * configure all RAID volumes for virtual iport
 */
static void
mptsas_config_all_viport(dev_info_t *pdip)
{
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		config, vol;
	int		target;
	dev_info_t	*lundip = NULL;

	/*
	 * Get latest RAID info and search for any Volume DevHandles. If any
	 * are found, configure the volume.
	 */
	mutex_enter(&mpt->m_mutex);
	for (config = 0; config < mpt->m_num_raid_configs; config++) {
		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
			if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
			    == 1) {
				target = mpt->m_raidconfig[config].
				    m_raidvol[vol].m_raidhandle;
				/*
				 * Drop m_mutex while configuring; the
				 * config path takes it itself.
				 */
				mutex_exit(&mpt->m_mutex);
				(void) mptsas_config_raid(pdip, target,
				    &lundip);
				mutex_enter(&mpt->m_mutex);
			}
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14801
/*
 * mptsas_offline_missed_luns --
 *
 * After a REPORT LUNS pass, walk every child devinfo node and every
 * MPxIO pathinfo node under the iport that belongs to this target
 * (matched by WWN) and offline any LUN that no longer appears in
 * repluns[].
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/* First pass: plain (non-MPxIO) child devinfo nodes. */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		/* Advance first, so offlining savechild is safe. */
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		/* SMP nodes are not LUNs; leave them alone. */
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			/* Look for this child's LUN in the reported list. */
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(savechild, NULL);
		}
	}

	/* Second pass: MPxIO pathinfo nodes. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		/* Advance first, so offlining savepip is safe. */
		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(NULL, savepip);
		}
	}
}
14896
/*
 * If this enclosure doesn't exist in the enclosure list, add it. If it does,
 * update it.
 *
 * Caller must hold m_mutex (asserted below).
 */
static void
mptsas_enclosure_update(mptsas_t *mpt, mptsas_enclosure_t *mep)
{
	mptsas_enclosure_t *m;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));
	m = mptsas_enc_lookup(mpt, mep->me_enchdl);
	if (m != NULL) {
		uint8_t *ledp;
		m->me_flags = mep->me_flags;


		/*
		 * If the number of slots and the first slot entry in the
		 * enclosure has not changed, then we don't need to do anything
		 * here. Otherwise, we need to allocate a new array for the LED
		 * status of the slot.
		 */
		if (m->me_fslot == mep->me_fslot &&
		    m->me_nslots == mep->me_nslots)
			return;

		/*
		 * If the number of slots or the first slot has changed, it's
		 * not clear that we're really in a place that we can continue
		 * to honor the existing flags.
		 */
		if (mep->me_nslots > 0) {
			ledp = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
			    KM_SLEEP);
		} else {
			ledp = NULL;
		}

		/* Swap in the fresh LED array, discarding the stale one. */
		if (m->me_slotleds != NULL) {
			kmem_free(m->me_slotleds, sizeof (uint8_t) *
			    m->me_nslots);
		}
		m->me_slotleds = ledp;
		m->me_fslot = mep->me_fslot;
		m->me_nslots = mep->me_nslots;
		return;
	}

	/* Enclosure not in the list: build a new entry and append it. */
	m = kmem_zalloc(sizeof (*m), KM_SLEEP);
	m->me_enchdl = mep->me_enchdl;
	m->me_flags = mep->me_flags;
	m->me_nslots = mep->me_nslots;
	m->me_fslot = mep->me_fslot;
	if (m->me_nslots > 0) {
		m->me_slotleds = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
		    KM_SLEEP);
		/*
		 * It may make sense to optionally flush all of the slots and/or
		 * read the slot status flag here to synchronize between
		 * ourselves and the card. So far, that hasn't been needed
		 * anecdotally when enumerating something new. If we do, we
		 * should kick that off in a taskq potentially.
		 */
	}
	list_insert_tail(&mpt->m_enclosures, m);
}
14963
/*
 * Rebuild the driver's view of attached devices by walking the IOC
 * configuration pages: RAID info first, then SAS expanders (SMP), then
 * enclosures, and finally SAS/SATA target devices.  Progress through each
 * walk is checkpointed in mpt->m_smp_devhdl / mpt->m_dev_handle so an
 * interrupted walk resumes where it left off.  Callers hold mpt->m_mutex
 * (mptsas_enclosure_update() below asserts it).
 */
static void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t page_address;
	int rval = 0;
	uint16_t dev_handle;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/*
	 * Walk expander page 0 via GET_NEXT_HNDL, resuming from the last
	 * handle we recorded, and allocate/refresh an SMP node for each
	 * expander found.  The loop ends when a page read fails or once
	 * m_done_traverse_smp is set (by code outside this function).
	 */
	dev_handle = mpt->m_smp_devhdl;
	while (mpt->m_done_traverse_smp == 0) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Checkpoint the handle before processing the node. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(mpt, &smp_node);
	}

	/*
	 * Loop over enclosures so we can understand what's there.
	 */
	dev_handle = MPTSAS_INVALID_DEVHDL;
	while (mpt->m_done_traverse_enc == 0) {
		mptsas_enclosure_t me;

		page_address = (MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_ENCLOS_PGAD_FORM_MASK) | (uint32_t)dev_handle;

		if (mptsas_get_enclosure_page0(mpt, page_address, &me) !=
		    DDI_SUCCESS) {
			break;
		}
		dev_handle = me.me_enchdl;
		mptsas_enclosure_update(mpt, &me);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	while (mpt->m_done_traverse_dev == 0) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		/* An unrecoverable page/alloc/GUID failure ends the scan. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC) ||
		    (rval == DEV_INFO_FAIL_GUID)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
15036
15037 void
15038 mptsas_update_driver_data(struct mptsas *mpt)
15039 {
15040 mptsas_target_t *tp;
15041 mptsas_smp_t *sp;
15042
15043 ASSERT(MUTEX_HELD(&mpt->m_mutex));
15044
15045 /*
15046 * TODO after hard reset, update the driver data structures
15047 * 1. update port/phymask mapping table mpt->m_phy_info
15048 * 2. invalid all the entries in hash table
15049 * m_devhdl = 0xffff and m_deviceinfo = 0
15050 * 3. call sas_device_page/expander_page to update hash table
15051 */
15052 mptsas_update_phymask(mpt);
15053
15054 /*
15055 * Remove all the devhdls for existing entries but leave their
15056 * addresses alone. In update_hashtab() below, we'll find all
15057 * targets that are still present and reassociate them with
15058 * their potentially new devhdls. Leaving the targets around in
15059 * this fashion allows them to be used on the tx waitq even
15060 * while IOC reset is occurring.
15061 */
15062 for (tp = refhash_first(mpt->m_targets); tp != NULL;
15063 tp = refhash_next(mpt->m_targets, tp)) {
15064 tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
15065 tp->m_deviceinfo = 0;
15066 tp->m_dr_flag = MPTSAS_DR_INACTIVE;
15067 }
15068 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
15069 sp = refhash_next(mpt->m_smp_targets, sp)) {
15070 sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
15071 sp->m_deviceinfo = 0;
15072 }
15073 mpt->m_done_traverse_dev = 0;
15074 mpt->m_done_traverse_smp = 0;
15075 mpt->m_done_traverse_enc = 0;
15076 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
15077 mptsas_update_hashtab(mpt);
15078 }
15079
/*
 * Enumerate everything reachable through the iport pdip: refresh the
 * device/SMP/enclosure tables if needed, then online every SMP node and
 * configure every target whose phymask matches this iport's phymask.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t *smpdip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t *psmp;

	/*
	 * The phymask exists if the port is active, otherwise
	 * nothing to do.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
		return;

	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * If this is a RAID, enumerate the volumes
	 */
	if (mpt->m_num_raid_configs > 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Refresh the tables if any of the three traversals is incomplete. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp ||
	    !mpt->m_done_traverse_enc) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online each SMP (expander) node on this iport's phymask.  The
	 * mutex is dropped around the online call and reacquired to
	 * continue the refhash iteration.
	 */
	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
		phy_mask = psmp->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Likewise, configure each target on this iport's phymask. */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		phy_mask = ptgt->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
15138
15139 static int
15140 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
15141 {
15142 int rval = DDI_FAILURE;
15143 dev_info_t *tdip;
15144
15145 rval = mptsas_config_luns(pdip, ptgt);
15146 if (rval != DDI_SUCCESS) {
15147 /*
15148 * The return value means the SCMD_REPORT_LUNS
15149 * did not execute successfully. The target maybe
15150 * doesn't support such command.
15151 */
15152 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
15153 }
15154 return (rval);
15155 }
15156
15157 /*
15158 * Return fail if not all the childs/paths are freed.
15159 * if there is any path under the HBA, the return value will be always fail
15160 * because we didn't call mdi_pi_free for path
15161 */
15162 static int
15163 mptsas_offline_target(dev_info_t *pdip, char *name)
15164 {
15165 dev_info_t *child = NULL, *prechild = NULL;
15166 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
15167 int tmp_rval, rval = DDI_SUCCESS;
15168 char *addr, *cp;
15169 size_t s;
15170 mptsas_t *mpt = DIP2MPT(pdip);
15171
15172 child = ddi_get_child(pdip);
15173 while (child) {
15174 addr = ddi_get_name_addr(child);
15175 prechild = child;
15176 child = ddi_get_next_sibling(child);
15177
15178 if (addr == NULL) {
15179 continue;
15180 }
15181 if ((cp = strchr(addr, ',')) == NULL) {
15182 continue;
15183 }
15184
15185 s = (uintptr_t)cp - (uintptr_t)addr;
15186
15187 if (strncmp(addr, name, s) != 0) {
15188 continue;
15189 }
15190
15191 tmp_rval = mptsas_offline_lun(prechild, NULL);
15192 if (tmp_rval != DDI_SUCCESS) {
15193 rval = DDI_FAILURE;
15194 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15195 prechild, MPTSAS_DEV_GONE) !=
15196 DDI_PROP_SUCCESS) {
15197 mptsas_log(mpt, CE_WARN, "mptsas driver "
15198 "unable to create property for "
15199 "SAS %s (MPTSAS_DEV_GONE)", addr);
15200 }
15201 }
15202 }
15203
15204 pip = mdi_get_next_client_path(pdip, NULL);
15205 while (pip) {
15206 addr = MDI_PI(pip)->pi_addr;
15207 savepip = pip;
15208 pip = mdi_get_next_client_path(pdip, pip);
15209 if (addr == NULL) {
15210 continue;
15211 }
15212
15213 if ((cp = strchr(addr, ',')) == NULL) {
15214 continue;
15215 }
15216
15217 s = (uintptr_t)cp - (uintptr_t)addr;
15218
15219 if (strncmp(addr, name, s) != 0) {
15220 continue;
15221 }
15222
15223 (void) mptsas_offline_lun(NULL, savepip);
15224 /*
15225 * driver will not invoke mdi_pi_free, so path will not
15226 * be freed forever, return DDI_FAILURE.
15227 */
15228 rval = DDI_FAILURE;
15229 }
15230 return (rval);
15231 }
15232
15233 static int
15234 mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip)
15235 {
15236 int rval = DDI_FAILURE;
15237
15238 if (rpip != NULL) {
15239 if (MDI_PI_IS_OFFLINE(rpip)) {
15240 rval = DDI_SUCCESS;
15241 } else {
15242 rval = mdi_pi_offline(rpip, 0);
15243 }
15244 } else if (rdip != NULL) {
15245 rval = ndi_devi_offline(rdip,
15246 NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE | NDI_DEVI_GONE);
15247 }
15248
15249 return (rval);
15250 }
15251
15252 static dev_info_t *
15253 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
15254 {
15255 dev_info_t *child = NULL;
15256 char *smp_wwn = NULL;
15257
15258 child = ddi_get_child(parent);
15259 while (child) {
15260 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
15261 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
15262 != DDI_SUCCESS) {
15263 child = ddi_get_next_sibling(child);
15264 continue;
15265 }
15266
15267 if (strcmp(smp_wwn, str_wwn) == 0) {
15268 ddi_prop_free(smp_wwn);
15269 break;
15270 }
15271 child = ddi_get_next_sibling(child);
15272 ddi_prop_free(smp_wwn);
15273 }
15274 return (child);
15275 }
15276
15277 static int
15278 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node)
15279 {
15280 int rval = DDI_FAILURE;
15281 char wwn_str[MPTSAS_WWN_STRLEN];
15282 dev_info_t *cdip;
15283
15284 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15285
15286 cdip = mptsas_find_smp_child(pdip, wwn_str);
15287 if (cdip == NULL)
15288 return (DDI_SUCCESS);
15289
15290 rval = ndi_devi_offline(cdip, NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
15291
15292 return (rval);
15293 }
15294
15295 static dev_info_t *
15296 mptsas_find_child(dev_info_t *pdip, char *name)
15297 {
15298 dev_info_t *child = NULL;
15299 char *rname = NULL;
15300 int rval = DDI_FAILURE;
15301
15302 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15303
15304 child = ddi_get_child(pdip);
15305 while (child) {
15306 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
15307 if (rval != DDI_SUCCESS) {
15308 child = ddi_get_next_sibling(child);
15309 bzero(rname, SCSI_MAXNAMELEN);
15310 continue;
15311 }
15312
15313 if (strcmp(rname, name) == 0) {
15314 break;
15315 }
15316 child = ddi_get_next_sibling(child);
15317 bzero(rname, SCSI_MAXNAMELEN);
15318 }
15319
15320 kmem_free(rname, SCSI_MAXNAMELEN);
15321
15322 return (child);
15323 }
15324
15325
15326 static dev_info_t *
15327 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
15328 {
15329 dev_info_t *child = NULL;
15330 char *name = NULL;
15331 char *addr = NULL;
15332
15333 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15334 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15335 (void) sprintf(name, "%016"PRIx64, sasaddr);
15336 (void) sprintf(addr, "w%s,%x", name, lun);
15337 child = mptsas_find_child(pdip, addr);
15338 kmem_free(name, SCSI_MAXNAMELEN);
15339 kmem_free(addr, SCSI_MAXNAMELEN);
15340 return (child);
15341 }
15342
15343 static dev_info_t *
15344 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
15345 {
15346 dev_info_t *child;
15347 char *addr;
15348
15349 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15350 (void) sprintf(addr, "p%x,0", phy);
15351 child = mptsas_find_child(pdip, addr);
15352 kmem_free(addr, SCSI_MAXNAMELEN);
15353 return (child);
15354 }
15355
15356 static mdi_pathinfo_t *
15357 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
15358 {
15359 mdi_pathinfo_t *path;
15360 char *addr = NULL;
15361
15362 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15363 (void) sprintf(addr, "p%x,0", phy);
15364 path = mdi_pi_find(pdip, NULL, addr);
15365 kmem_free(addr, SCSI_MAXNAMELEN);
15366 return (path);
15367 }
15368
15369 static mdi_pathinfo_t *
15370 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
15371 {
15372 mdi_pathinfo_t *path;
15373 char *name = NULL;
15374 char *addr = NULL;
15375
15376 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15377 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15378 (void) sprintf(name, "%016"PRIx64, sasaddr);
15379 (void) sprintf(addr, "w%s,%x", name, lun);
15380 path = mdi_pi_find(parent, NULL, addr);
15381 kmem_free(name, SCSI_MAXNAMELEN);
15382 kmem_free(addr, SCSI_MAXNAMELEN);
15383
15384 return (path);
15385 }
15386
/*
 * Enumerate one LUN of a target: try to derive a devid GUID from SCSI VPD
 * page 0x83 (with retries), then create either a virtual (scsi_vhci/MPxIO)
 * or a physical child node for the LUN.  Returns DDI_SUCCESS only if one of
 * the create calls below succeeds.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int i = 0;
	uchar_t *inq83 = NULL;
	int inq83_len1 = 0xFF;
	int inq83_len = 0;
	int rval = DDI_FAILURE;
	ddi_devid_t devid;
	char *guid = NULL;
	int target = ptgt->m_devhdl;
	mdi_pathinfo_t *pip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	/* One retry per second, up to mptsas_inq83_retry_timeout seconds. */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall through to a
			 * physical node even though page 0x83 failed.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/*
	 * Reset rval before attempting the create calls: success is only
	 * reported if one of them succeeds.
	 */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer an MPxIO path node when we have a GUID and MPxIO is on. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to (or directly use) a physical child node. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
15510
15511 static int
15512 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
15513 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
15514 {
15515 int target;
15516 char *nodename = NULL;
15517 char **compatible = NULL;
15518 int ncompatible = 0;
15519 int mdi_rtn = MDI_FAILURE;
15520 int rval = DDI_FAILURE;
15521 char *old_guid = NULL;
15522 mptsas_t *mpt = DIP2MPT(pdip);
15523 char *lun_addr = NULL;
15524 char *wwn_str = NULL;
15525 char *attached_wwn_str = NULL;
15526 char *component = NULL;
15527 uint8_t phy = 0xFF;
15528 uint64_t sas_wwn;
15529 int64_t lun64 = 0;
15530 uint32_t devinfo;
15531 uint16_t dev_hdl;
15532 uint16_t pdev_hdl;
15533 uint64_t dev_sas_wwn;
15534 uint64_t pdev_sas_wwn;
15535 uint32_t pdev_info;
15536 uint8_t physport;
15537 uint8_t phy_id;
15538 uint32_t page_address;
15539 uint16_t bay_num, enclosure, io_flags;
15540 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15541 uint32_t dev_info;
15542
15543 mutex_enter(&mpt->m_mutex);
15544 target = ptgt->m_devhdl;
15545 sas_wwn = ptgt->m_addr.mta_wwn;
15546 devinfo = ptgt->m_deviceinfo;
15547 phy = ptgt->m_phynum;
15548 mutex_exit(&mpt->m_mutex);
15549
15550 if (sas_wwn) {
15551 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15552 } else {
15553 *pip = mptsas_find_path_phy(pdip, phy);
15554 }
15555
15556 if (*pip != NULL) {
15557 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15558 ASSERT(*lun_dip != NULL);
15559 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15560 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15561 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15562 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15563 /*
15564 * Same path back online again.
15565 */
15566 (void) ddi_prop_free(old_guid);
15567 if ((!MDI_PI_IS_ONLINE(*pip)) &&
15568 (!MDI_PI_IS_STANDBY(*pip)) &&
15569 (ptgt->m_tgt_unconfigured == 0)) {
15570 rval = mdi_pi_online(*pip, 0);
15571 } else {
15572 rval = DDI_SUCCESS;
15573 }
15574 if (rval != DDI_SUCCESS) {
15575 mptsas_log(mpt, CE_WARN, "path:target: "
15576 "%x, lun:%x online failed!", target,
15577 lun);
15578 *pip = NULL;
15579 *lun_dip = NULL;
15580 }
15581 return (rval);
15582 } else {
15583 /*
15584 * The GUID of the LUN has changed which maybe
15585 * because customer mapped another volume to the
15586 * same LUN.
15587 */
15588 mptsas_log(mpt, CE_WARN, "The GUID of the "
15589 "target:%x, lun:%x was changed, maybe "
15590 "because someone mapped another volume "
15591 "to the same LUN", target, lun);
15592 (void) ddi_prop_free(old_guid);
15593 if (!MDI_PI_IS_OFFLINE(*pip)) {
15594 rval = mdi_pi_offline(*pip, 0);
15595 if (rval != MDI_SUCCESS) {
15596 mptsas_log(mpt, CE_WARN, "path:"
15597 "target:%x, lun:%x offline "
15598 "failed!", target, lun);
15599 *pip = NULL;
15600 *lun_dip = NULL;
15601 return (DDI_FAILURE);
15602 }
15603 }
15604 if (mdi_pi_free(*pip,
15605 MDI_CLIENT_FLAGS_NO_EVENT) != MDI_SUCCESS) {
15606 mptsas_log(mpt, CE_WARN, "path:target:"
15607 "%x, lun:%x free failed!", target,
15608 lun);
15609 *pip = NULL;
15610 *lun_dip = NULL;
15611 return (DDI_FAILURE);
15612 }
15613 }
15614 } else {
15615 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15616 "property for path:target:%x, lun:%x", target, lun);
15617 *pip = NULL;
15618 *lun_dip = NULL;
15619 return (DDI_FAILURE);
15620 }
15621 }
15622 scsi_hba_nodename_compatible_get(inq, NULL,
15623 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
15624
15625 /*
15626 * if nodename can't be determined then print a message and skip it
15627 */
15628 if (nodename == NULL) {
15629 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
15630 "driver for target%d lun %d dtype:0x%02x", target, lun,
15631 inq->inq_dtype);
15632 return (DDI_FAILURE);
15633 }
15634
15635 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15636 /* The property is needed by MPAPI */
15637 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15638
15639 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15640 if (guid) {
15641 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
15642 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15643 } else {
15644 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
15645 (void) sprintf(wwn_str, "p%x", phy);
15646 }
15647
15648 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
15649 guid, lun_addr, compatible, ncompatible,
15650 0, pip);
15651 if (mdi_rtn == MDI_SUCCESS) {
15652
15653 if (mdi_prop_update_string(*pip, MDI_GUID,
15654 guid) != DDI_SUCCESS) {
15655 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15656 "create prop for target %d lun %d (MDI_GUID)",
15657 target, lun);
15658 mdi_rtn = MDI_FAILURE;
15659 goto virt_create_done;
15660 }
15661
15662 if (mdi_prop_update_int(*pip, LUN_PROP,
15663 lun) != DDI_SUCCESS) {
15664 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15665 "create prop for target %d lun %d (LUN_PROP)",
15666 target, lun);
15667 mdi_rtn = MDI_FAILURE;
15668 goto virt_create_done;
15669 }
15670 lun64 = (int64_t)lun;
15671 if (mdi_prop_update_int64(*pip, LUN64_PROP,
15672 lun64) != DDI_SUCCESS) {
15673 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15674 "create prop for target %d (LUN64_PROP)",
15675 target);
15676 mdi_rtn = MDI_FAILURE;
15677 goto virt_create_done;
15678 }
15679 if (mdi_prop_update_string_array(*pip, "compatible",
15680 compatible, ncompatible) !=
15681 DDI_PROP_SUCCESS) {
15682 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15683 "create prop for target %d lun %d (COMPATIBLE)",
15684 target, lun);
15685 mdi_rtn = MDI_FAILURE;
15686 goto virt_create_done;
15687 }
15688 if (sas_wwn && (mdi_prop_update_string(*pip,
15689 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15690 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15691 "create prop for target %d lun %d "
15692 "(target-port)", target, lun);
15693 mdi_rtn = MDI_FAILURE;
15694 goto virt_create_done;
15695 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15696 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15697 /*
15698 * Direct attached SATA device without DeviceName
15699 */
15700 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15701 "create prop for SAS target %d lun %d "
15702 "(sata-phy)", target, lun);
15703 mdi_rtn = MDI_FAILURE;
15704 goto virt_create_done;
15705 }
15706 mutex_enter(&mpt->m_mutex);
15707
15708 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15709 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15710 (uint32_t)ptgt->m_devhdl;
15711 rval = mptsas_get_sas_device_page0(mpt, page_address,
15712 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15713 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15714 if (rval != DDI_SUCCESS) {
15715 mutex_exit(&mpt->m_mutex);
15716 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15717 "parent device for handle %d", page_address);
15718 mdi_rtn = MDI_FAILURE;
15719 goto virt_create_done;
15720 }
15721
15722 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15723 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15724 rval = mptsas_get_sas_device_page0(mpt, page_address,
15725 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15726 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15727 if (rval != DDI_SUCCESS) {
15728 mutex_exit(&mpt->m_mutex);
15729 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15730 "device info for handle %d", page_address);
15731 mdi_rtn = MDI_FAILURE;
15732 goto virt_create_done;
15733 }
15734
15735 mutex_exit(&mpt->m_mutex);
15736
15737 /*
15738 * If this device direct attached to the controller
15739 * set the attached-port to the base wwid
15740 */
15741 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15742 != DEVINFO_DIRECT_ATTACHED) {
15743 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15744 pdev_sas_wwn);
15745 } else {
15746 /*
15747 * Update the iport's attached-port to guid
15748 */
15749 if (sas_wwn == 0) {
15750 (void) sprintf(wwn_str, "p%x", phy);
15751 } else {
15752 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15753 }
15754 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15755 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15756 DDI_PROP_SUCCESS) {
15757 mptsas_log(mpt, CE_WARN,
15758 "mptsas unable to create "
15759 "property for iport target-port"
15760 " %s (sas_wwn)",
15761 wwn_str);
15762 mdi_rtn = MDI_FAILURE;
15763 goto virt_create_done;
15764 }
15765
15766 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15767 mpt->un.m_base_wwid);
15768 }
15769
15770 if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
15771 char uabuf[SCSI_WWN_BUFLEN];
15772
15773 if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
15774 mptsas_log(mpt, CE_WARN,
15775 "mptsas unable to format SATA bridge WWN");
15776 mdi_rtn = MDI_FAILURE;
15777 goto virt_create_done;
15778 }
15779
15780 if (mdi_prop_update_string(*pip,
15781 SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
15782 DDI_SUCCESS) {
15783 mptsas_log(mpt, CE_WARN,
15784 "mptsas unable to create SCSI bridge port "
15785 "property for SATA device");
15786 mdi_rtn = MDI_FAILURE;
15787 goto virt_create_done;
15788 }
15789 }
15790
15791 if (mdi_prop_update_string(*pip,
15792 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15793 DDI_PROP_SUCCESS) {
15794 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15795 "property for iport attached-port %s (sas_wwn)",
15796 attached_wwn_str);
15797 mdi_rtn = MDI_FAILURE;
15798 goto virt_create_done;
15799 }
15800
15801
15802 if (inq->inq_dtype == 0) {
15803 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15804 /*
15805 * set obp path for pathinfo
15806 */
15807 (void) snprintf(component, MAXPATHLEN,
15808 "disk@%s", lun_addr);
15809
15810 if (mdi_pi_pathname_obp_set(*pip, component) !=
15811 DDI_SUCCESS) {
15812 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15813 "unable to set obp-path for object %s",
15814 component);
15815 mdi_rtn = MDI_FAILURE;
15816 goto virt_create_done;
15817 }
15818 }
15819
15820 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15821 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15822 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15823 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15824 "pm-capable", 1)) !=
15825 DDI_PROP_SUCCESS) {
15826 mptsas_log(mpt, CE_WARN, "mptsas driver"
15827 "failed to create pm-capable "
15828 "property, target %d", target);
15829 mdi_rtn = MDI_FAILURE;
15830 goto virt_create_done;
15831 }
15832 }
15833 /*
15834 * Create the phy-num property
15835 */
15836 if (mdi_prop_update_int(*pip, "phy-num",
15837 ptgt->m_phynum) != DDI_SUCCESS) {
15838 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15839 "create phy-num property for target %d lun %d",
15840 target, lun);
15841 mdi_rtn = MDI_FAILURE;
15842 goto virt_create_done;
15843 }
15844 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15845 mdi_rtn = mdi_pi_online(*pip, 0);
15846 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15847 mdi_rtn = MDI_FAILURE;
15848 }
15849 virt_create_done:
15850 if (*pip && mdi_rtn != MDI_SUCCESS) {
15851 (void) mdi_pi_free(*pip, MDI_CLIENT_FLAGS_NO_EVENT);
15852 *pip = NULL;
15853 *lun_dip = NULL;
15854 }
15855 }
15856
15857 scsi_hba_nodename_compatible_free(nodename, compatible);
15858 if (lun_addr != NULL) {
15859 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15860 }
15861 if (wwn_str != NULL) {
15862 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15863 }
15864 if (component != NULL) {
15865 kmem_free(component, MAXPATHLEN);
15866 }
15867
15868 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15869 }
15870
15871 static int
15872 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15873 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15874 {
15875 int target;
15876 int rval;
15877 int ndi_rtn = NDI_FAILURE;
15878 uint64_t be_sas_wwn;
15879 char *nodename = NULL;
15880 char **compatible = NULL;
15881 int ncompatible = 0;
15882 int instance = 0;
15883 mptsas_t *mpt = DIP2MPT(pdip);
15884 char *wwn_str = NULL;
15885 char *component = NULL;
15886 char *attached_wwn_str = NULL;
15887 uint8_t phy = 0xFF;
15888 uint64_t sas_wwn;
15889 uint32_t devinfo;
15890 uint16_t dev_hdl;
15891 uint16_t pdev_hdl;
15892 uint64_t pdev_sas_wwn;
15893 uint64_t dev_sas_wwn;
15894 uint32_t pdev_info;
15895 uint8_t physport;
15896 uint8_t phy_id;
15897 uint32_t page_address;
15898 uint16_t bay_num, enclosure, io_flags;
15899 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15900 uint32_t dev_info;
15901 int64_t lun64 = 0;
15902
15903 mutex_enter(&mpt->m_mutex);
15904 target = ptgt->m_devhdl;
15905 sas_wwn = ptgt->m_addr.mta_wwn;
15906 devinfo = ptgt->m_deviceinfo;
15907 phy = ptgt->m_phynum;
15908 mutex_exit(&mpt->m_mutex);
15909
15910 /*
15911 * generate compatible property with binding-set "mpt"
15912 */
15913 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15914 &nodename, &compatible, &ncompatible);
15915
15916 /*
15917 * if nodename can't be determined then print a message and skip it
15918 */
15919 if (nodename == NULL) {
15920 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15921 "for target %d lun %d", target, lun);
15922 return (DDI_FAILURE);
15923 }
15924
15925 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15926 DEVI_SID_NODEID, lun_dip);
15927
15928 /*
15929 * if lun alloc success, set props
15930 */
15931 if (ndi_rtn == NDI_SUCCESS) {
15932
15933 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15934 *lun_dip, LUN_PROP, lun) !=
15935 DDI_PROP_SUCCESS) {
15936 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15937 "property for target %d lun %d (LUN_PROP)",
15938 target, lun);
15939 ndi_rtn = NDI_FAILURE;
15940 goto phys_create_done;
15941 }
15942
15943 lun64 = (int64_t)lun;
15944 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15945 *lun_dip, LUN64_PROP, lun64) !=
15946 DDI_PROP_SUCCESS) {
15947 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15948 "property for target %d lun64 %d (LUN64_PROP)",
15949 target, lun);
15950 ndi_rtn = NDI_FAILURE;
15951 goto phys_create_done;
15952 }
15953 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15954 *lun_dip, "compatible", compatible, ncompatible)
15955 != DDI_PROP_SUCCESS) {
15956 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15957 "property for target %d lun %d (COMPATIBLE)",
15958 target, lun);
15959 ndi_rtn = NDI_FAILURE;
15960 goto phys_create_done;
15961 }
15962
15963 /*
15964 * We need the SAS WWN for non-multipath devices, so
15965 * we'll use the same property as that multipathing
15966 * devices need to present for MPAPI. If we don't have
15967 * a WWN (e.g. parallel SCSI), don't create the prop.
15968 */
15969 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15970 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15971 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15972 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15973 != DDI_PROP_SUCCESS) {
15974 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15975 "create property for SAS target %d lun %d "
15976 "(target-port)", target, lun);
15977 ndi_rtn = NDI_FAILURE;
15978 goto phys_create_done;
15979 }
15980
15981 be_sas_wwn = BE_64(sas_wwn);
15982 if (sas_wwn && ndi_prop_update_byte_array(
15983 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15984 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15985 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15986 "create property for SAS target %d lun %d "
15987 "(port-wwn)", target, lun);
15988 ndi_rtn = NDI_FAILURE;
15989 goto phys_create_done;
15990 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15991 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15992 DDI_PROP_SUCCESS)) {
15993 /*
15994 * Direct attached SATA device without DeviceName
15995 */
15996 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15997 "create property for SAS target %d lun %d "
15998 "(sata-phy)", target, lun);
15999 ndi_rtn = NDI_FAILURE;
16000 goto phys_create_done;
16001 }
16002
16003 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
16004 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
16005 mptsas_log(mpt, CE_WARN, "mptsas unable to"
16006 "create property for SAS target %d lun %d"
16007 " (SAS_PROP)", target, lun);
16008 ndi_rtn = NDI_FAILURE;
16009 goto phys_create_done;
16010 }
16011 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
16012 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
16013 mptsas_log(mpt, CE_WARN, "mptsas unable "
16014 "to create guid property for target %d "
16015 "lun %d", target, lun);
16016 ndi_rtn = NDI_FAILURE;
16017 goto phys_create_done;
16018 }
16019
16020 /*
16021 * The following code is to set properties for SM-HBA support,
16022 * it doesn't apply to RAID volumes
16023 */
16024 if (ptgt->m_addr.mta_phymask == 0)
16025 goto phys_raid_lun;
16026
16027 mutex_enter(&mpt->m_mutex);
16028
16029 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16030 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16031 (uint32_t)ptgt->m_devhdl;
16032 rval = mptsas_get_sas_device_page0(mpt, page_address,
16033 &dev_hdl, &dev_sas_wwn, &dev_info,
16034 &physport, &phy_id, &pdev_hdl,
16035 &bay_num, &enclosure, &io_flags);
16036 if (rval != DDI_SUCCESS) {
16037 mutex_exit(&mpt->m_mutex);
16038 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
16039 "parent device for handle %d.", page_address);
16040 ndi_rtn = NDI_FAILURE;
16041 goto phys_create_done;
16042 }
16043
16044 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16045 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
16046 rval = mptsas_get_sas_device_page0(mpt, page_address,
16047 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
16048 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
16049 if (rval != DDI_SUCCESS) {
16050 mutex_exit(&mpt->m_mutex);
16051 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16052 "device for handle %d.", page_address);
16053 ndi_rtn = NDI_FAILURE;
16054 goto phys_create_done;
16055 }
16056
16057 mutex_exit(&mpt->m_mutex);
16058
16059 /*
16060 * If this device direct attached to the controller
16061 * set the attached-port to the base wwid
16062 */
16063 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16064 != DEVINFO_DIRECT_ATTACHED) {
16065 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
16066 pdev_sas_wwn);
16067 } else {
16068 /*
16069 * Update the iport's attached-port to guid
16070 */
16071 if (sas_wwn == 0) {
16072 (void) sprintf(wwn_str, "p%x", phy);
16073 } else {
16074 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
16075 }
16076 if (ddi_prop_update_string(DDI_DEV_T_NONE,
16077 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
16078 DDI_PROP_SUCCESS) {
16079 mptsas_log(mpt, CE_WARN,
16080 "mptsas unable to create "
16081 "property for iport target-port"
16082 " %s (sas_wwn)",
16083 wwn_str);
16084 ndi_rtn = NDI_FAILURE;
16085 goto phys_create_done;
16086 }
16087
16088 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
16089 mpt->un.m_base_wwid);
16090 }
16091
16092 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16093 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
16094 DDI_PROP_SUCCESS) {
16095 mptsas_log(mpt, CE_WARN,
16096 "mptsas unable to create "
16097 "property for iport attached-port %s (sas_wwn)",
16098 attached_wwn_str);
16099 ndi_rtn = NDI_FAILURE;
16100 goto phys_create_done;
16101 }
16102
16103 if (IS_SATA_DEVICE(dev_info)) {
16104 char uabuf[SCSI_WWN_BUFLEN];
16105
16106 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16107 *lun_dip, MPTSAS_VARIANT, "sata") !=
16108 DDI_PROP_SUCCESS) {
16109 mptsas_log(mpt, CE_WARN,
16110 "mptsas unable to create "
16111 "property for device variant ");
16112 ndi_rtn = NDI_FAILURE;
16113 goto phys_create_done;
16114 }
16115
16116 if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
16117 mptsas_log(mpt, CE_WARN,
16118 "mptsas unable to format SATA bridge WWN");
16119 ndi_rtn = NDI_FAILURE;
16120 goto phys_create_done;
16121 }
16122
16123 if (ndi_prop_update_string(DDI_DEV_T_NONE, *lun_dip,
16124 SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
16125 DDI_PROP_SUCCESS) {
16126 mptsas_log(mpt, CE_WARN,
16127 "mptsas unable to create SCSI bridge port "
16128 "property for SATA device");
16129 ndi_rtn = NDI_FAILURE;
16130 goto phys_create_done;
16131 }
16132 }
16133
16134 if (IS_ATAPI_DEVICE(dev_info)) {
16135 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16136 *lun_dip, MPTSAS_VARIANT, "atapi") !=
16137 DDI_PROP_SUCCESS) {
16138 mptsas_log(mpt, CE_WARN,
16139 "mptsas unable to create "
16140 "property for device variant ");
16141 ndi_rtn = NDI_FAILURE;
16142 goto phys_create_done;
16143 }
16144 }
16145
16146 phys_raid_lun:
16147 /*
16148 * if this is a SAS controller, and the target is a SATA
16149 * drive, set the 'pm-capable' property for sd and if on
16150 * an OPL platform, also check if this is an ATAPI
16151 * device.
16152 */
16153 instance = ddi_get_instance(mpt->m_dip);
16154 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
16155 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
16156 NDBG2(("mptsas%d: creating pm-capable property, "
16157 "target %d", instance, target));
16158
16159 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
16160 *lun_dip, "pm-capable", 1)) !=
16161 DDI_PROP_SUCCESS) {
16162 mptsas_log(mpt, CE_WARN, "mptsas "
16163 "failed to create pm-capable "
16164 "property, target %d", target);
16165 ndi_rtn = NDI_FAILURE;
16166 goto phys_create_done;
16167 }
16168
16169 }
16170
16171 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
16172 /*
16173 * add 'obp-path' properties for devinfo
16174 */
16175 bzero(wwn_str, sizeof (wwn_str));
16176 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
16177 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
16178 if (guid) {
16179 (void) snprintf(component, MAXPATHLEN,
16180 "disk@w%s,%x", wwn_str, lun);
16181 } else {
16182 (void) snprintf(component, MAXPATHLEN,
16183 "disk@p%x,%x", phy, lun);
16184 }
16185 if (ddi_pathname_obp_set(*lun_dip, component)
16186 != DDI_SUCCESS) {
16187 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
16188 "unable to set obp-path for SAS "
16189 "object %s", component);
16190 ndi_rtn = NDI_FAILURE;
16191 goto phys_create_done;
16192 }
16193 }
16194 /*
16195 * Create the phy-num property for non-raid disk
16196 */
16197 if (ptgt->m_addr.mta_phymask != 0) {
16198 if (ndi_prop_update_int(DDI_DEV_T_NONE,
16199 *lun_dip, "phy-num", ptgt->m_phynum) !=
16200 DDI_PROP_SUCCESS) {
16201 mptsas_log(mpt, CE_WARN, "mptsas driver "
16202 "failed to create phy-num property for "
16203 "target %d", target);
16204 ndi_rtn = NDI_FAILURE;
16205 goto phys_create_done;
16206 }
16207 }
16208 phys_create_done:
16209 /*
16210 * If props were setup ok, online the lun
16211 */
16212 if (ndi_rtn == NDI_SUCCESS) {
16213 /*
16214 * Try to online the new node
16215 */
16216 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
16217 }
16218
16219 /*
16220 * If success set rtn flag, else unwire alloc'd lun
16221 */
16222 if (ndi_rtn != NDI_SUCCESS) {
16223 NDBG12(("mptsas driver unable to online "
16224 "target %d lun %d", target, lun));
16225 ndi_prop_remove_all(*lun_dip);
16226 (void) ndi_devi_free(*lun_dip);
16227 *lun_dip = NULL;
16228 }
16229 }
16230
16231 scsi_hba_nodename_compatible_free(nodename, compatible);
16232
16233 if (wwn_str != NULL) {
16234 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
16235 }
16236 if (component != NULL) {
16237 kmem_free(component, MAXPATHLEN);
16238 }
16239
16240
16241 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16242 }
16243
16244 static int
16245 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
16246 {
16247 mptsas_t *mpt = DIP2MPT(pdip);
16248 struct smp_device smp_sd;
16249
16250 /* XXX An HBA driver should not be allocating an smp_device. */
16251 bzero(&smp_sd, sizeof (struct smp_device));
16252 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
16253 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
16254
16255 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
16256 return (NDI_FAILURE);
16257 return (NDI_SUCCESS);
16258 }
16259
16260 static int
16261 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
16262 {
16263 mptsas_t *mpt = DIP2MPT(pdip);
16264 mptsas_smp_t *psmp = NULL;
16265 int rval;
16266 int phymask;
16267
16268 /*
16269 * The phymask exists if the port is active, otherwise
16270 * nothing to do.
16271 */
16272 if (ddi_prop_exists(DDI_DEV_T_ANY, pdip,
16273 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0)
16274 return (DDI_FAILURE);
16275
16276 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
16277 "phymask", 0);
16278 /*
16279 * Find the smp node in hash table with specified sas address and
16280 * physical port
16281 */
16282 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
16283 if (psmp == NULL) {
16284 return (DDI_FAILURE);
16285 }
16286
16287 rval = mptsas_online_smp(pdip, psmp, smp_dip);
16288
16289 return (rval);
16290 }
16291
16292 static int
16293 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
16294 dev_info_t **smp_dip)
16295 {
16296 char wwn_str[MPTSAS_WWN_STRLEN];
16297 char attached_wwn_str[MPTSAS_WWN_STRLEN];
16298 int ndi_rtn = NDI_FAILURE;
16299 int rval = 0;
16300 mptsas_smp_t dev_info;
16301 uint32_t page_address;
16302 mptsas_t *mpt = DIP2MPT(pdip);
16303 uint16_t dev_hdl;
16304 uint64_t sas_wwn;
16305 uint64_t smp_sas_wwn;
16306 uint8_t physport;
16307 uint8_t phy_id;
16308 uint16_t pdev_hdl;
16309 uint8_t numphys = 0;
16310 uint16_t i = 0;
16311 char phymask[MPTSAS_MAX_PHYS];
16312 char *iport = NULL;
16313 mptsas_phymask_t phy_mask = 0;
16314 uint16_t attached_devhdl;
16315 uint16_t bay_num, enclosure, io_flags;
16316
16317 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
16318
16319 /*
16320 * Probe smp device, prevent the node of removed device from being
16321 * configured succesfully
16322 */
16323 if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
16324 return (DDI_FAILURE);
16325 }
16326
16327 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
16328 return (DDI_SUCCESS);
16329 }
16330
16331 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
16332
16333 /*
16334 * if lun alloc success, set props
16335 */
16336 if (ndi_rtn == NDI_SUCCESS) {
16337 /*
16338 * Set the flavor of the child to be SMP flavored
16339 */
16340 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
16341
16342 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16343 *smp_dip, SMP_WWN, wwn_str) !=
16344 DDI_PROP_SUCCESS) {
16345 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16346 "property for smp device %s (sas_wwn)",
16347 wwn_str);
16348 ndi_rtn = NDI_FAILURE;
16349 goto smp_create_done;
16350 }
16351 (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
16352 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16353 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
16354 DDI_PROP_SUCCESS) {
16355 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16356 "property for iport target-port %s (sas_wwn)",
16357 wwn_str);
16358 ndi_rtn = NDI_FAILURE;
16359 goto smp_create_done;
16360 }
16361
16362 mutex_enter(&mpt->m_mutex);
16363
16364 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
16365 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
16366 rval = mptsas_get_sas_expander_page0(mpt, page_address,
16367 &dev_info);
16368 if (rval != DDI_SUCCESS) {
16369 mutex_exit(&mpt->m_mutex);
16370 mptsas_log(mpt, CE_WARN,
16371 "mptsas unable to get expander "
16372 "parent device info for %x", page_address);
16373 ndi_rtn = NDI_FAILURE;
16374 goto smp_create_done;
16375 }
16376
16377 smp_node->m_pdevhdl = dev_info.m_pdevhdl;
16378 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16379 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16380 (uint32_t)dev_info.m_pdevhdl;
16381 rval = mptsas_get_sas_device_page0(mpt, page_address,
16382 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
16383 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
16384 if (rval != DDI_SUCCESS) {
16385 mutex_exit(&mpt->m_mutex);
16386 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16387 "device info for %x", page_address);
16388 ndi_rtn = NDI_FAILURE;
16389 goto smp_create_done;
16390 }
16391
16392 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16393 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16394 (uint32_t)dev_info.m_devhdl;
16395 rval = mptsas_get_sas_device_page0(mpt, page_address,
16396 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
16397 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
16398 &io_flags);
16399 if (rval != DDI_SUCCESS) {
16400 mutex_exit(&mpt->m_mutex);
16401 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16402 "device info for %x", page_address);
16403 ndi_rtn = NDI_FAILURE;
16404 goto smp_create_done;
16405 }
16406 mutex_exit(&mpt->m_mutex);
16407
16408 /*
16409 * If this smp direct attached to the controller
16410 * set the attached-port to the base wwid
16411 */
16412 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16413 != DEVINFO_DIRECT_ATTACHED) {
16414 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
16415 sas_wwn);
16416 } else {
16417 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
16418 mpt->un.m_base_wwid);
16419 }
16420
16421 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16422 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
16423 DDI_PROP_SUCCESS) {
16424 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16425 "property for smp attached-port %s (sas_wwn)",
16426 attached_wwn_str);
16427 ndi_rtn = NDI_FAILURE;
16428 goto smp_create_done;
16429 }
16430
16431 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
16432 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
16433 mptsas_log(mpt, CE_WARN, "mptsas unable to "
16434 "create property for SMP %s (SMP_PROP) ",
16435 wwn_str);
16436 ndi_rtn = NDI_FAILURE;
16437 goto smp_create_done;
16438 }
16439
16440 /*
16441 * check the smp to see whether it direct
16442 * attached to the controller
16443 */
16444 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16445 != DEVINFO_DIRECT_ATTACHED) {
16446 goto smp_create_done;
16447 }
16448 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
16449 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
16450 if (numphys > 0) {
16451 goto smp_create_done;
16452 }
16453 /*
16454 * this iport is an old iport, we need to
16455 * reconfig the props for it.
16456 */
16457 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16458 MPTSAS_VIRTUAL_PORT, 0) !=
16459 DDI_PROP_SUCCESS) {
16460 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16461 MPTSAS_VIRTUAL_PORT);
16462 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
16463 "prop update failed");
16464 goto smp_create_done;
16465 }
16466
16467 mutex_enter(&mpt->m_mutex);
16468 numphys = 0;
16469 iport = ddi_get_name_addr(pdip);
16470 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16471 bzero(phymask, sizeof (phymask));
16472 (void) sprintf(phymask,
16473 "%x", mpt->m_phy_info[i].phy_mask);
16474 if (strcmp(phymask, iport) == 0) {
16475 phy_mask = mpt->m_phy_info[i].phy_mask;
16476 break;
16477 }
16478 }
16479
16480 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16481 if ((phy_mask >> i) & 0x01) {
16482 numphys++;
16483 }
16484 }
16485 /*
16486 * Update PHY info for smhba
16487 */
16488 if (mptsas_smhba_phy_init(mpt)) {
16489 mutex_exit(&mpt->m_mutex);
16490 mptsas_log(mpt, CE_WARN, "mptsas phy update "
16491 "failed");
16492 goto smp_create_done;
16493 }
16494 mutex_exit(&mpt->m_mutex);
16495
16496 mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
16497 &attached_devhdl);
16498
16499 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16500 MPTSAS_NUM_PHYS, numphys) !=
16501 DDI_PROP_SUCCESS) {
16502 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16503 MPTSAS_NUM_PHYS);
16504 mptsas_log(mpt, CE_WARN, "mptsas update "
16505 "num phys props failed");
16506 goto smp_create_done;
16507 }
16508 /*
16509 * Add parent's props for SMHBA support
16510 */
16511 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
16512 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
16513 DDI_PROP_SUCCESS) {
16514 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16515 SCSI_ADDR_PROP_ATTACHED_PORT);
16516 mptsas_log(mpt, CE_WARN, "mptsas update iport"
16517 "attached-port failed");
16518 goto smp_create_done;
16519 }
16520
16521 smp_create_done:
16522 /*
16523 * If props were setup ok, online the lun
16524 */
16525 if (ndi_rtn == NDI_SUCCESS) {
16526 /*
16527 * Try to online the new node
16528 */
16529 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
16530 }
16531
16532 /*
16533 * If success set rtn flag, else unwire alloc'd lun
16534 */
16535 if (ndi_rtn != NDI_SUCCESS) {
16536 NDBG12(("mptsas unable to online "
16537 "SMP target %s", wwn_str));
16538 ndi_prop_remove_all(*smp_dip);
16539 (void) ndi_devi_free(*smp_dip);
16540 }
16541 }
16542
16543 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16544 }
16545
/*
 * smp transport routine
 *
 * Entry point for the SMP framework: translate the smp_pkt into an MPI2
 * SMP passthrough request, run it synchronously via mptsas_do_passthru(),
 * and map any IOC/SAS error in the reply onto an errno value in
 * smp_pkt_reason.  Returns DDI_SUCCESS only when both the IOC status and
 * the SAS status report success.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint32_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	/*
	 * NOTE(review): 0xff presumably means "address the target by SAS
	 * address rather than by physical port" — confirm against the
	 * MPI2 specification.
	 */
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is only 16 bits wide; reject larger requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): the -4 here (and on the sizes passed to
	 * mptsas_do_passthru() below) appears to exclude the 4-byte SMP
	 * CRC handled by the IOC — confirm against the MPI2 specification.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	/* A non-empty response buffer implies a read; a request, a write. */
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map the IOC status onto an errno for the SMP framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
16630
16631 /*
16632 * If we didn't get a match, we need to get sas page0 for each device, and
16633 * untill we get a match. If failed, return NULL
16634 */
16635 static mptsas_target_t *
16636 mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
16637 {
16638 int i, j = 0;
16639 int rval = 0;
16640 uint16_t cur_handle;
16641 uint32_t page_address;
16642 mptsas_target_t *ptgt = NULL;
16643
16644 /*
16645 * PHY named device must be direct attached and attaches to
16646 * narrow port, if the iport is not parent of the device which
16647 * we are looking for.
16648 */
16649 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16650 if ((1 << i) & phymask)
16651 j++;
16652 }
16653
16654 if (j > 1)
16655 return (NULL);
16656
16657 /*
16658 * Must be a narrow port and single device attached to the narrow port
16659 * So the physical port num of device which is equal to the iport's
16660 * port num is the device what we are looking for.
16661 */
16662
16663 if (mpt->m_phy_info[phy].phy_mask != phymask)
16664 return (NULL);
16665
16666 mutex_enter(&mpt->m_mutex);
16667
16668 ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
16669 &phy);
16670 if (ptgt != NULL) {
16671 mutex_exit(&mpt->m_mutex);
16672 return (ptgt);
16673 }
16674
16675 if (mpt->m_done_traverse_dev) {
16676 mutex_exit(&mpt->m_mutex);
16677 return (NULL);
16678 }
16679
16680 /* If didn't get a match, come here */
16681 cur_handle = mpt->m_dev_handle;
16682 for (; ; ) {
16683 ptgt = NULL;
16684 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16685 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16686 rval = mptsas_get_target_device_info(mpt, page_address,
16687 &cur_handle, &ptgt);
16688 if ((rval == DEV_INFO_FAIL_PAGE0) ||
16689 (rval == DEV_INFO_FAIL_ALLOC) ||
16690 (rval == DEV_INFO_FAIL_GUID)) {
16691 break;
16692 }
16693 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16694 (rval == DEV_INFO_PHYS_DISK)) {
16695 continue;
16696 }
16697 mpt->m_dev_handle = cur_handle;
16698
16699 if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
16700 break;
16701 }
16702 }
16703
16704 mutex_exit(&mpt->m_mutex);
16705 return (ptgt);
16706 }
16707
16708 /*
16709 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
16710 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
16711 * If we didn't get a match, we need to get sas page0 for each device, and
16712 * untill we get a match
16713 * If failed, return NULL
16714 */
16715 static mptsas_target_t *
16716 mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16717 {
16718 int rval = 0;
16719 uint16_t cur_handle;
16720 uint32_t page_address;
16721 mptsas_target_t *tmp_tgt = NULL;
16722 mptsas_target_addr_t addr;
16723
16724 addr.mta_wwn = wwid;
16725 addr.mta_phymask = phymask;
16726 mutex_enter(&mpt->m_mutex);
16727 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16728 if (tmp_tgt != NULL) {
16729 mutex_exit(&mpt->m_mutex);
16730 return (tmp_tgt);
16731 }
16732
16733 if (phymask == 0) {
16734 /*
16735 * It's IR volume
16736 */
16737 rval = mptsas_get_raid_info(mpt);
16738 if (rval) {
16739 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16740 }
16741 mutex_exit(&mpt->m_mutex);
16742 return (tmp_tgt);
16743 }
16744
16745 if (mpt->m_done_traverse_dev) {
16746 mutex_exit(&mpt->m_mutex);
16747 return (NULL);
16748 }
16749
16750 /* If didn't get a match, come here */
16751 cur_handle = mpt->m_dev_handle;
16752 for (;;) {
16753 tmp_tgt = NULL;
16754 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16755 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
16756 rval = mptsas_get_target_device_info(mpt, page_address,
16757 &cur_handle, &tmp_tgt);
16758 if ((rval == DEV_INFO_FAIL_PAGE0) ||
16759 (rval == DEV_INFO_FAIL_ALLOC) ||
16760 (rval == DEV_INFO_FAIL_GUID)) {
16761 tmp_tgt = NULL;
16762 break;
16763 }
16764 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16765 (rval == DEV_INFO_PHYS_DISK)) {
16766 continue;
16767 }
16768 mpt->m_dev_handle = cur_handle;
16769 if ((tmp_tgt->m_addr.mta_wwn) &&
16770 (tmp_tgt->m_addr.mta_wwn == wwid) &&
16771 (tmp_tgt->m_addr.mta_phymask == phymask)) {
16772 break;
16773 }
16774 }
16775
16776 mutex_exit(&mpt->m_mutex);
16777 return (tmp_tgt);
16778 }
16779
16780 static mptsas_smp_t *
16781 mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16782 {
16783 int rval = 0;
16784 uint16_t cur_handle;
16785 uint32_t page_address;
16786 mptsas_smp_t smp_node, *psmp = NULL;
16787 mptsas_target_addr_t addr;
16788
16789 addr.mta_wwn = wwid;
16790 addr.mta_phymask = phymask;
16791 mutex_enter(&mpt->m_mutex);
16792 psmp = refhash_lookup(mpt->m_smp_targets, &addr);
16793 if (psmp != NULL) {
16794 mutex_exit(&mpt->m_mutex);
16795 return (psmp);
16796 }
16797
16798 if (mpt->m_done_traverse_smp) {
16799 mutex_exit(&mpt->m_mutex);
16800 return (NULL);
16801 }
16802
16803 /* If didn't get a match, come here */
16804 cur_handle = mpt->m_smp_devhdl;
16805 for (;;) {
16806 psmp = NULL;
16807 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
16808 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16809 rval = mptsas_get_sas_expander_page0(mpt, page_address,
16810 &smp_node);
16811 if (rval != DDI_SUCCESS) {
16812 break;
16813 }
16814 mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
16815 psmp = mptsas_smp_alloc(mpt, &smp_node);
16816 ASSERT(psmp);
16817 if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
16818 (psmp->m_addr.mta_phymask == phymask)) {
16819 break;
16820 }
16821 }
16822
16823 mutex_exit(&mpt->m_mutex);
16824 return (psmp);
16825 }
16826
16827 mptsas_target_t *
16828 mptsas_tgt_alloc(refhash_t *refhash, uint16_t devhdl, uint64_t wwid,
16829 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16830 {
16831 mptsas_target_t *tmp_tgt = NULL;
16832 mptsas_target_addr_t addr;
16833
16834 addr.mta_wwn = wwid;
16835 addr.mta_phymask = phymask;
16836 tmp_tgt = refhash_lookup(refhash, &addr);
16837 if (tmp_tgt != NULL) {
16838 NDBG20(("Hash item already exist"));
16839 tmp_tgt->m_deviceinfo = devinfo;
16840 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16841 return (tmp_tgt);
16842 }
16843 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16844 if (tmp_tgt == NULL) {
16845 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16846 return (NULL);
16847 }
16848 tmp_tgt->m_devhdl = devhdl;
16849 tmp_tgt->m_addr.mta_wwn = wwid;
16850 tmp_tgt->m_deviceinfo = devinfo;
16851 tmp_tgt->m_addr.mta_phymask = phymask;
16852 tmp_tgt->m_phynum = phynum;
16853 /* Initialized the tgt structure */
16854 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16855 tmp_tgt->m_qfull_retry_interval =
16856 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16857 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16858 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16859
16860 refhash_insert(refhash, tmp_tgt);
16861
16862 return (tmp_tgt);
16863 }
16864
16865 static void
16866 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16867 {
16868 dst->m_devhdl = src->m_devhdl;
16869 dst->m_deviceinfo = src->m_deviceinfo;
16870 dst->m_pdevhdl = src->m_pdevhdl;
16871 dst->m_pdevinfo = src->m_pdevinfo;
16872 }
16873
16874 static mptsas_smp_t *
16875 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16876 {
16877 mptsas_target_addr_t addr;
16878 mptsas_smp_t *ret_data;
16879
16880 addr.mta_wwn = data->m_addr.mta_wwn;
16881 addr.mta_phymask = data->m_addr.mta_phymask;
16882 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16883 /*
16884 * If there's already a matching SMP target, update its fields
16885 * in place. Since the address is not changing, it's safe to do
16886 * this. We cannot just bcopy() here because the structure we've
16887 * been given has invalid hash links.
16888 */
16889 if (ret_data != NULL) {
16890 mptsas_smp_target_copy(data, ret_data);
16891 return (ret_data);
16892 }
16893
16894 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16895 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16896 refhash_insert(mpt->m_smp_targets, ret_data);
16897 return (ret_data);
16898 }
16899
16900 /*
16901 * Functions for SGPIO LED support
16902 */
16903 static dev_info_t *
16904 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16905 {
16906 dev_info_t *dip;
16907 int prop;
16908
16909 dip = e_ddi_hold_devi_by_dev(dev, 0);
16910 if (dip == NULL)
16911 return (dip);
16912
16913 /*
16914 * The phymask exists if the port is active, otherwise
16915 * nothing to do.
16916 */
16917 if (ddi_prop_exists(DDI_DEV_T_ANY, dip,
16918 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "phymask") == 0) {
16919 ddi_release_devi(dip);
16920 return ((dev_info_t *)NULL);
16921 }
16922
16923 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16924 "phymask", 0);
16925
16926 *phymask = (mptsas_phymask_t)prop;
16927 ddi_release_devi(dip);
16928 return (dip);
16929 }
16930 static mptsas_target_t *
16931 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16932 {
16933 uint8_t phynum;
16934 uint64_t wwn;
16935 int lun;
16936 mptsas_target_t *ptgt = NULL;
16937
16938 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16939 return (NULL);
16940 }
16941 if (addr[0] == 'w') {
16942 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16943 } else {
16944 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16945 }
16946 return (ptgt);
16947 }
16948
16949 static int
16950 mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx)
16951 {
16952 uint32_t slotstatus = 0;
16953
16954 ASSERT3U(idx, <, mep->me_nslots);
16955
16956 /* Build an MPI2 Slot Status based on our view of the world */
16957 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16958 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16959 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16960 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16961 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16962 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16963
16964 /* Write it to the controller */
16965 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16966 slotstatus, idx + mep->me_fslot));
16967 return (mptsas_send_sep(mpt, mep, idx, &slotstatus,
16968 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16969 }
16970
16971 /*
16972 * send sep request, use enclosure/slot addressing
16973 */
16974 static int
16975 mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
16976 uint32_t *status, uint8_t act)
16977 {
16978 Mpi2SepRequest_t req;
16979 Mpi2SepReply_t rep;
16980 int ret;
16981 uint16_t enctype;
16982 uint16_t slot;
16983
16984 ASSERT(mutex_owned(&mpt->m_mutex));
16985
16986 /*
16987 * Look through the enclosures and make sure that this enclosure is
16988 * something that is directly attached device. If we didn't find an
16989 * enclosure for this device, don't send the ioctl.
16990 */
16991 enctype = mep->me_flags & MPI2_SAS_ENCLS0_FLAGS_MNG_MASK;
16992 if (enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES &&
16993 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO &&
16994 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO) {
16995 return (ENOTTY);
16996 }
16997 slot = idx + mep->me_fslot;
16998
16999 bzero(&req, sizeof (req));
17000 bzero(&rep, sizeof (rep));
17001
17002 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
17003 req.Action = act;
17004 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
17005 req.EnclosureHandle = LE_16(mep->me_enchdl);
17006 req.Slot = LE_16(slot);
17007 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
17008 req.SlotStatus = LE_32(*status);
17009 }
17010 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
17011 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
17012 if (ret != 0) {
17013 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
17014 "Processor Request message error %d", ret);
17015 return (ret);
17016 }
17017 /* do passthrough success, check the ioc status */
17018 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
17019 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
17020 "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
17021 LE_32(rep.IOCLogInfo));
17022 switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
17023 case MPI2_IOCSTATUS_INVALID_FUNCTION:
17024 case MPI2_IOCSTATUS_INVALID_VPID:
17025 case MPI2_IOCSTATUS_INVALID_FIELD:
17026 case MPI2_IOCSTATUS_INVALID_STATE:
17027 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
17028 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
17029 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
17030 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
17031 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
17032 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
17033 return (EINVAL);
17034 case MPI2_IOCSTATUS_BUSY:
17035 return (EBUSY);
17036 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
17037 return (EAGAIN);
17038 case MPI2_IOCSTATUS_INVALID_SGL:
17039 case MPI2_IOCSTATUS_INTERNAL_ERROR:
17040 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
17041 default:
17042 return (EIO);
17043 }
17044 }
17045 if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
17046 *status = LE_32(rep.SlotStatus);
17047 }
17048
17049 return (0);
17050 }
17051
17052 int
17053 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
17054 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
17055 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
17056 {
17057 ddi_dma_cookie_t new_cookie;
17058 size_t alloc_len;
17059 uint_t ncookie;
17060
17061 if (cookiep == NULL)
17062 cookiep = &new_cookie;
17063
17064 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
17065 NULL, dma_hdp) != DDI_SUCCESS) {
17066 return (FALSE);
17067 }
17068
17069 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
17070 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
17071 acc_hdp) != DDI_SUCCESS) {
17072 ddi_dma_free_handle(dma_hdp);
17073 *dma_hdp = NULL;
17074 return (FALSE);
17075 }
17076
17077 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
17078 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
17079 cookiep, &ncookie) != DDI_DMA_MAPPED) {
17080 (void) ddi_dma_mem_free(acc_hdp);
17081 ddi_dma_free_handle(dma_hdp);
17082 *dma_hdp = NULL;
17083 return (FALSE);
17084 }
17085
17086 return (TRUE);
17087 }
17088
17089 void
17090 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
17091 {
17092 if (*dma_hdp == NULL)
17093 return;
17094
17095 (void) ddi_dma_unbind_handle(*dma_hdp);
17096 (void) ddi_dma_mem_free(acc_hdp);
17097 ddi_dma_free_handle(dma_hdp);
17098 *dma_hdp = NULL;
17099 }