1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms of all code within
35 * this file that is exclusively owned by LSI, with or without
36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 * License requirements, the following conditions are met:
38 *
39 * Neither the name of the author nor the names of its contributors may be
40 * used to endorse or promote products derived from this software without
41 * specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 */
56
57 /*
58 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
59 *
60 */
61
62 #if defined(lint) || defined(DEBUG)
63 #define MPTSAS_DEBUG
64 #endif
65
66 /*
67 * standard header files.
68 */
69 #include <sys/note.h>
70 #include <sys/scsi/scsi.h>
71 #include <sys/pci.h>
72 #include <sys/file.h>
73 #include <sys/policy.h>
74 #include <sys/model.h>
75 #include <sys/sysevent.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dr.h>
78 #include <sys/sata/sata_defs.h>
79 #include <sys/scsi/generic/sas.h>
80 #include <sys/scsi/impl/scsi_sas.h>
81
82 #pragma pack(1)
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
91 #pragma pack()
92
93 /*
94 * private header files.
95 *
96 */
97 #include <sys/scsi/impl/scsi_reset_notify.h>
98 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
99 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
100 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
101 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
102 #include <sys/raidioctl.h>
103
104 #include <sys/fs/dv_node.h> /* devfs_clean */
105
106 /*
107 * FMA header files
108 */
109 #include <sys/ddifm.h>
110 #include <sys/fm/protocol.h>
111 #include <sys/fm/util.h>
112 #include <sys/fm/io/ddi.h>
113
114 /*
115 * autoconfiguration data and routines.
116 */
117 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
118 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
119 static int mptsas_power(dev_info_t *dip, int component, int level);
120
121 /*
122 * cb_ops function
123 */
124 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
125 cred_t *credp, int *rval);
126 #ifdef __sparc
127 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
128 #else /* __sparc */
129 static int mptsas_quiesce(dev_info_t *devi);
130 #endif /* __sparc */
131
132 /*
 * Resource initialization for hardware
134 */
135 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
136 static void mptsas_disable_bus_master(mptsas_t *mpt);
137 static void mptsas_hba_fini(mptsas_t *mpt);
138 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
139 static int mptsas_hba_setup(mptsas_t *mpt);
140 static void mptsas_hba_teardown(mptsas_t *mpt);
141 static int mptsas_config_space_init(mptsas_t *mpt);
142 static void mptsas_config_space_fini(mptsas_t *mpt);
143 static void mptsas_iport_register(mptsas_t *mpt);
144 static int mptsas_smp_setup(mptsas_t *mpt);
145 static void mptsas_smp_teardown(mptsas_t *mpt);
146 static int mptsas_cache_create(mptsas_t *mpt);
147 static void mptsas_cache_destroy(mptsas_t *mpt);
148 static int mptsas_alloc_request_frames(mptsas_t *mpt);
149 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
150 static int mptsas_alloc_free_queue(mptsas_t *mpt);
151 static int mptsas_alloc_post_queue(mptsas_t *mpt);
152 static void mptsas_alloc_reply_args(mptsas_t *mpt);
153 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
154 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
155 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
156
157 /*
158 * SCSA function prototypes
159 */
160 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
161 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
162 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
163 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
164 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
165 int tgtonly);
166 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
167 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
168 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
169 int tgtlen, int flags, int (*callback)(), caddr_t arg);
170 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
171 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
172 struct scsi_pkt *pkt);
173 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
174 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
175 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
176 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
177 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
178 void (*callback)(caddr_t), caddr_t arg);
179 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
180 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
181 static int mptsas_scsi_quiesce(dev_info_t *dip);
182 static int mptsas_scsi_unquiesce(dev_info_t *dip);
183 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
184 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
185
186 /*
187 * SMP functions
188 */
189 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
190
191 /*
192 * internal function prototypes.
193 */
194 static void mptsas_list_add(mptsas_t *mpt);
195 static void mptsas_list_del(mptsas_t *mpt);
196
197 static int mptsas_quiesce_bus(mptsas_t *mpt);
198 static int mptsas_unquiesce_bus(mptsas_t *mpt);
199
200 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
201 static void mptsas_free_handshake_msg(mptsas_t *mpt);
202
203 static void mptsas_ncmds_checkdrain(void *arg);
204
205 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
206 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
207 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
208 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
209
210 static int mptsas_do_detach(dev_info_t *dev);
211 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
212 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
213 struct scsi_pkt *pkt);
214 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
215
216 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
217 static void mptsas_handle_event(void *args);
218 static int mptsas_handle_event_sync(void *args);
219 static void mptsas_handle_dr(void *args);
220 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
221 dev_info_t *pdip);
222
223 static void mptsas_restart_cmd(void *);
224
225 static void mptsas_flush_hba(mptsas_t *mpt);
226 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
227 uint8_t tasktype);
228 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
229 uchar_t reason, uint_t stat);
230
231 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
232 static void mptsas_process_intr(mptsas_t *mpt,
233 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
234 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
235 pMpi2ReplyDescriptorsUnion_t reply_desc);
236 static void mptsas_handle_address_reply(mptsas_t *mpt,
237 pMpi2ReplyDescriptorsUnion_t reply_desc);
238 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
239 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
240 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
241
242 static void mptsas_watch(void *arg);
243 static void mptsas_watchsubr(mptsas_t *mpt);
244 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
245
246 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
247 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
248 uint8_t *data, uint32_t request_size, uint32_t reply_size,
249 uint32_t data_size, uint8_t direction, uint8_t *dataout,
250 uint32_t dataout_size, short timeout, int mode);
251 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
252
253 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
254 uint32_t unique_id);
255 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
256 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
257 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
258 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
259 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
260 uint32_t diag_type);
261 static int mptsas_diag_register(mptsas_t *mpt,
262 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
263 static int mptsas_diag_unregister(mptsas_t *mpt,
264 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
265 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
266 uint32_t *return_code);
267 static int mptsas_diag_read_buffer(mptsas_t *mpt,
268 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
269 uint32_t *return_code, int ioctl_mode);
270 static int mptsas_diag_release(mptsas_t *mpt,
271 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
272 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
273 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
274 int ioctl_mode);
275 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
276 int mode);
277
278 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
279 int cmdlen, int tgtlen, int statuslen, int kf);
280 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
281
282 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
283 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
284
285 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
286 int kmflags);
287 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
288
289 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
290 mptsas_cmd_t *cmd);
291 static void mptsas_check_task_mgt(mptsas_t *mpt,
292 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
293 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
294 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
295 int *resid);
296
297 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
298 static void mptsas_free_active_slots(mptsas_t *mpt);
299 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
300
301 static void mptsas_restart_hba(mptsas_t *mpt);
302 static void mptsas_restart_waitq(mptsas_t *mpt);
303
304 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
305 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
306 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
307
308 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
309 static void mptsas_doneq_empty(mptsas_t *mpt);
310 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
311
312 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
313 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
314 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
315 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
316
317
318 static void mptsas_start_watch_reset_delay();
319 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
320 static void mptsas_watch_reset_delay(void *arg);
321 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
322
323 /*
324 * helper functions
325 */
326 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
327
328 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
329 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
330 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
331 int lun);
332 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
333 int lun);
334 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
335 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
336
337 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
338 int *lun);
339 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
340
341 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
342 mptsas_phymask_t phymask, uint8_t phy);
343 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
344 mptsas_phymask_t phymask, uint64_t wwid);
345 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
346 mptsas_phymask_t phymask, uint64_t wwid);
347
348 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
349 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
350
351 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
352 uint16_t *handle, mptsas_target_t **pptgt);
353 static void mptsas_update_phymask(mptsas_t *mpt);
354
355 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
356 uint32_t *status, uint8_t cmd);
357 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
358 mptsas_phymask_t *phymask);
359 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
360 mptsas_phymask_t phymask);
361 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
362
363
364 /*
365 * Enumeration / DR functions
366 */
367 static void mptsas_config_all(dev_info_t *pdip);
368 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
369 dev_info_t **lundip);
370 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
371 dev_info_t **lundip);
372
373 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
374 static int mptsas_offline_target(dev_info_t *pdip, char *name);
375
376 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
377 dev_info_t **dip);
378
379 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
380 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
381 dev_info_t **dip, mptsas_target_t *ptgt);
382
383 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
384 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
385
386 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
387 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
388 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
389 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
390 int lun);
391
392 static void mptsas_offline_missed_luns(dev_info_t *pdip,
393 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
394 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
395 mdi_pathinfo_t *rpip, uint_t flags);
396
397 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
398 dev_info_t **smp_dip);
399 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
400 uint_t flags);
401
402 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
403 int mode, int *rval);
404 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
405 int mode, int *rval);
406 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
407 int mode, int *rval);
408 static void mptsas_record_event(void *args);
409 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
410 int mode);
411
412 mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
413 uint32_t, mptsas_phymask_t, uint8_t);
414 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
415 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
416 dev_info_t **smp_dip);
417
418 /*
419 * Power management functions
420 */
421 static int mptsas_get_pci_cap(mptsas_t *mpt);
422 static int mptsas_init_pm(mptsas_t *mpt);
423
424 /*
425 * MPT MSI tunable:
426 *
427 * By default MSI is enabled on all supported platforms.
428 */
429 boolean_t mptsas_enable_msi = B_TRUE;
430 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
431
432 static int mptsas_register_intrs(mptsas_t *);
433 static void mptsas_unregister_intrs(mptsas_t *);
434 static int mptsas_add_intrs(mptsas_t *, int);
435 static void mptsas_rem_intrs(mptsas_t *);
436
437 /*
438 * FMA Prototypes
439 */
440 static void mptsas_fm_init(mptsas_t *mpt);
441 static void mptsas_fm_fini(mptsas_t *mpt);
442 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
443
444 extern pri_t minclsyspri, maxclsyspri;
445
446 /*
447 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
448 * under this device that the paths to a physical device are created when
449 * MPxIO is used.
450 */
451 extern dev_info_t *scsi_vhci_dip;
452
453 /*
454 * Tunable timeout value for Inquiry VPD page 0x83
455 * By default the value is 30 seconds.
456 */
457 int mptsas_inq83_retry_timeout = 30;
458
459 /*
460 * This is used to allocate memory for message frame storage, not for
461 * data I/O DMA. All message frames must be stored in the first 4G of
462 * physical memory.
463 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max keeps frames below 4G */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0		/* flags, set to 0 */
};
478
479 /*
480 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
481 * physical addresses are supported.)
482 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - full 64-bit range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0		/* flags, set to 0 */
};
497
/*
 * Device register/frame access attributes: MPI structures are defined
 * little-endian, and device accesses must be strictly ordered.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
504
/*
 * Character device entry points.  The driver supports only
 * open/close/ioctl; open and close are delegated to the SCSA framework.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,		/* open */
	scsi_hba_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mptsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* streamtab */
	D_MP,			/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};
525
/*
 * Autoconfiguration entry points.  The reset and quiesce entries differ
 * between SPARC and other platforms (see the #ifdef __sparc arms below).
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mptsas_attach,		/* attach */
	mptsas_detach,		/* detach */
#ifdef	__sparc
	mptsas_reset,		/* reset */
#else
	nodev,			/* reset */
#endif	/* __sparc */
	&mptsas_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	mptsas_power,		/* power management */
#ifdef	__sparc
	ddi_quiesce_not_needed	/* quiesce */
#else
	mptsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
548
549
550 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
551
/*
 * Loadable-module linkage: this module is a device driver.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING,	/* Name of the module. */
	&mptsas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
561 #define TARGET_PROP "target"
562 #define LUN_PROP "lun"
563 #define LUN64_PROP "lun64"
564 #define SAS_PROP "sas-mpt"
565 #define MDI_GUID "wwn"
566 #define NDI_GUID "guid"
567 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
568
569 /*
570 * Local static data
571 */
572 #if defined(MPTSAS_DEBUG)
573 uint32_t mptsas_debug_flags = 0x0;
574 #endif /* defined(MPTSAS_DEBUG) */
575 uint32_t mptsas_debug_resets = 0;
576
577 static kmutex_t mptsas_global_mutex;
578 static void *mptsas_state; /* soft state ptr */
579 static krwlock_t mptsas_global_rwlock;
580
581 static kmutex_t mptsas_log_mutex;
582 static char mptsas_log_buf[256];
583 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
584
585 static mptsas_t *mptsas_head, *mptsas_tail;
586 static clock_t mptsas_scsi_watchdog_tick;
587 static clock_t mptsas_tick;
588 static timeout_id_t mptsas_reset_watch;
589 static timeout_id_t mptsas_timeout_id;
590 static int mptsas_timeouts_enabled = 0;
591
592 /*
 * The only software restriction on switching msg buffers to 64 bit seems to
594 * be the Auto Request Sense interface. The high 32 bits for all such
595 * requests appear to be required to sit in the same 4G segment.
596 * See initialization of SenseBufferAddressHigh in mptsas_init.c, and
597 * the use of SenseBufferLowAddress in requests. Note that there is
598 * currently a dependency on scsi_alloc_consistent_buf() adhering to
599 * this requirement.
600 * There is also a question about improved performance over PCI/PCIX
601 * if transfers are within the first 4Gb.
602 */
603 static int mptsas_use_64bit_msgaddr = 0;
604
605 /*
606 * warlock directives
607 */
608 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
609 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
610 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
611 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
612 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
613 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
614
615 /*
616 * SM - HBA statics
617 */
618 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
619
620 #ifdef MPTSAS_DEBUG
621 void debug_enter(char *);
622 #endif
623
624 /*
625 * Notes:
626 * - scsi_hba_init(9F) initializes SCSI HBA modules
627 * - must call scsi_hba_fini(9F) if modload() fails
628 */
629 int
630 _init(void)
631 {
632 int status;
633 /* CONSTCOND */
634 ASSERT(NO_COMPETING_THREADS);
635
636 NDBG0(("_init"));
637
638 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
639 MPTSAS_INITIAL_SOFT_SPACE);
640 if (status != 0) {
641 return (status);
642 }
643
644 if ((status = scsi_hba_init(&modlinkage)) != 0) {
645 ddi_soft_state_fini(&mptsas_state);
646 return (status);
647 }
648
649 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
650 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
651 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
652
653 if ((status = mod_install(&modlinkage)) != 0) {
654 mutex_destroy(&mptsas_log_mutex);
655 rw_destroy(&mptsas_global_rwlock);
656 mutex_destroy(&mptsas_global_mutex);
657 ddi_soft_state_fini(&mptsas_state);
658 scsi_hba_fini(&modlinkage);
659 }
660
661 return (status);
662 }
663
664 /*
665 * Notes:
666 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
667 */
668 int
669 _fini(void)
670 {
671 int status;
672 /* CONSTCOND */
673 ASSERT(NO_COMPETING_THREADS);
674
675 NDBG0(("_fini"));
676
677 if ((status = mod_remove(&modlinkage)) == 0) {
678 ddi_soft_state_fini(&mptsas_state);
679 scsi_hba_fini(&modlinkage);
680 mutex_destroy(&mptsas_global_mutex);
681 rw_destroy(&mptsas_global_rwlock);
682 mutex_destroy(&mptsas_log_mutex);
683 }
684 return (status);
685 }
686
687 /*
688 * The loadable-module _info(9E) entry point
689 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);
	NDBG0(("mptsas _info"));

	/* Report module information via mod_info(9F). */
	return (mod_info(&modlinkage, modinfop));
}
699
700 static int
701 mptsas_target_eval_devhdl(const void *op, void *arg)
702 {
703 uint16_t dh = *(uint16_t *)arg;
704 const mptsas_target_t *tp = op;
705
706 return ((int)tp->m_devhdl - (int)dh);
707 }
708
709 static int
710 mptsas_target_eval_slot(const void *op, void *arg)
711 {
712 mptsas_led_control_t *lcp = arg;
713 const mptsas_target_t *tp = op;
714
715 if (tp->m_enclosure != lcp->Enclosure)
716 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
717
718 return ((int)tp->m_slot_num - (int)lcp->Slot);
719 }
720
721 static int
722 mptsas_target_eval_nowwn(const void *op, void *arg)
723 {
724 uint8_t phy = *(uint8_t *)arg;
725 const mptsas_target_t *tp = op;
726
727 if (tp->m_addr.mta_wwn != 0)
728 return (-1);
729
730 return ((int)tp->m_phynum - (int)phy);
731 }
732
733 static int
734 mptsas_smp_eval_devhdl(const void *op, void *arg)
735 {
736 uint16_t dh = *(uint16_t *)arg;
737 const mptsas_smp_t *sp = op;
738
739 return ((int)sp->m_devhdl - (int)dh);
740 }
741
742 static uint64_t
743 mptsas_target_addr_hash(const void *tp)
744 {
745 const mptsas_target_addr_t *tap = tp;
746
747 return ((tap->mta_wwn & 0xffffffffffffULL) |
748 ((uint64_t)tap->mta_phymask << 48));
749 }
750
751 static int
752 mptsas_target_addr_cmp(const void *a, const void *b)
753 {
754 const mptsas_target_addr_t *aap = a;
755 const mptsas_target_addr_t *bap = b;
756
757 if (aap->mta_wwn < bap->mta_wwn)
758 return (-1);
759 if (aap->mta_wwn > bap->mta_wwn)
760 return (1);
761 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
762 }
763
/*
 * refhash object destructor for mptsas_target_t entries.
 */
static void
mptsas_target_free(void *op)
{
	kmem_free(op, sizeof (mptsas_target_t));
}
769
/*
 * refhash object destructor for mptsas_smp_t entries.
 */
static void
mptsas_smp_free(void *op)
{
	kmem_free(op, sizeof (mptsas_smp_t));
}
775
/*
 * Tear down the per-instance target and SMP-target hash tables: remove
 * every element (removal drops the entry and lets the registered free
 * routine -- mptsas_target_free()/mptsas_smp_free() -- reclaim it),
 * then destroy the tables and clear the pointers so stale references
 * fault rather than reuse freed memory.
 *
 * NOTE(review): the loops call refhash_next() on an element that was
 * just passed to refhash_remove() -- this assumes refhash keeps a
 * removed node walkable until traversal moves past it; confirm against
 * the refhash implementation (mptsas_hash.c).
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
795
796 static int
797 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
798 {
799 dev_info_t *pdip;
800 mptsas_t *mpt;
801 scsi_hba_tran_t *hba_tran;
802 char *iport = NULL;
803 char phymask[MPTSAS_MAX_PHYS];
804 mptsas_phymask_t phy_mask = 0;
805 int dynamic_port = 0;
806 uint32_t page_address;
807 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
808 int rval = DDI_FAILURE;
809 int i = 0;
810 uint8_t numphys = 0;
811 uint8_t phy_id;
812 uint8_t phy_port = 0;
813 uint16_t attached_devhdl = 0;
814 uint32_t dev_info;
815 uint64_t attached_sas_wwn;
816 uint16_t dev_hdl;
817 uint16_t pdev_hdl;
818 uint16_t bay_num, enclosure;
819 char attached_wwnstr[MPTSAS_WWN_STRLEN];
820
821 /* CONSTCOND */
822 ASSERT(NO_COMPETING_THREADS);
823
824 switch (cmd) {
825 case DDI_ATTACH:
826 break;
827
828 case DDI_RESUME:
829 /*
	 * If this is a scsi-iport node, nothing to do here.
831 */
832 return (DDI_SUCCESS);
833
834 default:
835 return (DDI_FAILURE);
836 }
837
838 pdip = ddi_get_parent(dip);
839
840 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
841 NULL) {
842 cmn_err(CE_WARN, "Failed attach iport because fail to "
843 "get tran vector for the HBA node");
844 return (DDI_FAILURE);
845 }
846
847 mpt = TRAN2MPT(hba_tran);
848 ASSERT(mpt != NULL);
849 if (mpt == NULL)
850 return (DDI_FAILURE);
851
852 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
853 NULL) {
854 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
855 "get tran vector for the iport node");
856 return (DDI_FAILURE);
857 }
858
859 /*
860 * Overwrite parent's tran_hba_private to iport's tran vector
861 */
862 hba_tran->tran_hba_private = mpt;
863
864 ddi_report_dev(dip);
865
866 /*
867 * Get SAS address for initiator port according dev_handle
868 */
869 iport = ddi_get_name_addr(dip);
870 if (iport && strncmp(iport, "v0", 2) == 0) {
871 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
872 MPTSAS_VIRTUAL_PORT, 1) !=
873 DDI_PROP_SUCCESS) {
874 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
875 MPTSAS_VIRTUAL_PORT);
876 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
877 "prop update failed");
878 return (DDI_FAILURE);
879 }
880 return (DDI_SUCCESS);
881 }
882
883 mutex_enter(&mpt->m_mutex);
884 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
885 bzero(phymask, sizeof (phymask));
886 (void) sprintf(phymask,
887 "%x", mpt->m_phy_info[i].phy_mask);
888 if (strcmp(phymask, iport) == 0) {
889 break;
890 }
891 }
892
893 if (i == MPTSAS_MAX_PHYS) {
894 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
895 "seems not exist", iport);
896 mutex_exit(&mpt->m_mutex);
897 return (DDI_FAILURE);
898 }
899
900 phy_mask = mpt->m_phy_info[i].phy_mask;
901
902 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
903 dynamic_port = 1;
904 else
905 dynamic_port = 0;
906
907 /*
908 * Update PHY info for smhba
909 */
910 if (mptsas_smhba_phy_init(mpt)) {
911 mutex_exit(&mpt->m_mutex);
912 mptsas_log(mpt, CE_WARN, "mptsas phy update "
913 "failed");
914 return (DDI_FAILURE);
915 }
916
917 mutex_exit(&mpt->m_mutex);
918
919 numphys = 0;
920 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
921 if ((phy_mask >> i) & 0x01) {
922 numphys++;
923 }
924 }
925
926 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
927 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
928 mpt->un.m_base_wwid);
929
930 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
931 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
932 DDI_PROP_SUCCESS) {
933 (void) ddi_prop_remove(DDI_DEV_T_NONE,
934 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
935 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
936 "prop update failed");
937 return (DDI_FAILURE);
938 }
939 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
940 MPTSAS_NUM_PHYS, numphys) !=
941 DDI_PROP_SUCCESS) {
942 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
943 return (DDI_FAILURE);
944 }
945
946 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
947 "phymask", phy_mask) !=
948 DDI_PROP_SUCCESS) {
949 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
950 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
951 "prop update failed");
952 return (DDI_FAILURE);
953 }
954
955 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
956 "dynamic-port", dynamic_port) !=
957 DDI_PROP_SUCCESS) {
958 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
959 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
960 "prop update failed");
961 return (DDI_FAILURE);
962 }
963 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
964 MPTSAS_VIRTUAL_PORT, 0) !=
965 DDI_PROP_SUCCESS) {
966 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
967 MPTSAS_VIRTUAL_PORT);
968 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
969 "prop update failed");
970 return (DDI_FAILURE);
971 }
972 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
973 &attached_devhdl);
974
975 mutex_enter(&mpt->m_mutex);
976 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
977 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
978 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
979 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
980 &pdev_hdl, &bay_num, &enclosure);
981 if (rval != DDI_SUCCESS) {
982 mptsas_log(mpt, CE_WARN,
983 "Failed to get device page0 for handle:%d",
984 attached_devhdl);
985 mutex_exit(&mpt->m_mutex);
986 return (DDI_FAILURE);
987 }
988
989 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
990 bzero(phymask, sizeof (phymask));
991 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
992 if (strcmp(phymask, iport) == 0) {
993 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
994 "%x",
995 mpt->m_phy_info[i].phy_mask);
996 }
997 }
998 mutex_exit(&mpt->m_mutex);
999
1000 bzero(attached_wwnstr, sizeof (attached_wwnstr));
1001 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
1002 attached_sas_wwn);
1003 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1004 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1005 DDI_PROP_SUCCESS) {
1006 (void) ddi_prop_remove(DDI_DEV_T_NONE,
1007 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1008 return (DDI_FAILURE);
1009 }
1010
1011 /* Create kstats for each phy on this iport */
1012
1013 mptsas_create_phy_stats(mpt, iport, dip);
1014
1015 /*
1016 * register sas hba iport with mdi (MPxIO/vhci)
1017 */
1018 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1019 dip, 0) == MDI_SUCCESS) {
1020 mpt->m_mpxio_enable = TRUE;
1021 }
1022 return (DDI_SUCCESS);
1023 }
1024
1025 /*
1026 * Notes:
1027 * Set up all device state and allocate data structures,
1028 * mutexes, condition variables, etc. for device operation.
1029 * Add interrupts needed.
1030 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1031 */
1032 static int
1033 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1034 {
1035 mptsas_t *mpt = NULL;
1036 int instance, i, j;
1037 int doneq_thread_num;
1038 char intr_added = 0;
1039 char map_setup = 0;
1040 char config_setup = 0;
1041 char hba_attach_setup = 0;
1042 char smp_attach_setup = 0;
1043 char mutex_init_done = 0;
1044 char event_taskq_create = 0;
1045 char dr_taskq_create = 0;
1046 char doneq_thread_create = 0;
1047 scsi_hba_tran_t *hba_tran;
1048 uint_t mem_bar = MEM_SPACE;
1049 int rval = DDI_FAILURE;
1050
1051 /* CONSTCOND */
1052 ASSERT(NO_COMPETING_THREADS);
1053
1054 if (scsi_hba_iport_unit_address(dip)) {
1055 return (mptsas_iport_attach(dip, cmd));
1056 }
1057
1058 switch (cmd) {
1059 case DDI_ATTACH:
1060 break;
1061
1062 case DDI_RESUME:
1063 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1064 return (DDI_FAILURE);
1065
1066 mpt = TRAN2MPT(hba_tran);
1067
1068 if (!mpt) {
1069 return (DDI_FAILURE);
1070 }
1071
1072 /*
1073 * Reset hardware and softc to "no outstanding commands"
1074 * Note that a check condition can result on first command
1075 * to a target.
1076 */
1077 mutex_enter(&mpt->m_mutex);
1078
1079 /*
1080 * raise power.
1081 */
1082 if (mpt->m_options & MPTSAS_OPT_PM) {
1083 mutex_exit(&mpt->m_mutex);
1084 (void) pm_busy_component(dip, 0);
1085 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1086 if (rval == DDI_SUCCESS) {
1087 mutex_enter(&mpt->m_mutex);
1088 } else {
1089 /*
1090 * The pm_raise_power() call above failed,
1091 * and that can only occur if we were unable
1092 * to reset the hardware. This is probably
1093 * due to unhealty hardware, and because
1094 * important filesystems(such as the root
1095 * filesystem) could be on the attached disks,
1096 * it would not be a good idea to continue,
1097 * as we won't be entirely certain we are
1098 * writing correct data. So we panic() here
1099 * to not only prevent possible data corruption,
1100 * but to give developers or end users a hope
1101 * of identifying and correcting any problems.
1102 */
1103 fm_panic("mptsas could not reset hardware "
1104 "during resume");
1105 }
1106 }
1107
1108 mpt->m_suspended = 0;
1109
1110 /*
1111 * Reinitialize ioc
1112 */
1113 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1114 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1115 mutex_exit(&mpt->m_mutex);
1116 if (mpt->m_options & MPTSAS_OPT_PM) {
1117 (void) pm_idle_component(dip, 0);
1118 }
1119 fm_panic("mptsas init chip fail during resume");
1120 }
1121 /*
1122 * mptsas_update_driver_data needs interrupts so enable them
1123 * first.
1124 */
1125 MPTSAS_ENABLE_INTR(mpt);
1126 mptsas_update_driver_data(mpt);
1127
1128 /* start requests, if possible */
1129 mptsas_restart_hba(mpt);
1130
1131 mutex_exit(&mpt->m_mutex);
1132
1133 /*
1134 * Restart watch thread
1135 */
1136 mutex_enter(&mptsas_global_mutex);
1137 if (mptsas_timeout_id == 0) {
1138 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1139 mptsas_tick);
1140 mptsas_timeouts_enabled = 1;
1141 }
1142 mutex_exit(&mptsas_global_mutex);
1143
1144 /* report idle status to pm framework */
1145 if (mpt->m_options & MPTSAS_OPT_PM) {
1146 (void) pm_idle_component(dip, 0);
1147 }
1148
1149 return (DDI_SUCCESS);
1150
1151 default:
1152 return (DDI_FAILURE);
1153
1154 }
1155
1156 instance = ddi_get_instance(dip);
1157
1158 /*
1159 * Allocate softc information.
1160 */
1161 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1162 mptsas_log(NULL, CE_WARN,
1163 "mptsas%d: cannot allocate soft state", instance);
1164 goto fail;
1165 }
1166
1167 mpt = ddi_get_soft_state(mptsas_state, instance);
1168
1169 if (mpt == NULL) {
1170 mptsas_log(NULL, CE_WARN,
1171 "mptsas%d: cannot get soft state", instance);
1172 goto fail;
1173 }
1174
1175 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1176 scsi_size_clean(dip);
1177
1178 mpt->m_dip = dip;
1179 mpt->m_instance = instance;
1180
1181 /* Make a per-instance copy of the structures */
1182 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1183 if (mptsas_use_64bit_msgaddr) {
1184 mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1185 } else {
1186 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1187 }
1188 mpt->m_reg_acc_attr = mptsas_dev_attr;
1189 mpt->m_dev_acc_attr = mptsas_dev_attr;
1190
1191 /*
1192 * Initialize FMA
1193 */
1194 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1195 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1196 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1197 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1198
1199 mptsas_fm_init(mpt);
1200
1201 if (mptsas_alloc_handshake_msg(mpt,
1202 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1203 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1204 goto fail;
1205 }
1206
1207 /*
1208 * Setup configuration space
1209 */
1210 if (mptsas_config_space_init(mpt) == FALSE) {
1211 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1212 goto fail;
1213 }
1214 config_setup++;
1215
1216 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1217 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1218 mptsas_log(mpt, CE_WARN, "map setup failed");
1219 goto fail;
1220 }
1221 map_setup++;
1222
1223 /*
1224 * A taskq is created for dealing with the event handler
1225 */
1226 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1227 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1228 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1229 goto fail;
1230 }
1231 event_taskq_create++;
1232
1233 /*
1234 * A taskq is created for dealing with dr events
1235 */
1236 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1237 "mptsas_dr_taskq",
1238 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1239 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1240 "failed");
1241 goto fail;
1242 }
1243 dr_taskq_create++;
1244
1245 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1246 0, "mptsas_doneq_thread_threshold_prop", 10);
1247 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1248 0, "mptsas_doneq_length_threshold_prop", 8);
1249 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1250 0, "mptsas_doneq_thread_n_prop", 8);
1251
1252 if (mpt->m_doneq_thread_n) {
1253 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1254 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1255
1256 mutex_enter(&mpt->m_doneq_mutex);
1257 mpt->m_doneq_thread_id =
1258 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1259 * mpt->m_doneq_thread_n, KM_SLEEP);
1260
1261 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1262 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1263 CV_DRIVER, NULL);
1264 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1265 MUTEX_DRIVER, NULL);
1266 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1267 mpt->m_doneq_thread_id[j].flag |=
1268 MPTSAS_DONEQ_THREAD_ACTIVE;
1269 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1270 mpt->m_doneq_thread_id[j].arg.t = j;
1271 mpt->m_doneq_thread_id[j].threadp =
1272 thread_create(NULL, 0, mptsas_doneq_thread,
1273 &mpt->m_doneq_thread_id[j].arg,
1274 0, &p0, TS_RUN, minclsyspri);
1275 mpt->m_doneq_thread_id[j].donetail =
1276 &mpt->m_doneq_thread_id[j].doneq;
1277 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1278 }
1279 mutex_exit(&mpt->m_doneq_mutex);
1280 doneq_thread_create++;
1281 }
1282
1283 /*
1284 * Disable hardware interrupt since we're not ready to
1285 * handle it yet.
1286 */
1287 MPTSAS_DISABLE_INTR(mpt);
1288 if (mptsas_register_intrs(mpt) == FALSE)
1289 goto fail;
1290 intr_added++;
1291
1292 /* Initialize mutex used in interrupt handler */
1293 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1294 DDI_INTR_PRI(mpt->m_intr_pri));
1295 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1296 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1297 DDI_INTR_PRI(mpt->m_intr_pri));
1298 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1299 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1300 NULL, MUTEX_DRIVER,
1301 DDI_INTR_PRI(mpt->m_intr_pri));
1302 }
1303
1304 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1305 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1306 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1307 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1308 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1309 mutex_init_done++;
1310
1311 mutex_enter(&mpt->m_mutex);
1312 /*
1313 * Initialize power management component
1314 */
1315 if (mpt->m_options & MPTSAS_OPT_PM) {
1316 if (mptsas_init_pm(mpt)) {
1317 mutex_exit(&mpt->m_mutex);
1318 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1319 "failed");
1320 goto fail;
1321 }
1322 }
1323
1324 /*
1325 * Initialize chip using Message Unit Reset, if allowed
1326 */
1327 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1328 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1329 mutex_exit(&mpt->m_mutex);
1330 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1331 goto fail;
1332 }
1333
1334 /*
1335 * Fill in the phy_info structure and get the base WWID
1336 */
1337 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1338 mptsas_log(mpt, CE_WARN,
1339 "mptsas_get_manufacture_page5 failed!");
1340 goto fail;
1341 }
1342
1343 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1344 mptsas_log(mpt, CE_WARN,
1345 "mptsas_get_sas_io_unit_page_hndshk failed!");
1346 goto fail;
1347 }
1348
1349 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1350 mptsas_log(mpt, CE_WARN,
1351 "mptsas_get_manufacture_page0 failed!");
1352 goto fail;
1353 }
1354
1355 mutex_exit(&mpt->m_mutex);
1356
1357 /*
1358 * Register the iport for multiple port HBA
1359 */
1360 mptsas_iport_register(mpt);
1361
1362 /*
1363 * initialize SCSI HBA transport structure
1364 */
1365 if (mptsas_hba_setup(mpt) == FALSE)
1366 goto fail;
1367 hba_attach_setup++;
1368
1369 if (mptsas_smp_setup(mpt) == FALSE)
1370 goto fail;
1371 smp_attach_setup++;
1372
1373 if (mptsas_cache_create(mpt) == FALSE)
1374 goto fail;
1375
1376 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1377 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1378 if (mpt->m_scsi_reset_delay == 0) {
1379 mptsas_log(mpt, CE_NOTE,
1380 "scsi_reset_delay of 0 is not recommended,"
1381 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1382 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1383 }
1384
1385 /*
1386 * Initialize the wait and done FIFO queue
1387 */
1388 mpt->m_donetail = &mpt->m_doneq;
1389 mpt->m_waitqtail = &mpt->m_waitq;
1390 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1391 mpt->m_tx_draining = 0;
1392
1393 /*
1394 * ioc cmd queue initialize
1395 */
1396 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1397 mpt->m_dev_handle = 0xFFFF;
1398
1399 MPTSAS_ENABLE_INTR(mpt);
1400
1401 /*
1402 * enable event notification
1403 */
1404 mutex_enter(&mpt->m_mutex);
1405 if (mptsas_ioc_enable_event_notification(mpt)) {
1406 mutex_exit(&mpt->m_mutex);
1407 goto fail;
1408 }
1409 mutex_exit(&mpt->m_mutex);
1410
1411 /*
1412 * Initialize PHY info for smhba
1413 */
1414 if (mptsas_smhba_setup(mpt)) {
1415 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1416 "failed");
1417 goto fail;
1418 }
1419
1420 /* Check all dma handles allocated in attach */
1421 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1422 != DDI_SUCCESS) ||
1423 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1424 != DDI_SUCCESS) ||
1425 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1426 != DDI_SUCCESS) ||
1427 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1428 != DDI_SUCCESS) ||
1429 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1430 != DDI_SUCCESS)) {
1431 goto fail;
1432 }
1433
1434 /* Check all acc handles allocated in attach */
1435 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1436 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1437 != DDI_SUCCESS) ||
1438 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1439 != DDI_SUCCESS) ||
1440 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1441 != DDI_SUCCESS) ||
1442 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1443 != DDI_SUCCESS) ||
1444 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1445 != DDI_SUCCESS) ||
1446 (mptsas_check_acc_handle(mpt->m_config_handle)
1447 != DDI_SUCCESS)) {
1448 goto fail;
1449 }
1450
1451 /*
1452 * After this point, we are not going to fail the attach.
1453 */
1454 /*
1455 * used for mptsas_watch
1456 */
1457 mptsas_list_add(mpt);
1458
1459 mutex_enter(&mptsas_global_mutex);
1460 if (mptsas_timeouts_enabled == 0) {
1461 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1462 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1463
1464 mptsas_tick = mptsas_scsi_watchdog_tick *
1465 drv_usectohz((clock_t)1000000);
1466
1467 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1468 mptsas_timeouts_enabled = 1;
1469 }
1470 mutex_exit(&mptsas_global_mutex);
1471
1472 /* Print message of HBA present */
1473 ddi_report_dev(dip);
1474
1475 /* report idle status to pm framework */
1476 if (mpt->m_options & MPTSAS_OPT_PM) {
1477 (void) pm_idle_component(dip, 0);
1478 }
1479
1480 return (DDI_SUCCESS);
1481
1482 fail:
1483 mptsas_log(mpt, CE_WARN, "attach failed");
1484 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1485 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1486 if (mpt) {
1487 mutex_enter(&mptsas_global_mutex);
1488
1489 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1490 timeout_id_t tid = mptsas_timeout_id;
1491 mptsas_timeouts_enabled = 0;
1492 mptsas_timeout_id = 0;
1493 mutex_exit(&mptsas_global_mutex);
1494 (void) untimeout(tid);
1495 mutex_enter(&mptsas_global_mutex);
1496 }
1497 mutex_exit(&mptsas_global_mutex);
1498 /* deallocate in reverse order */
1499 mptsas_cache_destroy(mpt);
1500
1501 if (smp_attach_setup) {
1502 mptsas_smp_teardown(mpt);
1503 }
1504 if (hba_attach_setup) {
1505 mptsas_hba_teardown(mpt);
1506 }
1507
1508 if (mpt->m_targets)
1509 refhash_destroy(mpt->m_targets);
1510 if (mpt->m_smp_targets)
1511 refhash_destroy(mpt->m_smp_targets);
1512
1513 if (mpt->m_active) {
1514 mptsas_free_active_slots(mpt);
1515 }
1516 if (intr_added) {
1517 mptsas_unregister_intrs(mpt);
1518 }
1519
1520 if (doneq_thread_create) {
1521 mutex_enter(&mpt->m_doneq_mutex);
1522 doneq_thread_num = mpt->m_doneq_thread_n;
1523 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1524 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1525 mpt->m_doneq_thread_id[j].flag &=
1526 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1527 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1528 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1529 }
1530 while (mpt->m_doneq_thread_n) {
1531 cv_wait(&mpt->m_doneq_thread_cv,
1532 &mpt->m_doneq_mutex);
1533 }
1534 for (j = 0; j < doneq_thread_num; j++) {
1535 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1536 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1537 }
1538 kmem_free(mpt->m_doneq_thread_id,
1539 sizeof (mptsas_doneq_thread_list_t)
1540 * doneq_thread_num);
1541 mutex_exit(&mpt->m_doneq_mutex);
1542 cv_destroy(&mpt->m_doneq_thread_cv);
1543 mutex_destroy(&mpt->m_doneq_mutex);
1544 }
1545 if (event_taskq_create) {
1546 ddi_taskq_destroy(mpt->m_event_taskq);
1547 }
1548 if (dr_taskq_create) {
1549 ddi_taskq_destroy(mpt->m_dr_taskq);
1550 }
1551 if (mutex_init_done) {
1552 mutex_destroy(&mpt->m_tx_waitq_mutex);
1553 mutex_destroy(&mpt->m_passthru_mutex);
1554 mutex_destroy(&mpt->m_mutex);
1555 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1556 mutex_destroy(
1557 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1558 }
1559 cv_destroy(&mpt->m_cv);
1560 cv_destroy(&mpt->m_passthru_cv);
1561 cv_destroy(&mpt->m_fw_cv);
1562 cv_destroy(&mpt->m_config_cv);
1563 cv_destroy(&mpt->m_fw_diag_cv);
1564 }
1565
1566 if (map_setup) {
1567 mptsas_cfg_fini(mpt);
1568 }
1569 if (config_setup) {
1570 mptsas_config_space_fini(mpt);
1571 }
1572 mptsas_free_handshake_msg(mpt);
1573 mptsas_hba_fini(mpt);
1574
1575 mptsas_fm_fini(mpt);
1576 ddi_soft_state_free(mptsas_state, instance);
1577 ddi_prop_remove_all(dip);
1578 }
1579 return (DDI_FAILURE);
1580 }
1581
/*
 * DDI_SUSPEND handler: quiesce one mptsas instance.  Cancels this
 * instance's timeouts, and when every instance in the global list is
 * suspended, cancels the driver-wide watch/reset-watch timeouts as well.
 * If the device is at full power, it also disables interrupts, syncs IR
 * via a RAID shutdown action, and drains the taskqs.
 * Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes carry no per-instance hardware state; nothing to do */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* m_suspended counts nested suspends; only the first one does work */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/*
		 * Drop m_mutex across untimeout(): the timeout handler may
		 * itself be blocked on m_mutex, and untimeout() waits for a
		 * running handler to complete.
		 */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		/* same lock-drop pattern as above */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	/* g == NULL means no instance is still active */
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			/* drop the global mutex across untimeout(), as above */
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1688
1689 #ifdef __sparc
1690 /*ARGSUSED*/
1691 static int
1692 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1693 {
1694 mptsas_t *mpt;
1695 scsi_hba_tran_t *tran;
1696
1697 /*
1698 * If this call is for iport, just return.
1699 */
1700 if (scsi_hba_iport_unit_address(devi))
1701 return (DDI_SUCCESS);
1702
1703 if ((tran = ddi_get_driver_private(devi)) == NULL)
1704 return (DDI_SUCCESS);
1705
1706 if ((mpt = TRAN2MPT(tran)) == NULL)
1707 return (DDI_SUCCESS);
1708
1709 /*
1710 * Send RAID action system shutdown to sync IR. Disable HBA
1711 * interrupts in hardware first.
1712 */
1713 MPTSAS_DISABLE_INTR(mpt);
1714 mptsas_raid_action_system_shutdown(mpt);
1715
1716 return (DDI_SUCCESS);
1717 }
1718 #else /* __sparc */
1719 /*
1720 * quiesce(9E) entry point.
1721 *
1722 * This function is called when the system is single-threaded at high
1723 * PIL with preemption disabled. Therefore, this function must not be
1724 * blocked.
1725 *
1726 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1727 * DDI_FAILURE indicates an error condition and should almost never happen.
1728 */
1729 static int
1730 mptsas_quiesce(dev_info_t *devi)
1731 {
1732 mptsas_t *mpt;
1733 scsi_hba_tran_t *tran;
1734
1735 /*
1736 * If this call is for iport, just return.
1737 */
1738 if (scsi_hba_iport_unit_address(devi))
1739 return (DDI_SUCCESS);
1740
1741 if ((tran = ddi_get_driver_private(devi)) == NULL)
1742 return (DDI_SUCCESS);
1743
1744 if ((mpt = TRAN2MPT(tran)) == NULL)
1745 return (DDI_SUCCESS);
1746
1747 /* Disable HBA interrupts in hardware */
1748 MPTSAS_DISABLE_INTR(mpt);
1749 /* Send RAID action system shutdonw to sync IR */
1750 mptsas_raid_action_system_shutdown(mpt);
1751
1752 return (DDI_SUCCESS);
1753 }
1754 #endif /* __sparc */
1755
1756 /*
1757 * detach(9E). Remove all device allocations and system resources;
1758 * disable device interrupts.
1759 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1760 */
1761 static int
1762 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1763 {
1764 /* CONSTCOND */
1765 ASSERT(NO_COMPETING_THREADS);
1766 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1767
1768 switch (cmd) {
1769 case DDI_DETACH:
1770 return (mptsas_do_detach(devi));
1771
1772 case DDI_SUSPEND:
1773 return (mptsas_suspend(devi));
1774
1775 default:
1776 return (DDI_FAILURE);
1777 }
1778 /* NOTREACHED */
1779 }
1780
/*
 * DDI_DETACH worker.  For an iport node: free MPxIO path info and
 * unregister the pHCI.  For the HBA node: raise power to D0, shut the
 * IOC down (RAID sync + message unit reset), then release every resource
 * acquired in mptsas_attach() in reverse order.
 * Returns DDI_SUCCESS, or DDI_FAILURE if paths/power prevent detach.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* free every client path; any busy path aborts detach */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/* Shut down the doneq worker threads and reclaim their state. */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		/* clear ACTIVE and wake each thread so it exits */
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		/* each exiting thread decrements m_doneq_thread_n */
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* drop m_mutex across untimeout(); the handler may need it */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1996
1997 static void
1998 mptsas_list_add(mptsas_t *mpt)
1999 {
2000 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2001
2002 if (mptsas_head == NULL) {
2003 mptsas_head = mpt;
2004 } else {
2005 mptsas_tail->m_next = mpt;
2006 }
2007 mptsas_tail = mpt;
2008 rw_exit(&mptsas_global_rwlock);
2009 }
2010
2011 static void
2012 mptsas_list_del(mptsas_t *mpt)
2013 {
2014 mptsas_t *m;
2015 /*
2016 * Remove device instance from the global linked list
2017 */
2018 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2019 if (mptsas_head == mpt) {
2020 m = mptsas_head = mpt->m_next;
2021 } else {
2022 for (m = mptsas_head; m != NULL; m = m->m_next) {
2023 if (m->m_next == mpt) {
2024 m->m_next = mpt->m_next;
2025 break;
2026 }
2027 }
2028 if (m == NULL) {
2029 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2030 }
2031 }
2032
2033 if (mptsas_tail == mpt) {
2034 mptsas_tail = m;
2035 }
2036 rw_exit(&mptsas_global_rwlock);
2037 }
2038
2039 static int
2040 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2041 {
2042 ddi_dma_attr_t task_dma_attrs;
2043
2044 mpt->m_hshk_dma_size = 0;
2045 task_dma_attrs = mpt->m_msg_dma_attr;
2046 task_dma_attrs.dma_attr_sgllen = 1;
2047 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2048
2049 /* allocate Task Management ddi_dma resources */
2050 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2051 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2052 alloc_size, NULL) == FALSE) {
2053 return (DDI_FAILURE);
2054 }
2055 mpt->m_hshk_dma_size = alloc_size;
2056
2057 return (DDI_SUCCESS);
2058 }
2059
2060 static void
2061 mptsas_free_handshake_msg(mptsas_t *mpt)
2062 {
2063 if (mpt->m_hshk_dma_size == 0)
2064 return;
2065 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2066 mpt->m_hshk_dma_size = 0;
2067 }
2068
2069 static int
2070 mptsas_hba_setup(mptsas_t *mpt)
2071 {
2072 scsi_hba_tran_t *hba_tran;
2073 int tran_flags;
2074
2075 /* Allocate a transport structure */
2076 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
2077 SCSI_HBA_CANSLEEP);
2078 ASSERT(mpt->m_tran != NULL);
2079
2080 hba_tran->tran_hba_private = mpt;
2081 hba_tran->tran_tgt_private = NULL;
2082
2083 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
2084 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;
2085
2086 hba_tran->tran_start = mptsas_scsi_start;
2087 hba_tran->tran_reset = mptsas_scsi_reset;
2088 hba_tran->tran_abort = mptsas_scsi_abort;
2089 hba_tran->tran_getcap = mptsas_scsi_getcap;
2090 hba_tran->tran_setcap = mptsas_scsi_setcap;
2091 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
2092 hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;
2093
2094 hba_tran->tran_dmafree = mptsas_scsi_dmafree;
2095 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
2096 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;
2097
2098 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
2099 hba_tran->tran_get_name = mptsas_get_name;
2100
2101 hba_tran->tran_quiesce = mptsas_scsi_quiesce;
2102 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
2103 hba_tran->tran_bus_reset = NULL;
2104
2105 hba_tran->tran_add_eventcall = NULL;
2106 hba_tran->tran_get_eventcookie = NULL;
2107 hba_tran->tran_post_event = NULL;
2108 hba_tran->tran_remove_eventcall = NULL;
2109
2110 hba_tran->tran_bus_config = mptsas_bus_config;
2111
2112 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2113
2114 /*
2115 * All children of the HBA are iports. We need tran was cloned.
2116 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2117 * inherited to iport's tran vector.
2118 */
2119 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2120
2121 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2122 hba_tran, tran_flags) != DDI_SUCCESS) {
2123 mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2124 scsi_hba_tran_free(hba_tran);
2125 mpt->m_tran = NULL;
2126 return (FALSE);
2127 }
2128 return (TRUE);
2129 }
2130
2131 static void
2132 mptsas_hba_teardown(mptsas_t *mpt)
2133 {
2134 (void) scsi_hba_detach(mpt->m_dip);
2135 if (mpt->m_tran != NULL) {
2136 scsi_hba_tran_free(mpt->m_tran);
2137 mpt->m_tran = NULL;
2138 }
2139 }
2140
/*
 * Register SCSA iports for the HBA: one iport per group of phys that
 * share the same attached port, named by the hex value of that group's
 * phymask, plus the fixed virtual iport "v0" used for RAID volumes.
 * m_mutex is dropped around each scsi_hba_iport_register() call.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip phys with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Skip phys already covered by an earlier port group. */
		if ((mask & (1 << i)) != 0)
			continue;

		/* Gather every attached phy belonging to the same port. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the group's phymask in each member phy. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* The iport is named after the hex phymask value. */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2195
2196 static int
2197 mptsas_smp_setup(mptsas_t *mpt)
2198 {
2199 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2200 ASSERT(mpt->m_smptran != NULL);
2201 mpt->m_smptran->smp_tran_hba_private = mpt;
2202 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2203 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2204 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2205 smp_hba_tran_free(mpt->m_smptran);
2206 mpt->m_smptran = NULL;
2207 return (FALSE);
2208 }
2209 /*
2210 * Initialize smp hash table
2211 */
2212 mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
2213 mptsas_target_addr_hash, mptsas_target_addr_cmp,
2214 mptsas_smp_free, sizeof (mptsas_smp_t),
2215 offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
2216 KM_SLEEP);
2217 mpt->m_smp_devhdl = 0xFFFF;
2218
2219 return (TRUE);
2220 }
2221
2222 static void
2223 mptsas_smp_teardown(mptsas_t *mpt)
2224 {
2225 (void) smp_hba_detach(mpt->m_dip);
2226 if (mpt->m_smptran != NULL) {
2227 smp_hba_tran_free(mpt->m_smptran);
2228 mpt->m_smptran = NULL;
2229 }
2230 mpt->m_smp_devhdl = 0;
2231 }
2232
2233 static int
2234 mptsas_cache_create(mptsas_t *mpt)
2235 {
2236 int instance = mpt->m_instance;
2237 char buf[64];
2238
2239 /*
2240 * create kmem cache for packets
2241 */
2242 (void) sprintf(buf, "mptsas%d_cache", instance);
2243 mpt->m_kmem_cache = kmem_cache_create(buf,
2244 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 16,
2245 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2246 NULL, (void *)mpt, NULL, 0);
2247
2248 if (mpt->m_kmem_cache == NULL) {
2249 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2250 return (FALSE);
2251 }
2252
2253 /*
2254 * create kmem cache for extra SGL frames if SGL cannot
2255 * be accomodated into main request frame.
2256 */
2257 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2258 mpt->m_cache_frames = kmem_cache_create(buf,
2259 sizeof (mptsas_cache_frames_t), 16,
2260 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2261 NULL, (void *)mpt, NULL, 0);
2262
2263 if (mpt->m_cache_frames == NULL) {
2264 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2265 return (FALSE);
2266 }
2267
2268 return (TRUE);
2269 }
2270
2271 static void
2272 mptsas_cache_destroy(mptsas_t *mpt)
2273 {
2274 /* deallocate in reverse order */
2275 if (mpt->m_cache_frames) {
2276 kmem_cache_destroy(mpt->m_cache_frames);
2277 mpt->m_cache_frames = NULL;
2278 }
2279 if (mpt->m_kmem_cache) {
2280 kmem_cache_destroy(mpt->m_kmem_cache);
2281 mpt->m_kmem_cache = NULL;
2282 }
2283 }
2284
/*
 * power(9E) entry point.
 *
 * Raises the IOC to full power (PM_LEVEL_D0) or lowers it (PM_LEVEL_D3).
 * Iport nodes are not power-managed here and return DDI_SUCCESS
 * immediately.  A busy adapter refuses to have its power level lowered.
 * On power-up the IOC is polled out of the reset state and hard-reset
 * if it still fails to reach the operational state.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t *mpt;
	int rval = DDI_SUCCESS;
	int polls = 0;
	uint32_t ioc_status;

	/* Power management applies only to the HBA node, not iports. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls of the Doorbell at 10ms intervals).
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 * ioc_status still holds the last Doorbell value read above.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2356
/*
 * Initialize configuration space and figure out which
 * chip and revison of the chip the mpt driver is using.
 *
 * Idempotent: returns TRUE immediately if config space is already
 * mapped.  Returns FALSE only when pci_config_setup() fails.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Already mapped; nothing more to do. */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.  Clear a latched parity error,
	 * if any, by writing the PERROR bit back.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team. This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	/* Discover and record PCI capabilities. */
	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2415
2416 static void
2417 mptsas_config_space_fini(mptsas_t *mpt)
2418 {
2419 if (mpt->m_config_handle != NULL) {
2420 mptsas_disable_bus_master(mpt);
2421 pci_config_teardown(&mpt->m_config_handle);
2422 mpt->m_config_handle = NULL;
2423 }
2424 }
2425
2426 static void
2427 mptsas_setup_cmd_reg(mptsas_t *mpt)
2428 {
2429 ushort_t cmdreg;
2430
2431 /*
2432 * Set the command register to the needed values.
2433 */
2434 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2435 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2436 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2437 cmdreg &= ~PCI_COMM_IO;
2438 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2439 }
2440
2441 static void
2442 mptsas_disable_bus_master(mptsas_t *mpt)
2443 {
2444 ushort_t cmdreg;
2445
2446 /*
2447 * Clear the master enable bit in the PCI command register.
2448 * This prevents any bus mastering activity like DMA.
2449 */
2450 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2451 cmdreg &= ~PCI_COMM_ME;
2452 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2453 }
2454
2455 int
2456 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2457 {
2458 ddi_dma_attr_t attrs;
2459
2460 attrs = mpt->m_io_dma_attr;
2461 attrs.dma_attr_sgllen = 1;
2462
2463 ASSERT(dma_statep != NULL);
2464
2465 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2466 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2467 &dma_statep->cookie) == FALSE) {
2468 return (DDI_FAILURE);
2469 }
2470
2471 return (DDI_SUCCESS);
2472 }
2473
2474 void
2475 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2476 {
2477 ASSERT(dma_statep != NULL);
2478 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2479 dma_statep->size = 0;
2480 }
2481
2482 int
2483 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2484 {
2485 ddi_dma_attr_t attrs;
2486 ddi_dma_handle_t dma_handle;
2487 caddr_t memp;
2488 ddi_acc_handle_t accessp;
2489 int rval;
2490
2491 ASSERT(mutex_owned(&mpt->m_mutex));
2492
2493 attrs = mpt->m_msg_dma_attr;
2494 attrs.dma_attr_sgllen = 1;
2495 attrs.dma_attr_granular = size;
2496
2497 if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2498 &accessp, &memp, size, NULL) == FALSE) {
2499 return (DDI_FAILURE);
2500 }
2501
2502 rval = (*callback) (mpt, memp, var, accessp);
2503
2504 if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2505 (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2506 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2507 rval = DDI_FAILURE;
2508 }
2509
2510 mptsas_dma_addr_destroy(&dma_handle, &accessp);
2511 return (rval);
2512
2513 }
2514
2515 static int
2516 mptsas_alloc_request_frames(mptsas_t *mpt)
2517 {
2518 ddi_dma_attr_t frame_dma_attrs;
2519 caddr_t memp;
2520 ddi_dma_cookie_t cookie;
2521 size_t mem_size;
2522
2523 /*
2524 * re-alloc when it has already alloced
2525 */
2526 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2527 &mpt->m_acc_req_frame_hdl);
2528
2529 /*
2530 * The size of the request frame pool is:
2531 * Number of Request Frames * Request Frame Size
2532 */
2533 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2534
2535 /*
2536 * set the DMA attributes. System Request Message Frames must be
2537 * aligned on a 16-byte boundry.
2538 */
2539 frame_dma_attrs = mpt->m_msg_dma_attr;
2540 frame_dma_attrs.dma_attr_align = 16;
2541 frame_dma_attrs.dma_attr_sgllen = 1;
2542
2543 /*
2544 * allocate the request frame pool.
2545 */
2546 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2547 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2548 mem_size, &cookie) == FALSE) {
2549 return (DDI_FAILURE);
2550 }
2551
2552 /*
2553 * Store the request frame memory address. This chip uses this
2554 * address to dma to and from the driver's frame. The second
2555 * address is the address mpt uses to fill in the frame.
2556 */
2557 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2558 mpt->m_req_frame = memp;
2559
2560 /*
2561 * Clear the request frame pool.
2562 */
2563 bzero(mpt->m_req_frame, mem_size);
2564
2565 return (DDI_SUCCESS);
2566 }
2567
2568 static int
2569 mptsas_alloc_reply_frames(mptsas_t *mpt)
2570 {
2571 ddi_dma_attr_t frame_dma_attrs;
2572 caddr_t memp;
2573 ddi_dma_cookie_t cookie;
2574 size_t mem_size;
2575
2576 /*
2577 * re-alloc when it has already alloced
2578 */
2579 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2580 &mpt->m_acc_reply_frame_hdl);
2581
2582 /*
2583 * The size of the reply frame pool is:
2584 * Number of Reply Frames * Reply Frame Size
2585 */
2586 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2587
2588 /*
2589 * set the DMA attributes. System Reply Message Frames must be
2590 * aligned on a 4-byte boundry. This is the default.
2591 */
2592 frame_dma_attrs = mpt->m_msg_dma_attr;
2593 frame_dma_attrs.dma_attr_sgllen = 1;
2594
2595 /*
2596 * allocate the reply frame pool
2597 */
2598 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2599 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2600 mem_size, &cookie) == FALSE) {
2601 return (DDI_FAILURE);
2602 }
2603
2604 /*
2605 * Store the reply frame memory address. This chip uses this
2606 * address to dma to and from the driver's frame. The second
2607 * address is the address mpt uses to process the frame.
2608 */
2609 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2610 mpt->m_reply_frame = memp;
2611
2612 /*
2613 * Clear the reply frame pool.
2614 */
2615 bzero(mpt->m_reply_frame, mem_size);
2616
2617 return (DDI_SUCCESS);
2618 }
2619
2620 static int
2621 mptsas_alloc_free_queue(mptsas_t *mpt)
2622 {
2623 ddi_dma_attr_t frame_dma_attrs;
2624 caddr_t memp;
2625 ddi_dma_cookie_t cookie;
2626 size_t mem_size;
2627
2628 /*
2629 * re-alloc when it has already alloced
2630 */
2631 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2632 &mpt->m_acc_free_queue_hdl);
2633
2634 /*
2635 * The reply free queue size is:
2636 * Reply Free Queue Depth * 4
2637 * The "4" is the size of one 32 bit address (low part of 64-bit
2638 * address)
2639 */
2640 mem_size = mpt->m_free_queue_depth * 4;
2641
2642 /*
2643 * set the DMA attributes The Reply Free Queue must be aligned on a
2644 * 16-byte boundry.
2645 */
2646 frame_dma_attrs = mpt->m_msg_dma_attr;
2647 frame_dma_attrs.dma_attr_align = 16;
2648 frame_dma_attrs.dma_attr_sgllen = 1;
2649
2650 /*
2651 * allocate the reply free queue
2652 */
2653 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2654 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2655 mem_size, &cookie) == FALSE) {
2656 return (DDI_FAILURE);
2657 }
2658
2659 /*
2660 * Store the reply free queue memory address. This chip uses this
2661 * address to read from the reply free queue. The second address
2662 * is the address mpt uses to manage the queue.
2663 */
2664 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2665 mpt->m_free_queue = memp;
2666
2667 /*
2668 * Clear the reply free queue memory.
2669 */
2670 bzero(mpt->m_free_queue, mem_size);
2671
2672 return (DDI_SUCCESS);
2673 }
2674
2675 static int
2676 mptsas_alloc_post_queue(mptsas_t *mpt)
2677 {
2678 ddi_dma_attr_t frame_dma_attrs;
2679 caddr_t memp;
2680 ddi_dma_cookie_t cookie;
2681 size_t mem_size;
2682
2683 /*
2684 * re-alloc when it has already alloced
2685 */
2686 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2687 &mpt->m_acc_post_queue_hdl);
2688
2689 /*
2690 * The reply descriptor post queue size is:
2691 * Reply Descriptor Post Queue Depth * 8
2692 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2693 */
2694 mem_size = mpt->m_post_queue_depth * 8;
2695
2696 /*
2697 * set the DMA attributes. The Reply Descriptor Post Queue must be
2698 * aligned on a 16-byte boundry.
2699 */
2700 frame_dma_attrs = mpt->m_msg_dma_attr;
2701 frame_dma_attrs.dma_attr_align = 16;
2702 frame_dma_attrs.dma_attr_sgllen = 1;
2703
2704 /*
2705 * allocate the reply post queue
2706 */
2707 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2708 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2709 mem_size, &cookie) == FALSE) {
2710 return (DDI_FAILURE);
2711 }
2712
2713 /*
2714 * Store the reply descriptor post queue memory address. This chip
2715 * uses this address to write to the reply descriptor post queue. The
2716 * second address is the address mpt uses to manage the queue.
2717 */
2718 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2719 mpt->m_post_queue = memp;
2720
2721 /*
2722 * Clear the reply post queue memory.
2723 */
2724 bzero(mpt->m_post_queue, mem_size);
2725
2726 return (DDI_SUCCESS);
2727 }
2728
2729 static void
2730 mptsas_alloc_reply_args(mptsas_t *mpt)
2731 {
2732 if (mpt->m_replyh_args == NULL) {
2733 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2734 mpt->m_max_replies, KM_SLEEP);
2735 }
2736 }
2737
2738 static int
2739 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2740 {
2741 mptsas_cache_frames_t *frames = NULL;
2742 if (cmd->cmd_extra_frames == NULL) {
2743 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2744 if (frames == NULL) {
2745 return (DDI_FAILURE);
2746 }
2747 cmd->cmd_extra_frames = frames;
2748 }
2749 return (DDI_SUCCESS);
2750 }
2751
2752 static void
2753 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2754 {
2755 if (cmd->cmd_extra_frames) {
2756 kmem_cache_free(mpt->m_cache_frames,
2757 (void *)cmd->cmd_extra_frames);
2758 cmd->cmd_extra_frames = NULL;
2759 }
2760 }
2761
/*
 * Release the device register access mapping held in m_datap.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2768
2769 static void
2770 mptsas_hba_fini(mptsas_t *mpt)
2771 {
2772 NDBG0(("mptsas_hba_fini"));
2773
2774 /*
2775 * Free up any allocated memory
2776 */
2777 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2778 &mpt->m_acc_req_frame_hdl);
2779
2780 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2781 &mpt->m_acc_reply_frame_hdl);
2782
2783 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2784 &mpt->m_acc_free_queue_hdl);
2785
2786 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2787 &mpt->m_acc_post_queue_hdl);
2788
2789 if (mpt->m_replyh_args != NULL) {
2790 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2791 * mpt->m_max_replies);
2792 }
2793 }
2794
/*
 * Build the unit-address string for a child LUN node into "name"
 * (at most "len" bytes): "pPHY,LUN" when the node carries a
 * "sata-phy" property, otherwise "<target-port>,LUN" from the
 * target-port property.  Returns DDI_FAILURE when neither property
 * is present; a truncated name is logged but still DDI_SUCCESS.
 */
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int lun = 0;
	char *sas_wwn = NULL;
	int phynum = -1;
	int reallen = 0;

	/* Get the LUN number (LUN_PROP) for this child. */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		return (DDI_FAILURE);
	}

	/* Truncation is unexpected (ASSERT) but treated as non-fatal. */
	ASSERT(reallen < len);
	if (reallen >= len) {
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}
2832
/*
 * tran_tgt_init(9E) - target device instance initialization
 *
 * Looks up the mptsas_target_t for the unit address (SAS WWN plus
 * phymask, both obtained from device/pathinfo properties) and hangs
 * it, together with the LUN, off hba_tran->tran_tgt_private.  For
 * direct-attached SATA/ATAPI devices the "inquiry-*" properties are
 * overridden using ATA IDENTIFY data from VPD page 0x89.  Returns
 * DDI_FAILURE when the node is not persistent or the target no
 * longer exists.
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA. Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	mptsas_phymask_t phymask = 0;
	uint64_t sas_wwn = 0;
	mptsas_target_addr_t addr;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/* Only persistent nodes are initialized here. */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		/* mpxio path: LUN and WWN come from pathinfo properties. */
		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Non-mpxio path: read LUN and WWN off the devinfo node. */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}

	ASSERT((sas_wwn != 0) || (phymask != 0));
	addr.mta_wwn = sas_wwn;
	addr.mta_phymask = phymask;
	/* Find the driver's record of this target under m_mutex. */
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_lookup(mpt->m_targets, &addr);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	/* First-time init: attach LUN and target to the tran vector. */
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* mpxio clients need no inquiry-property fixups here. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		/* m_mutex is not held across the inquiry below. */
		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/* Note: inquiry failure still returns DDI_SUCCESS. */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* ATA IDENTIFY data starts at byte 60 of the VPD page. */
		sid = (void *)(&inq89[60]);

		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into into vid/pid
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
3026 /*
3027 * tran_tgt_free(9E) - target device instance deallocation
3028 */
3029 static void
3030 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3031 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3032 {
3033 #ifndef __lock_lint
3034 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3035 #endif
3036
3037 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3038
3039 if (tgt_private != NULL) {
3040 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3041 hba_tran->tran_tgt_private = NULL;
3042 }
3043 }
3044
3045 /*
3046 * scsi_pkt handling
3047 *
3048 * Visible to the external world via the transport structure.
3049 */
3050
3051 /*
3052 * Notes:
3053 * - transport the command to the addressed SCSI target/lun device
3054 * - normal operation is to schedule the command to be transported,
3055 * and return TRAN_ACCEPT if this is successful.
3056 * - if NO_INTR, tran_start must poll device for command completion
3057 */
/*
 * tran_start(9E) - transport the command described by pkt.
 * Returns TRAN_ACCEPT when the command is accepted or queued,
 * TRAN_BUSY / TRAN_FATAL_ERROR / other TRAN_* codes otherwise.
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t *mpt = PKT2MPT(pkt);
	mptsas_cmd_t *cmd = PKT2CMD(pkt);
	int rval;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	/* Non-DEBUG safety net for a command with no target. */
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmd. An extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq. An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		/* Got m_mutex without blocking: drain queue and accept. */
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled commands always block for m_mutex. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/* Another thread is draining: append only. */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3200
3201 /*
3202 * Accept all the queued cmds(if any) before accept the current one.
3203 */
3204 static int
3205 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3206 {
3207 int rval;
3208 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3209
3210 ASSERT(mutex_owned(&mpt->m_mutex));
3211 /*
3212 * The call to mptsas_accept_tx_waitq() must always be performed
3213 * because that is where mpt->m_tx_draining is cleared.
3214 */
3215 mutex_enter(&mpt->m_tx_waitq_mutex);
3216 mptsas_accept_tx_waitq(mpt);
3217 mutex_exit(&mpt->m_tx_waitq_mutex);
3218 /*
3219 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3220 * in this case, m_mutex is acquired.
3221 */
3222 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3223 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3224 /*
3225 * The command should be allowed to retry by returning
3226 * TRAN_BUSY to stall the I/O's which come from
3227 * scsi_vhci since the device/path is in unstable state
3228 * now.
3229 */
3230 return (TRAN_BUSY);
3231 } else {
3232 /*
3233 * The device is offline, just fail the command by
3234 * return TRAN_FATAL_ERROR.
3235 */
3236 return (TRAN_FATAL_ERROR);
3237 }
3238 }
3239 rval = mptsas_accept_pkt(mpt, cmd);
3240
3241 return (rval);
3242 }
3243
/*
 * Accept a single command for execution.  Called (and returns) with
 * m_mutex held.
 *
 * Prepares the packet if it has not been prepared yet, then either
 * starts it immediately (the normal case) or appends it to the wait
 * queue when the HBA or the target cannot take it right now (slot
 * exhaustion, throttling, reset delay, waiters present, or a polled
 * request).  Commands tagged CFLAG_TXQ that can no longer run (HBA in
 * reset, or devhdl invalidated) are completed through the done queue
 * instead of returning an error to the caller.
 *
 * Returns TRAN_ACCEPT, TRAN_BUSY, TRAN_FATAL_ERROR, or whatever
 * mptsas_prepare_pkt() returned on failure.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int rval = TRAN_ACCEPT;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Prepare once; a resubmitted command is already prepared. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* Queued cmds complete via the done queue. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If the device handle has already been invalidated, just fail the
	 * command.  In theory it should be impossible for a scsi_vhci
	 * client to send down a command with an invalid devhdl, since the
	 * devhdl is invalidated after the path goes offline and the target
	 * driver is not supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free slot after all; queue it. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled command: busy-wait for its completion. */
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3347
/*
 * Reserve an active-command slot (SMID) for cmd and account for it.
 * Returns TRUE when a free slot was found and claimed, FALSE when every
 * normal slot is in use.  Must be called with m_mutex held.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots = mpt->m_active;
	uint_t slot, start_rotor;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor.  If no slot is
	 * available, we'll return FALSE to indicate that.  This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1.  The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Wrapped all the way around: every slot is occupied. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands,
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3409
3410 /*
3411 * prepare the pkt:
3412 * the pkt may have been resubmitted or just reused so
3413 * initialize some fields and do some checks.
3414 */
3415 static int
3416 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3417 {
3418 struct scsi_pkt *pkt = CMD2PKT(cmd);
3419
3420 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3421
3422 /*
3423 * Reinitialize some fields that need it; the packet may
3424 * have been resubmitted
3425 */
3426 pkt->pkt_reason = CMD_CMPLT;
3427 pkt->pkt_state = 0;
3428 pkt->pkt_statistics = 0;
3429 pkt->pkt_resid = 0;
3430 cmd->cmd_age = 0;
3431 cmd->cmd_pkt_flags = pkt->pkt_flags;
3432
3433 /*
3434 * zero status byte.
3435 */
3436 *(pkt->pkt_scbp) = 0;
3437
3438 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3439 pkt->pkt_resid = cmd->cmd_dmacount;
3440
3441 /*
3442 * consistent packets need to be sync'ed first
3443 * (only for data going out)
3444 */
3445 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3446 (cmd->cmd_flags & CFLAG_DMASEND)) {
3447 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3448 DDI_DMA_SYNC_FORDEV);
3449 }
3450 }
3451
3452 cmd->cmd_flags =
3453 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3454 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3455
3456 return (TRAN_ACCEPT);
3457 }
3458
3459 /*
3460 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3461 *
3462 * One of three possibilities:
3463 * - allocate scsi_pkt
3464 * - allocate scsi_pkt and DMA resources
3465 * - allocate DMA resources to an already-allocated pkt
3466 */
3467 static struct scsi_pkt *
3468 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3469 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3470 int (*callback)(), caddr_t arg)
3471 {
3472 mptsas_cmd_t *cmd, *new_cmd;
3473 mptsas_t *mpt = ADDR2MPT(ap);
3474 int failure = 1;
3475 uint_t oldcookiec;
3476 mptsas_target_t *ptgt = NULL;
3477 int rval;
3478 mptsas_tgt_private_t *tgt_private;
3479 int kf;
3480
3481 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3482
3483 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3484 tran_tgt_private;
3485 ASSERT(tgt_private != NULL);
3486 if (tgt_private == NULL) {
3487 return (NULL);
3488 }
3489 ptgt = tgt_private->t_private;
3490 ASSERT(ptgt != NULL);
3491 if (ptgt == NULL)
3492 return (NULL);
3493 ap->a_target = ptgt->m_devhdl;
3494 ap->a_lun = tgt_private->t_lun;
3495
3496 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3497 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3498 statuslen *= 100; tgtlen *= 4;
3499 #endif
3500 NDBG3(("mptsas_scsi_init_pkt:\n"
3501 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3502 ap->a_target, (void *)pkt, (void *)bp,
3503 cmdlen, statuslen, tgtlen, flags));
3504
3505 /*
3506 * Allocate the new packet.
3507 */
3508 if (pkt == NULL) {
3509 ddi_dma_handle_t save_dma_handle;
3510 ddi_dma_handle_t save_arq_dma_handle;
3511 struct buf *save_arq_bp;
3512 ddi_dma_cookie_t save_arqcookie;
3513
3514 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3515
3516 if (cmd) {
3517 save_dma_handle = cmd->cmd_dmahandle;
3518 save_arq_dma_handle = cmd->cmd_arqhandle;
3519 save_arq_bp = cmd->cmd_arq_buf;
3520 save_arqcookie = cmd->cmd_arqcookie;
3521 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3522 cmd->cmd_dmahandle = save_dma_handle;
3523 cmd->cmd_arqhandle = save_arq_dma_handle;
3524 cmd->cmd_arq_buf = save_arq_bp;
3525 cmd->cmd_arqcookie = save_arqcookie;
3526
3527 pkt = (void *)((uchar_t *)cmd +
3528 sizeof (struct mptsas_cmd));
3529 pkt->pkt_ha_private = (opaque_t)cmd;
3530 pkt->pkt_address = *ap;
3531 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3532 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3533 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3534 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3535 cmd->cmd_cdblen = (uchar_t)cmdlen;
3536 cmd->cmd_scblen = statuslen;
3537 cmd->cmd_rqslen = SENSE_LENGTH;
3538 cmd->cmd_tgt_addr = ptgt;
3539 failure = 0;
3540 }
3541
3542 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3543 (tgtlen > PKT_PRIV_LEN) ||
3544 (statuslen > EXTCMDS_STATUS_SIZE)) {
3545 if (failure == 0) {
3546 /*
3547 * if extern alloc fails, all will be
3548 * deallocated, including cmd
3549 */
3550 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3551 cmdlen, tgtlen, statuslen, kf);
3552 }
3553 if (failure) {
3554 /*
3555 * if extern allocation fails, it will
3556 * deallocate the new pkt as well
3557 */
3558 return (NULL);
3559 }
3560 }
3561 new_cmd = cmd;
3562
3563 } else {
3564 cmd = PKT2CMD(pkt);
3565 new_cmd = NULL;
3566 }
3567
3568
3569 /* grab cmd->cmd_cookiec here as oldcookiec */
3570
3571 oldcookiec = cmd->cmd_cookiec;
3572
3573 /*
3574 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3575 * greater than 0 and we'll need to grab the next dma window
3576 */
3577 /*
3578 * SLM-not doing extra command frame right now; may add later
3579 */
3580
3581 if (cmd->cmd_nwin > 0) {
3582
3583 /*
3584 * Make sure we havn't gone past the the total number
3585 * of windows
3586 */
3587 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3588 return (NULL);
3589 }
3590 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3591 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3592 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3593 return (NULL);
3594 }
3595 goto get_dma_cookies;
3596 }
3597
3598
3599 if (flags & PKT_XARQ) {
3600 cmd->cmd_flags |= CFLAG_XARQ;
3601 }
3602
3603 /*
3604 * DMA resource allocation. This version assumes your
3605 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3606 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3607 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3608 */
3609 if (bp && (bp->b_bcount != 0) &&
3610 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3611
3612 int cnt, dma_flags;
3613 mptti_t *dmap; /* ptr to the S/G list */
3614
3615 /*
3616 * Set up DMA memory and position to the next DMA segment.
3617 */
3618 ASSERT(cmd->cmd_dmahandle != NULL);
3619
3620 if (bp->b_flags & B_READ) {
3621 dma_flags = DDI_DMA_READ;
3622 cmd->cmd_flags &= ~CFLAG_DMASEND;
3623 } else {
3624 dma_flags = DDI_DMA_WRITE;
3625 cmd->cmd_flags |= CFLAG_DMASEND;
3626 }
3627 if (flags & PKT_CONSISTENT) {
3628 cmd->cmd_flags |= CFLAG_CMDIOPB;
3629 dma_flags |= DDI_DMA_CONSISTENT;
3630 }
3631
3632 if (flags & PKT_DMA_PARTIAL) {
3633 dma_flags |= DDI_DMA_PARTIAL;
3634 }
3635
3636 /*
3637 * workaround for byte hole issue on psycho and
3638 * schizo pre 2.1
3639 */
3640 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3641 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3642 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3643 dma_flags |= DDI_DMA_CONSISTENT;
3644 }
3645
3646 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3647 dma_flags, callback, arg,
3648 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3649 if (rval == DDI_DMA_PARTIAL_MAP) {
3650 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3651 &cmd->cmd_nwin);
3652 cmd->cmd_winindex = 0;
3653 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3654 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3655 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3656 &cmd->cmd_cookiec);
3657 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3658 switch (rval) {
3659 case DDI_DMA_NORESOURCES:
3660 bioerror(bp, 0);
3661 break;
3662 case DDI_DMA_BADATTR:
3663 case DDI_DMA_NOMAPPING:
3664 bioerror(bp, EFAULT);
3665 break;
3666 case DDI_DMA_TOOBIG:
3667 default:
3668 bioerror(bp, EINVAL);
3669 break;
3670 }
3671 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3672 if (new_cmd) {
3673 mptsas_scsi_destroy_pkt(ap, pkt);
3674 }
3675 return ((struct scsi_pkt *)NULL);
3676 }
3677
3678 get_dma_cookies:
3679 cmd->cmd_flags |= CFLAG_DMAVALID;
3680 ASSERT(cmd->cmd_cookiec > 0);
3681
3682 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3683 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3684 cmd->cmd_cookiec);
3685 bioerror(bp, EINVAL);
3686 if (new_cmd) {
3687 mptsas_scsi_destroy_pkt(ap, pkt);
3688 }
3689 return ((struct scsi_pkt *)NULL);
3690 }
3691
3692 /*
3693 * Allocate extra SGL buffer if needed.
3694 */
3695 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3696 (cmd->cmd_extra_frames == NULL)) {
3697 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3698 DDI_FAILURE) {
3699 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3700 "failed");
3701 bioerror(bp, ENOMEM);
3702 if (new_cmd) {
3703 mptsas_scsi_destroy_pkt(ap, pkt);
3704 }
3705 return ((struct scsi_pkt *)NULL);
3706 }
3707 }
3708
3709 /*
3710 * Always use scatter-gather transfer
3711 * Use the loop below to store physical addresses of
3712 * DMA segments, from the DMA cookies, into your HBA's
3713 * scatter-gather list.
3714 * We need to ensure we have enough kmem alloc'd
3715 * for the sg entries since we are no longer using an
3716 * array inside mptsas_cmd_t.
3717 *
3718 * We check cmd->cmd_cookiec against oldcookiec so
3719 * the scatter-gather list is correctly allocated
3720 */
3721
3722 if (oldcookiec != cmd->cmd_cookiec) {
3723 if (cmd->cmd_sg != (mptti_t *)NULL) {
3724 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3725 oldcookiec);
3726 cmd->cmd_sg = NULL;
3727 }
3728 }
3729
3730 if (cmd->cmd_sg == (mptti_t *)NULL) {
3731 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3732 cmd->cmd_cookiec), kf);
3733
3734 if (cmd->cmd_sg == (mptti_t *)NULL) {
3735 mptsas_log(mpt, CE_WARN,
3736 "unable to kmem_alloc enough memory "
3737 "for scatter/gather list");
3738 /*
3739 * if we have an ENOMEM condition we need to behave
3740 * the same way as the rest of this routine
3741 */
3742
3743 bioerror(bp, ENOMEM);
3744 if (new_cmd) {
3745 mptsas_scsi_destroy_pkt(ap, pkt);
3746 }
3747 return ((struct scsi_pkt *)NULL);
3748 }
3749 }
3750
3751 dmap = cmd->cmd_sg;
3752
3753 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3754
3755 /*
3756 * store the first segment into the S/G list
3757 */
3758 dmap->count = cmd->cmd_cookie.dmac_size;
3759 dmap->addr.address64.Low = (uint32_t)
3760 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3761 dmap->addr.address64.High = (uint32_t)
3762 (cmd->cmd_cookie.dmac_laddress >> 32);
3763
3764 /*
3765 * dmacount counts the size of the dma for this window
3766 * (if partial dma is being used). totaldmacount
3767 * keeps track of the total amount of dma we have
3768 * transferred for all the windows (needed to calculate
3769 * the resid value below).
3770 */
3771 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3772 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3773
3774 /*
3775 * We already stored the first DMA scatter gather segment,
3776 * start at 1 if we need to store more.
3777 */
3778 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3779 /*
3780 * Get next DMA cookie
3781 */
3782 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3783 &cmd->cmd_cookie);
3784 dmap++;
3785
3786 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3787 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3788
3789 /*
3790 * store the segment parms into the S/G list
3791 */
3792 dmap->count = cmd->cmd_cookie.dmac_size;
3793 dmap->addr.address64.Low = (uint32_t)
3794 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3795 dmap->addr.address64.High = (uint32_t)
3796 (cmd->cmd_cookie.dmac_laddress >> 32);
3797 }
3798
3799 /*
3800 * If this was partially allocated we set the resid
3801 * the amount of data NOT transferred in this window
3802 * If there is only one window, the resid will be 0
3803 */
3804 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3805 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3806 }
3807 return (pkt);
3808 }
3809
3810 /*
3811 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3812 *
3813 * Notes:
3814 * - also frees DMA resources if allocated
3815 * - implicit DMA synchonization
3816 */
3817 static void
3818 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3819 {
3820 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3821 mptsas_t *mpt = ADDR2MPT(ap);
3822
3823 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3824 ap->a_target, (void *)pkt));
3825
3826 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3827 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3828 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3829 }
3830
3831 if (cmd->cmd_sg) {
3832 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3833 cmd->cmd_sg = NULL;
3834 }
3835
3836 mptsas_free_extra_sgl_frame(mpt, cmd);
3837
3838 if ((cmd->cmd_flags &
3839 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3840 CFLAG_SCBEXTERN)) == 0) {
3841 cmd->cmd_flags = CFLAG_FREE;
3842 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3843 } else {
3844 mptsas_pkt_destroy_extern(mpt, cmd);
3845 }
3846 }
3847
3848 /*
3849 * kmem cache constructor and destructor:
3850 * When constructing, we bzero the cmd and allocate the dma handle
3851 * When destructing, just free the dma handle
3852 */
/*
 * Command cache constructor: allocate the per-command DMA resources --
 * the data DMA handle, a consistent buffer for auto request sense, and
 * a single-SGE DMA handle bound to that buffer.  Returns 0 on success,
 * -1 on any failure, releasing whatever was already allocated and
 * resetting the corresponding pointers to NULL.
 */
static int
mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cmd_t *cmd = buf;
	mptsas_t *mpt  = cdrarg;
	struct scsi_address ap;
	uint_t cookiec;
	ddi_dma_attr_t arq_dma_attr;
	int (*callback)(caddr_t);

	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	NDBG4(("mptsas_kmem_cache_constructor"));

	/* Placeholder address for the ARQ buffer allocation. */
	ap.a_hba_tran = mpt->m_tran;
	ap.a_target = 0;
	ap.a_lun = 0;

	/*
	 * allocate a dma handle
	 */
	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/* Consistent buffer for automatic request sense data. */
	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, callback, NULL);
	if (cmd->cmd_arq_buf == NULL) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/*
	 * allocate a arq handle; restricted to a single SGE so the
	 * sense buffer is one contiguous DMA segment.
	 */
	arq_dma_attr = mpt->m_msg_dma_attr;
	arq_dma_attr.dma_attr_sgllen = 1;
	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		return (-1);
	}

	/* Bind the ARQ buffer; the single cookie is kept in the command. */
	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		cmd->cmd_arq_buf = NULL;
		return (-1);
	}

	return (0);
}
3916
3917 static void
3918 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3919 {
3920 #ifndef __lock_lint
3921 _NOTE(ARGUNUSED(cdrarg))
3922 #endif
3923 mptsas_cmd_t *cmd = buf;
3924
3925 NDBG4(("mptsas_kmem_cache_destructor"));
3926
3927 if (cmd->cmd_arqhandle) {
3928 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3929 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3930 cmd->cmd_arqhandle = NULL;
3931 }
3932 if (cmd->cmd_arq_buf) {
3933 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3934 cmd->cmd_arq_buf = NULL;
3935 }
3936 if (cmd->cmd_dmahandle) {
3937 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3938 cmd->cmd_dmahandle = NULL;
3939 }
3940 }
3941
/*
 * Extra-SGL frame cache constructor: allocate, map and bind one
 * contiguous DMA area big enough for (m_max_request_frames - 1)
 * request frames' worth of SGL storage.  Returns DDI_SUCCESS or
 * DDI_FAILURE; on failure m_dma_hdl is left NULL so the destructor
 * knows nothing is held.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t *p = buf;
	mptsas_t *mpt = cdrarg;
	ddi_dma_attr_t frame_dma_attr;
	size_t mem_size, alloc_len;
	ddi_dma_cookie_t cookie;
	uint_t ncookie;
	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/* Frames must be 16-byte aligned and physically contiguous. */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/* One frame's worth is already in the main request frame. */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_laddress;

	return (DDI_SUCCESS);
}
3997
3998 static void
3999 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4000 {
4001 #ifndef __lock_lint
4002 _NOTE(ARGUNUSED(cdrarg))
4003 #endif
4004 mptsas_cache_frames_t *p = buf;
4005 if (p->m_dma_hdl != NULL) {
4006 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4007 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4008 ddi_dma_free_handle(&p->m_dma_hdl);
4009 p->m_phys_addr = NULL;
4010 p->m_frames_addr = NULL;
4011 p->m_dma_hdl = NULL;
4012 p->m_acc_hdl = NULL;
4013 }
4014
4015 }
4016
4017 /*
4018 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4019 * for non-standard length cdb, pkt_private, status areas
4020 * if allocation fails, then deallocate all external space and the pkt
4021 */
4022 /* ARGSUSED */
4023 static int
4024 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4025 int cmdlen, int tgtlen, int statuslen, int kf)
4026 {
4027 caddr_t cdbp, scbp, tgt;
4028 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
4029 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
4030 struct scsi_address ap;
4031 size_t senselength;
4032 ddi_dma_attr_t ext_arq_dma_attr;
4033 uint_t cookiec;
4034
4035 NDBG3(("mptsas_pkt_alloc_extern: "
4036 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4037 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4038
4039 tgt = cdbp = scbp = NULL;
4040 cmd->cmd_scblen = statuslen;
4041 cmd->cmd_privlen = (uchar_t)tgtlen;
4042
4043 if (cmdlen > sizeof (cmd->cmd_cdb)) {
4044 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4045 goto fail;
4046 }
4047 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4048 cmd->cmd_flags |= CFLAG_CDBEXTERN;
4049 }
4050 if (tgtlen > PKT_PRIV_LEN) {
4051 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4052 goto fail;
4053 }
4054 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4055 cmd->cmd_pkt->pkt_private = tgt;
4056 }
4057 if (statuslen > EXTCMDS_STATUS_SIZE) {
4058 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4059 goto fail;
4060 }
4061 cmd->cmd_flags |= CFLAG_SCBEXTERN;
4062 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4063
4064 /* allocate sense data buf for DMA */
4065
4066 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
4067 struct scsi_arq_status, sts_sensedata);
4068 cmd->cmd_rqslen = (uchar_t)senselength;
4069
4070 ap.a_hba_tran = mpt->m_tran;
4071 ap.a_target = 0;
4072 ap.a_lun = 0;
4073
4074 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
4075 (struct buf *)NULL, senselength, B_READ,
4076 callback, NULL);
4077
4078 if (cmd->cmd_ext_arq_buf == NULL) {
4079 goto fail;
4080 }
4081 /*
4082 * allocate a extern arq handle and bind the buf
4083 */
4084 ext_arq_dma_attr = mpt->m_msg_dma_attr;
4085 ext_arq_dma_attr.dma_attr_sgllen = 1;
4086 if ((ddi_dma_alloc_handle(mpt->m_dip,
4087 &ext_arq_dma_attr, callback,
4088 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
4089 goto fail;
4090 }
4091
4092 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
4093 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
4094 callback, NULL, &cmd->cmd_ext_arqcookie,
4095 &cookiec)
4096 != DDI_SUCCESS) {
4097 goto fail;
4098 }
4099 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4100 }
4101 return (0);
4102 fail:
4103 mptsas_pkt_destroy_extern(mpt, cmd);
4104 return (1);
4105 }
4106
4107 /*
4108 * deallocate external pkt space and deallocate the pkt
4109 */
4110 static void
4111 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4112 {
4113 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4114
4115 if (cmd->cmd_flags & CFLAG_FREE) {
4116 mptsas_log(mpt, CE_PANIC,
4117 "mptsas_pkt_destroy_extern: freeing free packet");
4118 _NOTE(NOT_REACHED)
4119 /* NOTREACHED */
4120 }
4121 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4122 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4123 }
4124 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4125 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4126 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4127 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4128 }
4129 if (cmd->cmd_ext_arqhandle) {
4130 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4131 cmd->cmd_ext_arqhandle = NULL;
4132 }
4133 if (cmd->cmd_ext_arq_buf)
4134 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4135 }
4136 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4137 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4138 }
4139 cmd->cmd_flags = CFLAG_FREE;
4140 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4141 }
4142
4143 /*
4144 * tran_sync_pkt(9E) - explicit DMA synchronization
4145 */
4146 /*ARGSUSED*/
4147 static void
4148 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4149 {
4150 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4151
4152 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4153 ap->a_target, (void *)pkt));
4154
4155 if (cmd->cmd_dmahandle) {
4156 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4157 (cmd->cmd_flags & CFLAG_DMASEND) ?
4158 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4159 }
4160 }
4161
4162 /*
4163 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4164 */
4165 /*ARGSUSED*/
4166 static void
4167 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4168 {
4169 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4170 mptsas_t *mpt = ADDR2MPT(ap);
4171
4172 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4173 ap->a_target, (void *)pkt));
4174
4175 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4176 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4177 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4178 }
4179
4180 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4181 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4182 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4183 }
4184
4185 mptsas_free_extra_sgl_frame(mpt, cmd);
4186 }
4187
4188 static void
4189 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4190 {
4191 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4192 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4193 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4194 DDI_DMA_SYNC_FORCPU);
4195 }
4196 (*pkt->pkt_comp)(pkt);
4197 }
4198
4199 static void
4200 mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4201 ddi_acc_handle_t acc_hdl, uint_t cookiec,
4202 uint32_t end_flags)
4203 {
4204 pMpi2SGESimple64_t sge;
4205 mptti_t *dmap;
4206 uint32_t flags;
4207
4208 dmap = cmd->cmd_sg;
4209
4210 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4211 while (cookiec--) {
4212 ddi_put32(acc_hdl, &sge->Address.Low,
4213 dmap->addr.address64.Low);
4214 ddi_put32(acc_hdl, &sge->Address.High,
4215 dmap->addr.address64.High);
4216 ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4217 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4218 flags |= ((uint32_t)
4219 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4220 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4221 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4222 MPI2_SGE_FLAGS_SHIFT);
4223
4224 /*
4225 * If this is the last cookie, we set the flags
4226 * to indicate so
4227 */
4228 if (cookiec == 0) {
4229 flags |= end_flags;
4230 }
4231 if (cmd->cmd_flags & CFLAG_DMASEND) {
4232 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4233 MPI2_SGE_FLAGS_SHIFT);
4234 } else {
4235 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4236 MPI2_SGE_FLAGS_SHIFT);
4237 }
4238 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4239 dmap++;
4240 sge++;
4241 }
4242 }
4243
/*
 * Build a multi-frame MPI2 SGL for a command whose cookie count exceeds
 * what fits in the main request frame.  The main frame is filled first,
 * then a chain element points at the per-command extra-frame buffer
 * (cmd->cmd_extra_frames) that holds the remaining simple elements,
 * itself chained frame-to-frame as needed.
 */
static void
mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	uint64_t		nframe_phys_addr;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;
	int			i, j, k, l, frames, sgemax;
	int			temp, maxframe_sges;
	uint8_t			chainflags;
	uint16_t		chainlength;
	mptsas_cache_frames_t	*p;

	cookiec = cmd->cmd_cookiec;

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    3 double-words (12 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Some restrictions:
	 * 1. For 64-bit DMA, the simple element and chain element
	 *    are both of 3 double-words (12 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 * 2. On some controllers (like the 1064/1068), a frame can
	 *    hold SGL elements with the last 1 or 2 double-words
	 *    (4 or 8 bytes) un-used. On these controllers, we should
	 *    recognize that there's not enough room for another SGL
	 *    element and move the sge pointer to the next frame.
	 */

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64)) - 1);
	maxframe_sges = MPTSAS_MAX_FRAME_SGES64(mpt);
	temp = (cookiec - (maxframe_sges - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (maxframe_sges - 1)) - (temp * sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	dmap = cmd->cmd_sg;
	sge = (pMpi2SGESimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = maxframe_sges - 1;
	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT));
	dmap += j;
	sge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of double-words (4
	 *    bytes) so the last value should be divided by 4.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	sgechain = (pMpi2SGEChain64_t)sge;
	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_SGE_SIMPLE64) *
		    sizeof (MPI2_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_SGE_SIMPLE64));
	}

	/* Extra SGL frames were preallocated for this command. */
	p = cmd->cmd_extra_frames;

	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
	ddi_put32(acc_hdl, &sgechain->Address.Low,
	    (p->m_phys_addr&0xffffffffull));
	ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr>>32);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	} else {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				sgechain = (pMpi2SGEChain64_t)sge;
				/*
				 * The chain element consumes this slot, so
				 * back up the cookie counter; cookie j is
				 * emitted in the next frame instead.
				 */
				j--;
				chainflags = (
				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
				ddi_put8(p->m_acc_hdl,
				    &sgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.Low,
				    nframe_phys_addr&0xffffffffull);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.High,
				    nframe_phys_addr>>32);

				/*
				 * If there are more than 2 frames left
				 * we have to set the next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_SGE_SIMPLE64))
					    >> 2);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_SGE_SIMPLE64) *
					    sizeof (MPI2_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    0);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				sge = (pMpi2SGESimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, dmap->count);
			flags = ddi_get32(p->m_acc_hdl,
			    &sge->FlagsLength);
			flags |= ((uint32_t)(
			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * we set the last simple element as last
			 * element
			 */
			if ((l == sgemax) && (k != frames)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}

			/*
			 * If this is the final cookie we
			 * indicate it by setting the flags
			 */
			if (j == i) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
				    MPI2_SGE_FLAGS_END_OF_BUFFER |
				    MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4539
4540 static void
4541 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4542 ddi_acc_handle_t acc_hdl, uint_t cookiec,
4543 uint8_t end_flag)
4544 {
4545 pMpi2IeeeSgeSimple64_t ieeesge;
4546 mptti_t *dmap;
4547 uint8_t flags;
4548
4549 dmap = cmd->cmd_sg;
4550
4551 NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4552 cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4553
4554 ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4555 while (cookiec--) {
4556 ddi_put32(acc_hdl, &ieeesge->Address.Low,
4557 dmap->addr.address64.Low);
4558 ddi_put32(acc_hdl, &ieeesge->Address.High,
4559 dmap->addr.address64.High);
4560 ddi_put32(acc_hdl, &ieeesge->Length, dmap->count);
4561 NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4562 flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4563 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4564
4565 /*
4566 * If this is the last cookie, we set the flags
4567 * to indicate so
4568 */
4569 if (cookiec == 0) {
4570 flags |= end_flag;
4571 }
4572
4573 /*
4574 * XXX: Hmmm, what about the direction based on
4575 * cmd->cmd_flags & CFLAG_DMASEND?
4576 */
4577 ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4578 dmap++;
4579 ieeesge++;
4580 }
4581 }
4582
/*
 * Build a multi-frame MPI2.5 IEEE SGL for a command whose cookie count
 * exceeds what fits in the main request frame.  The structure mirrors
 * mptsas_sge_chain() but uses 16-byte IEEE simple/chain elements and
 * ChainOffset units of 16 bytes.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	pMpi25IeeeSgeChain64_t	ieeesgechain;
	uint64_t		nframe_phys_addr;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint8_t			flags;
	int			i, j, k, l, frames, sgemax;
	int			temp, maxframe_sges;
	uint8_t			chainflags;
	uint32_t		chainlength;
	mptsas_cache_frames_t	*p;

	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Some restrictions:
	 * 1. For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 * 2. On some controllers (like the 1064/1068), a frame can
	 *    hold SGL elements with the last 1 or 2 double-words
	 *    (4 or 8 bytes) un-used. On these controllers, we should
	 *    recognize that there's not enough room for another SGL
	 *    element and move the sge pointer to the next frame.
	 */

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	maxframe_sges = MPTSAS_MAX_FRAME_SGES64(mpt);
	temp = (cookiec - (maxframe_sges - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (maxframe_sges - 1)) - (temp * sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = maxframe_sges - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	/* Extra SGL frames were preallocated for this command. */
	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low,
	    p->m_phys_addr&0xffffffffull);
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr>>32);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				/*
				 * The chain element consumes this slot, so
				 * back up the cookie counter; cookie j is
				 * emitted in the next frame instead.
				 */
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    nframe_phys_addr&0xffffffffull);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High,
				    nframe_phys_addr>>32);

				/*
				 * If there are more than 2 frames left
				 * we have to set the next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4855
4856 static void
4857 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4858 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4859 {
4860 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4861
4862 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
4863
4864 /*
4865 * Set read/write bit in control.
4866 */
4867 if (cmd->cmd_flags & CFLAG_DMASEND) {
4868 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4869 } else {
4870 *control |= MPI2_SCSIIO_CONTROL_READ;
4871 }
4872
4873 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4874
4875 /*
4876 * We have 4 cases here. First where we can fit all the
4877 * SG elements into the main frame, and the case
4878 * where we can't. The SG element is also different when using
4879 * MPI2.5 interface.
4880 * If we have more cookies than we can attach to a frame
4881 * we will need to use a chain element to point
4882 * a location of memory where the rest of the S/G
4883 * elements reside.
4884 */
4885 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4886 if (mpt->m_MPI25) {
4887 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
4888 cmd->cmd_cookiec,
4889 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
4890 } else {
4891 mptsas_sge_mainframe(cmd, frame, acc_hdl,
4892 cmd->cmd_cookiec,
4893 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4894 | MPI2_SGE_FLAGS_END_OF_BUFFER
4895 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4896 MPI2_SGE_FLAGS_SHIFT));
4897 }
4898 } else {
4899 if (mpt->m_MPI25) {
4900 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
4901 } else {
4902 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
4903 }
4904 }
4905 }
4906
4907 /*
4908 * Interrupt handling
4909 * Utility routine. Poll for status of a command sent to HBA
4910 * without interrupts (a FLAG_NOINTR command).
4911 */
4912 int
4913 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4914 {
4915 int rval = TRUE;
4916
4917 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4918
4919 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4920 mptsas_restart_hba(mpt);
4921 }
4922
4923 /*
4924 * Wait, using drv_usecwait(), long enough for the command to
4925 * reasonably return from the target if the target isn't
4926 * "dead". A polled command may well be sent from scsi_poll, and
4927 * there are retries built in to scsi_poll if the transport
4928 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4929 * and retries the transport up to scsi_poll_busycnt times
4930 * (currently 60) if
4931 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4932 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4933 *
4934 * limit the waiting to avoid a hang in the event that the
4935 * cmd never gets started but we are still receiving interrupts
4936 */
4937 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4938 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4939 NDBG5(("mptsas_poll: command incomplete"));
4940 rval = FALSE;
4941 break;
4942 }
4943 }
4944
4945 if (rval == FALSE) {
4946
4947 /*
4948 * this isn't supposed to happen, the hba must be wedged
4949 * Mark this cmd as a timeout.
4950 */
4951 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4952 (STAT_TIMEOUT|STAT_ABORTED));
4953
4954 if (poll_cmd->cmd_queued == FALSE) {
4955
4956 NDBG5(("mptsas_poll: not on waitq"));
4957
4958 poll_cmd->cmd_pkt->pkt_state |=
4959 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4960 } else {
4961
4962 /* find and remove it from the waitq */
4963 NDBG5(("mptsas_poll: delete from waitq"));
4964 mptsas_waitq_delete(mpt, poll_cmd);
4965 }
4966
4967 }
4968 mptsas_fma_check(mpt, poll_cmd);
4969 NDBG5(("mptsas_poll: done"));
4970 return (rval);
4971 }
4972
4973 /*
4974 * Used for polling cmds and TM function
4975 */
4976 static int
4977 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4978 {
4979 int cnt;
4980 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
4981 uint32_t int_mask;
4982
4983 NDBG5(("mptsas_wait_intr"));
4984
4985 mpt->m_polled_intr = 1;
4986
4987 /*
4988 * Get the current interrupt mask and disable interrupts. When
4989 * re-enabling ints, set mask to saved value.
4990 */
4991 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4992 MPTSAS_DISABLE_INTR(mpt);
4993
4994 /*
4995 * Keep polling for at least (polltime * 1000) seconds
4996 */
4997 for (cnt = 0; cnt < polltime; cnt++) {
4998 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4999 DDI_DMA_SYNC_FORCPU);
5000
5001 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5002 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5003
5004 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5005 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5006 ddi_get32(mpt->m_acc_post_queue_hdl,
5007 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5008 drv_usecwait(1000);
5009 continue;
5010 }
5011
5012 /*
5013 * The reply is valid, process it according to its
5014 * type.
5015 */
5016 mptsas_process_intr(mpt, reply_desc_union);
5017
5018 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5019 mpt->m_post_index = 0;
5020 }
5021
5022 /*
5023 * Update the global reply index
5024 */
5025 ddi_put32(mpt->m_datap,
5026 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5027 mpt->m_polled_intr = 0;
5028
5029 /*
5030 * Re-enable interrupts and quit.
5031 */
5032 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5033 int_mask);
5034 return (TRUE);
5035
5036 }
5037
5038 /*
5039 * Clear polling flag, re-enable interrupts and quit.
5040 */
5041 mpt->m_polled_intr = 0;
5042 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5043 return (FALSE);
5044 }
5045
5046 static void
5047 mptsas_handle_scsi_io_success(mptsas_t *mpt,
5048 pMpi2ReplyDescriptorsUnion_t reply_desc)
5049 {
5050 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
5051 uint16_t SMID;
5052 mptsas_slots_t *slots = mpt->m_active;
5053 mptsas_cmd_t *cmd = NULL;
5054 struct scsi_pkt *pkt;
5055
5056 ASSERT(mutex_owned(&mpt->m_mutex));
5057
5058 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
5059 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
5060
5061 /*
5062 * This is a success reply so just complete the IO. First, do a sanity
5063 * check on the SMID. The final slot is used for TM requests, which
5064 * would not come into this reply handler.
5065 */
5066 if ((SMID == 0) || (SMID > slots->m_n_normal)) {
5067 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
5068 SMID);
5069 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5070 return;
5071 }
5072
5073 cmd = slots->m_slot[SMID];
5074
5075 /*
5076 * print warning and return if the slot is empty
5077 */
5078 if (cmd == NULL) {
5079 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
5080 "in slot %d", SMID);
5081 return;
5082 }
5083
5084 pkt = CMD2PKT(cmd);
5085 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
5086 STATE_GOT_STATUS);
5087 if (cmd->cmd_flags & CFLAG_DMAVALID) {
5088 pkt->pkt_state |= STATE_XFERRED_DATA;
5089 }
5090 pkt->pkt_resid = 0;
5091
5092 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
5093 cmd->cmd_flags |= CFLAG_FINISHED;
5094 cv_broadcast(&mpt->m_passthru_cv);
5095 return;
5096 } else {
5097 mptsas_remove_cmd(mpt, cmd);
5098 }
5099
5100 if (cmd->cmd_flags & CFLAG_RETRY) {
5101 /*
5102 * The target returned QFULL or busy, do not add tihs
5103 * pkt to the doneq since the hba will retry
5104 * this cmd.
5105 *
5106 * The pkt has already been resubmitted in
5107 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5108 * Remove this cmd_flag here.
5109 */
5110 cmd->cmd_flags &= ~CFLAG_RETRY;
5111 } else {
5112 mptsas_doneq_add(mpt, cmd);
5113 }
5114 }
5115
5116 static void
5117 mptsas_handle_address_reply(mptsas_t *mpt,
5118 pMpi2ReplyDescriptorsUnion_t reply_desc)
5119 {
5120 pMpi2AddressReplyDescriptor_t address_reply;
5121 pMPI2DefaultReply_t reply;
5122 mptsas_fw_diagnostic_buffer_t *pBuffer;
5123 uint32_t reply_addr;
5124 uint16_t SMID, iocstatus;
5125 mptsas_slots_t *slots = mpt->m_active;
5126 mptsas_cmd_t *cmd = NULL;
5127 uint8_t function, buffer_type;
5128 m_replyh_arg_t *args;
5129 int reply_frame_no;
5130
5131 ASSERT(mutex_owned(&mpt->m_mutex));
5132
5133 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5134 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5135 &address_reply->ReplyFrameAddress);
5136 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5137
5138 /*
5139 * If reply frame is not in the proper range we should ignore this
5140 * message and exit the interrupt handler.
5141 */
5142 if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
5143 (reply_addr >= (mpt->m_reply_frame_dma_addr +
5144 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5145 ((reply_addr - mpt->m_reply_frame_dma_addr) %
5146 mpt->m_reply_frame_size != 0)) {
5147 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5148 "address 0x%x\n", reply_addr);
5149 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5150 return;
5151 }
5152
5153 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5154 DDI_DMA_SYNC_FORCPU);
5155 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5156 mpt->m_reply_frame_dma_addr));
5157 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5158
5159 /*
5160 * don't get slot information and command for events since these values
5161 * don't exist
5162 */
5163 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5164 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5165 /*
5166 * This could be a TM reply, which use the last allocated SMID,
5167 * so allow for that.
5168 */
5169 if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5170 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5171 "%d\n", SMID);
5172 ddi_fm_service_impact(mpt->m_dip,
5173 DDI_SERVICE_UNAFFECTED);
5174 return;
5175 }
5176
5177 cmd = slots->m_slot[SMID];
5178
5179 /*
5180 * print warning and return if the slot is empty
5181 */
5182 if (cmd == NULL) {
5183 mptsas_log(mpt, CE_WARN, "?NULL command for address "
5184 "reply in slot %d", SMID);
5185 return;
5186 }
5187 if ((cmd->cmd_flags &
5188 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5189 cmd->cmd_rfm = reply_addr;
5190 cmd->cmd_flags |= CFLAG_FINISHED;
5191 cv_broadcast(&mpt->m_passthru_cv);
5192 cv_broadcast(&mpt->m_config_cv);
5193 cv_broadcast(&mpt->m_fw_diag_cv);
5194 return;
5195 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5196 mptsas_remove_cmd(mpt, cmd);
5197 }
5198 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5199 }
5200 /*
5201 * Depending on the function, we need to handle
5202 * the reply frame (and cmd) differently.
5203 */
5204 switch (function) {
5205 case MPI2_FUNCTION_SCSI_IO_REQUEST:
5206 mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5207 break;
5208 case MPI2_FUNCTION_SCSI_TASK_MGMT:
5209 cmd->cmd_rfm = reply_addr;
5210 mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5211 cmd);
5212 break;
5213 case MPI2_FUNCTION_FW_DOWNLOAD:
5214 cmd->cmd_flags |= CFLAG_FINISHED;
5215 cv_signal(&mpt->m_fw_cv);
5216 break;
5217 case MPI2_FUNCTION_EVENT_NOTIFICATION:
5218 reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
5219 mpt->m_reply_frame_size;
5220 args = &mpt->m_replyh_args[reply_frame_no];
5221 args->mpt = (void *)mpt;
5222 args->rfm = reply_addr;
5223
5224 /*
5225 * Record the event if its type is enabled in
5226 * this mpt instance by ioctl.
5227 */
5228 mptsas_record_event(args);
5229
5230 /*
5231 * Handle time critical events
5232 * NOT_RESPONDING/ADDED only now
5233 */
5234 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5235 /*
5236 * Would not return main process,
5237 * just let taskq resolve ack action
5238 * and ack would be sent in taskq thread
5239 */
5240 NDBG20(("send mptsas_handle_event_sync success"));
5241 }
5242
5243 if (mpt->m_in_reset) {
5244 NDBG20(("dropping event received during reset"));
5245 return;
5246 }
5247
5248 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5249 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5250 mptsas_log(mpt, CE_WARN, "No memory available"
5251 "for dispatch taskq");
5252 /*
5253 * Return the reply frame to the free queue.
5254 */
5255 ddi_put32(mpt->m_acc_free_queue_hdl,
5256 &((uint32_t *)(void *)
5257 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5258 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5259 DDI_DMA_SYNC_FORDEV);
5260 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5261 mpt->m_free_index = 0;
5262 }
5263
5264 ddi_put32(mpt->m_datap,
5265 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5266 }
5267 return;
5268 case MPI2_FUNCTION_DIAG_BUFFER_POST:
5269 /*
5270 * If SMID is 0, this implies that the reply is due to a
5271 * release function with a status that the buffer has been
5272 * released. Set the buffer flags accordingly.
5273 */
5274 if (SMID == 0) {
5275 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5276 &reply->IOCStatus);
5277 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5278 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5279 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5280 pBuffer =
5281 &mpt->m_fw_diag_buffer_list[buffer_type];
5282 pBuffer->valid_data = TRUE;
5283 pBuffer->owned_by_firmware = FALSE;
5284 pBuffer->immediate = FALSE;
5285 }
5286 } else {
5287 /*
5288 * Normal handling of diag post reply with SMID.
5289 */
5290 cmd = slots->m_slot[SMID];
5291
5292 /*
5293 * print warning and return if the slot is empty
5294 */
5295 if (cmd == NULL) {
5296 mptsas_log(mpt, CE_WARN, "?NULL command for "
5297 "address reply in slot %d", SMID);
5298 return;
5299 }
5300 cmd->cmd_rfm = reply_addr;
5301 cmd->cmd_flags |= CFLAG_FINISHED;
5302 cv_broadcast(&mpt->m_fw_diag_cv);
5303 }
5304 return;
5305 default:
5306 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5307 break;
5308 }
5309
5310 /*
5311 * Return the reply frame to the free queue.
5312 */
5313 ddi_put32(mpt->m_acc_free_queue_hdl,
5314 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5315 reply_addr);
5316 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5317 DDI_DMA_SYNC_FORDEV);
5318 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5319 mpt->m_free_index = 0;
5320 }
5321 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5322 mpt->m_free_index);
5323
5324 if (cmd->cmd_flags & CFLAG_FW_CMD)
5325 return;
5326
5327 if (cmd->cmd_flags & CFLAG_RETRY) {
5328 /*
		 * The target returned QFULL or busy, do not add this
5330 * pkt to the doneq since the hba will retry
5331 * this cmd.
5332 *
5333 * The pkt has already been resubmitted in
5334 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5335 * Remove this cmd_flag here.
5336 */
5337 cmd->cmd_flags &= ~CFLAG_RETRY;
5338 } else {
5339 mptsas_doneq_add(mpt, cmd);
5340 }
5341 }
5342
/*
 * Decode the IOC's reply to a SCSI IO request and translate it into
 * pkt_reason/pkt_state/pkt_resid/pkt_scbp updates on the command's
 * scsi_pkt.  Handles auto-request-sense (check condition), device-gone,
 * data over/underrun, terminated/reset and resource-busy outcomes.
 * Called with m_mutex held (reply-frame processing path).
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t scsi_status, scsi_state;
	uint16_t ioc_status;
	uint32_t xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt *pkt;
	struct scsi_arq_status *arqstat;
	struct buf *bp;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
	uint8_t *sensedata = NULL;
	uint64_t sas_wwn;
	uint8_t phy;
	char wwn_str[MPTSAS_WWN_STRLEN];

	/*
	 * Pick the buffer that holds the auto-request-sense data: the
	 * external ARQ buffer only when both flags say it is in use.
	 */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	/* Pull all fields of interest out of the DMA'd reply frame. */
	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		/* Identify the target by WWN, or PHY number if no WWN. */
		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;
		if (sas_wwn == 0) {
			(void) sprintf(wwn_str, "p%x", phy);
		} else {
			(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
		}
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d %s.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * Log info PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * (0x31170000) means the device-missing delay is still in
		 * progress; report BUSY so the command is retried later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		/* Device vanished with no SCSI status: start draining it. */
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte of ResponseInfo carries the code. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/*
		 * Build a scsi_arq_status in the pkt's status block from
		 * the auto-sense data the IOC returned.
		 */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		/* The simulated request-sense pkt always moved data. */
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* Copy no more sense bytes than the pkt has room for. */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 * Either way, dispatch a dynamic-reconfiguration task so
		 * the target's LUNs get re-enumerated.
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/* SCSI status is good: outcome is decided by IOC status. */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			if (cmd->cmd_active_expiration <= gethrtime()) {
				/*
				 * When timeout requested, propagate
				 * proper reason and statistics to
				 * target drivers.
				 */
				mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
				    STAT_BUS_RESET | STAT_TIMEOUT);
			} else {
				mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
				    STAT_BUS_RESET);
			}
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 * NOTE: this loop reuses (clobbers) ptgt.
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5610
5611 static void
5612 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5613 mptsas_cmd_t *cmd)
5614 {
5615 uint8_t task_type;
5616 uint16_t ioc_status;
5617 uint32_t log_info;
5618 uint16_t dev_handle;
5619 struct scsi_pkt *pkt = CMD2PKT(cmd);
5620
5621 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5622 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5623 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5624 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5625
5626 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5627 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5628 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5629 task_type, ioc_status, log_info, dev_handle);
5630 pkt->pkt_reason = CMD_INCOMPLETE;
5631 return;
5632 }
5633
5634 switch (task_type) {
5635 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5636 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5637 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5638 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5639 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5640 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5641 break;
5642 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5643 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5644 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5645 /*
5646 * Check for invalid DevHandle of 0 in case application
5647 * sends bad command. DevHandle of 0 could cause problems.
5648 */
5649 if (dev_handle == 0) {
5650 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5651 " DevHandle of 0.");
5652 } else {
5653 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5654 task_type);
5655 }
5656 break;
5657 default:
5658 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5659 task_type);
5660 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5661 break;
5662 }
5663 }
5664
/*
 * Body of one done-queue helper thread.  Repeatedly removes completed
 * commands from its per-thread done queue and invokes the target
 * completion routine, sleeping on the per-thread cv when the queue is
 * empty.  Runs until the MPTSAS_DONEQ_THREAD_ACTIVE flag is cleared,
 * then decrements m_doneq_thread_n and broadcasts m_doneq_thread_cv so
 * whoever is tearing the threads down can observe the exit.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t *mpt = arg->mpt;
	uint64_t t = arg->t;
	mptsas_cmd_t *cmd;
	struct scsi_pkt *pkt;
	mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work arrives or the flag is re-checked. */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Call the completion routine with the list mutex
		 * dropped — it may take arbitrary time / other locks.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Announce our exit to the thread-count waiter. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5696
5697
5698 /*
5699 * mpt interrupt handler.
5700 */
5701 static uint_t
5702 mptsas_intr(caddr_t arg1, caddr_t arg2)
5703 {
5704 mptsas_t *mpt = (void *)arg1;
5705 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5706 uchar_t did_reply = FALSE;
5707
5708 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5709
5710 mutex_enter(&mpt->m_mutex);
5711
5712 /*
5713 * If interrupts are shared by two channels then check whether this
5714 * interrupt is genuinely for this channel by making sure first the
5715 * chip is in high power state.
5716 */
5717 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5718 (mpt->m_power_level != PM_LEVEL_D0)) {
5719 mutex_exit(&mpt->m_mutex);
5720 return (DDI_INTR_UNCLAIMED);
5721 }
5722
5723 /*
5724 * If polling, interrupt was triggered by some shared interrupt because
5725 * IOC interrupts are disabled during polling, so polling routine will
5726 * handle any replies. Considering this, if polling is happening,
5727 * return with interrupt unclaimed.
5728 */
5729 if (mpt->m_polled_intr) {
5730 mutex_exit(&mpt->m_mutex);
5731 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5732 return (DDI_INTR_UNCLAIMED);
5733 }
5734
5735 /*
5736 * Read the istat register.
5737 */
5738 if ((INTPENDING(mpt)) != 0) {
5739 /*
5740 * read fifo until empty.
5741 */
5742 #ifndef __lock_lint
5743 _NOTE(CONSTCOND)
5744 #endif
5745 while (TRUE) {
5746 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5747 DDI_DMA_SYNC_FORCPU);
5748 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5749 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5750
5751 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5752 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5753 ddi_get32(mpt->m_acc_post_queue_hdl,
5754 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5755 break;
5756 }
5757
5758 /*
5759 * The reply is valid, process it according to its
5760 * type. Also, set a flag for updating the reply index
5761 * after they've all been processed.
5762 */
5763 did_reply = TRUE;
5764
5765 mptsas_process_intr(mpt, reply_desc_union);
5766
5767 /*
5768 * Increment post index and roll over if needed.
5769 */
5770 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5771 mpt->m_post_index = 0;
5772 }
5773 }
5774
5775 /*
5776 * Update the global reply index if at least one reply was
5777 * processed.
5778 */
5779 if (did_reply) {
5780 ddi_put32(mpt->m_datap,
5781 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5782 }
5783 } else {
5784 mutex_exit(&mpt->m_mutex);
5785 return (DDI_INTR_UNCLAIMED);
5786 }
5787 NDBG1(("mptsas_intr complete"));
5788
5789 /*
5790 * If no helper threads are created, process the doneq in ISR. If
5791 * helpers are created, use the doneq length as a metric to measure the
5792 * load on the interrupt CPU. If it is long enough, which indicates the
5793 * load is heavy, then we deliver the IO completions to the helpers.
5794 * This measurement has some limitations, although it is simple and
5795 * straightforward and works well for most of the cases at present.
5796 */
5797 if (!mpt->m_doneq_thread_n ||
5798 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5799 mptsas_doneq_empty(mpt);
5800 } else {
5801 mptsas_deliver_doneq_thread(mpt);
5802 }
5803
5804 /*
5805 * If there are queued cmd, start them now.
5806 */
5807 if (mpt->m_waitq != NULL) {
5808 mptsas_restart_waitq(mpt);
5809 }
5810
5811 mutex_exit(&mpt->m_mutex);
5812 return (DDI_INTR_CLAIMED);
5813 }
5814
5815 static void
5816 mptsas_process_intr(mptsas_t *mpt,
5817 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5818 {
5819 uint8_t reply_type;
5820
5821 ASSERT(mutex_owned(&mpt->m_mutex));
5822
5823 /*
5824 * The reply is valid, process it according to its
5825 * type. Also, set a flag for updated the reply index
5826 * after they've all been processed.
5827 */
5828 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5829 &reply_desc_union->Default.ReplyFlags);
5830 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5831 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
5832 reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
5833 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5834 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5835 mptsas_handle_address_reply(mpt, reply_desc_union);
5836 } else {
5837 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5838 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5839 }
5840
5841 /*
5842 * Clear the reply descriptor for re-use and increment
5843 * index.
5844 */
5845 ddi_put64(mpt->m_acc_post_queue_hdl,
5846 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5847 0xFFFFFFFFFFFFFFFF);
5848 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5849 DDI_DMA_SYNC_FORDEV);
5850 }
5851
5852 /*
5853 * handle qfull condition
5854 */
5855 static void
5856 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5857 {
5858 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5859
5860 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5861 (ptgt->m_qfull_retries == 0)) {
5862 /*
5863 * We have exhausted the retries on QFULL, or,
5864 * the target driver has indicated that it
5865 * wants to handle QFULL itself by setting
5866 * qfull-retries capability to 0. In either case
5867 * we want the target driver's QFULL handling
5868 * to kick in. We do this by having pkt_reason
5869 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5870 */
5871 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5872 } else {
5873 if (ptgt->m_reset_delay == 0) {
5874 ptgt->m_t_throttle =
5875 max((ptgt->m_t_ncmds - 2), 0);
5876 }
5877
5878 cmd->cmd_pkt_flags |= FLAG_HEAD;
5879 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5880 cmd->cmd_flags |= CFLAG_RETRY;
5881
5882 (void) mptsas_accept_pkt(mpt, cmd);
5883
5884 /*
5885 * when target gives queue full status with no commands
5886 * outstanding (m_t_ncmds == 0), throttle is set to 0
5887 * (HOLD_THROTTLE), and the queue full handling start
5888 * (see psarc/1994/313); if there are commands outstanding,
5889 * throttle is set to (m_t_ncmds - 2)
5890 */
5891 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5892 /*
5893 * By setting throttle to QFULL_THROTTLE, we
5894 * avoid submitting new commands and in
5895 * mptsas_restart_cmd find out slots which need
5896 * their throttles to be cleared.
5897 */
5898 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5899 if (mpt->m_restart_cmd_timeid == 0) {
5900 mpt->m_restart_cmd_timeid =
5901 timeout(mptsas_restart_cmd, mpt,
5902 ptgt->m_qfull_retry_interval);
5903 }
5904 }
5905 }
5906 }
5907
5908 mptsas_phymask_t
5909 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5910 {
5911 mptsas_phymask_t phy_mask = 0;
5912 uint8_t i = 0;
5913
5914 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5915
5916 ASSERT(mutex_owned(&mpt->m_mutex));
5917
5918 /*
5919 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5920 */
5921 if (physport == 0xFF) {
5922 return (0);
5923 }
5924
5925 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5926 if (mpt->m_phy_info[i].attached_devhdl &&
5927 (mpt->m_phy_info[i].phy_mask != 0) &&
5928 (mpt->m_phy_info[i].port_num == physport)) {
5929 phy_mask = mpt->m_phy_info[i].phy_mask;
5930 break;
5931 }
5932 }
5933 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5934 mpt->m_instance, physport, phy_mask));
5935 return (phy_mask);
5936 }
5937
5938 /*
5939 * mpt free device handle after device gone, by use of passthrough
5940 */
5941 static int
5942 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5943 {
5944 Mpi2SasIoUnitControlRequest_t req;
5945 Mpi2SasIoUnitControlReply_t rep;
5946 int ret;
5947
5948 ASSERT(mutex_owned(&mpt->m_mutex));
5949
5950 /*
5951 * Need to compose a SAS IO Unit Control request message
5952 * and call mptsas_do_passthru() function
5953 */
5954 bzero(&req, sizeof (req));
5955 bzero(&rep, sizeof (rep));
5956
5957 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5958 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5959 req.DevHandle = LE_16(devhdl);
5960
5961 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5962 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5963 if (ret != 0) {
5964 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5965 "Control error %d", ret);
5966 return (DDI_FAILURE);
5967 }
5968
5969 /* do passthrough success, check the ioc status */
5970 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5971 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5972 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5973 return (DDI_FAILURE);
5974 }
5975
5976 return (DDI_SUCCESS);
5977 }
5978
5979 static void
5980 mptsas_update_phymask(mptsas_t *mpt)
5981 {
5982 mptsas_phymask_t mask = 0, phy_mask;
5983 char *phy_mask_name;
5984 uint8_t current_port;
5985 int i, j;
5986
5987 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5988
5989 ASSERT(mutex_owned(&mpt->m_mutex));
5990
5991 (void) mptsas_get_sas_io_unit_page(mpt);
5992
5993 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5994
5995 for (i = 0; i < mpt->m_num_phys; i++) {
5996 phy_mask = 0x00;
5997
5998 if (mpt->m_phy_info[i].attached_devhdl == 0)
5999 continue;
6000
6001 bzero(phy_mask_name, sizeof (phy_mask_name));
6002
6003 current_port = mpt->m_phy_info[i].port_num;
6004
6005 if ((mask & (1 << i)) != 0)
6006 continue;
6007
6008 for (j = 0; j < mpt->m_num_phys; j++) {
6009 if (mpt->m_phy_info[j].attached_devhdl &&
6010 (mpt->m_phy_info[j].port_num == current_port)) {
6011 phy_mask |= (1 << j);
6012 }
6013 }
6014 mask = mask | phy_mask;
6015
6016 for (j = 0; j < mpt->m_num_phys; j++) {
6017 if ((phy_mask >> j) & 0x01) {
6018 mpt->m_phy_info[j].phy_mask = phy_mask;
6019 }
6020 }
6021
6022 (void) sprintf(phy_mask_name, "%x", phy_mask);
6023
6024 mutex_exit(&mpt->m_mutex);
6025 /*
6026 * register a iport, if the port has already been existed
6027 * SCSA will do nothing and just return.
6028 */
6029 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6030 mutex_enter(&mpt->m_mutex);
6031 }
6032 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6033 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6034 }
6035
6036 /*
6037 * mptsas_handle_dr is a task handler for DR, the DR action includes:
 * 1. Directly attached Device Added/Removed.
6039 * 2. Expander Device Added/Removed.
6040 * 3. Indirectly Attached Device Added/Expander.
 * 4. LUNs of an existing device status change.
6042 * 5. RAID volume created/deleted.
6043 * 6. Member of RAID volume is released because of RAID deletion.
6044 * 7. Physical disks are removed because of RAID creation.
6045 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t *topo_node = NULL;
	mptsas_topo_change_list_t *save_node = NULL;
	mptsas_t *mpt;
	dev_info_t *parent = NULL;
	mptsas_phymask_t phymask = 0;
	char *phy_mask_name;
	uint8_t flags = 0, physport = 0xff;
	uint8_t port_update = 0;
	uint_t event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide whether this event class can change the initiator port
	 * configuration (and so may require a phymask refresh below).
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online.  So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/* Walk and consume the whole queued topology-change list. */
	while (topo_node) {
		phymask = 0;
		/*
		 * parent is carried over between iterations for devices
		 * that share an iport; when it is NULL we must find the
		 * iport dip for this node first.
		 */
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no field in an IR_CONFIG_CHANGE
				 * event indicating physport/phynum; get the
				 * parent after the SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed.  If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					/* Free this node and move on. */
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC.  We must free the topo list, however.
		 */
		if (!mpt->m_in_reset)
			mptsas_handle_topo_change(topo_node, parent);
		else
			NDBG20(("skipping topo change received during reset"));
		/* Unlink and free the node we just consumed. */
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one.  But
			 * all devices associated with expander shares the
			 * parent.  Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
6231
6232 static void
6233 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6234 dev_info_t *parent)
6235 {
6236 mptsas_target_t *ptgt = NULL;
6237 mptsas_smp_t *psmp = NULL;
6238 mptsas_t *mpt = (void *)topo_node->mpt;
6239 uint16_t devhdl;
6240 uint16_t attached_devhdl;
6241 uint64_t sas_wwn = 0;
6242 int rval = 0;
6243 uint32_t page_address;
6244 uint8_t phy, flags;
6245 char *addr = NULL;
6246 dev_info_t *lundip;
6247 int circ = 0, circ1 = 0;
6248 char attached_wwnstr[MPTSAS_WWN_STRLEN];
6249
6250 NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));
6251
6252 ASSERT(mutex_owned(&mpt->m_mutex));
6253
6254 switch (topo_node->event) {
6255 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6256 {
6257 char *phy_mask_name;
6258 mptsas_phymask_t phymask = 0;
6259
6260 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6261 /*
6262 * Get latest RAID info.
6263 */
6264 (void) mptsas_get_raid_info(mpt);
6265 ptgt = refhash_linear_search(mpt->m_targets,
6266 mptsas_target_eval_devhdl, &topo_node->devhdl);
6267 if (ptgt == NULL)
6268 break;
6269 } else {
6270 ptgt = (void *)topo_node->object;
6271 }
6272
6273 if (ptgt == NULL) {
6274 /*
6275 * If a Phys Disk was deleted, RAID info needs to be
6276 * updated to reflect the new topology.
6277 */
6278 (void) mptsas_get_raid_info(mpt);
6279
6280 /*
6281 * Get sas device page 0 by DevHandle to make sure if
6282 * SSP/SATA end device exist.
6283 */
6284 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6285 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6286 topo_node->devhdl;
6287
6288 rval = mptsas_get_target_device_info(mpt, page_address,
6289 &devhdl, &ptgt);
6290 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6291 mptsas_log(mpt, CE_NOTE,
6292 "mptsas_handle_topo_change: target %d is "
6293 "not a SAS/SATA device. \n",
6294 topo_node->devhdl);
6295 } else if (rval == DEV_INFO_FAIL_ALLOC) {
6296 mptsas_log(mpt, CE_NOTE,
6297 "mptsas_handle_topo_change: could not "
6298 "allocate memory. \n");
6299 }
6300 /*
6301 * If rval is DEV_INFO_PHYS_DISK than there is nothing
6302 * else to do, just leave.
6303 */
6304 if (rval != DEV_INFO_SUCCESS) {
6305 return;
6306 }
6307 }
6308
6309 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6310
6311 mutex_exit(&mpt->m_mutex);
6312 flags = topo_node->flags;
6313
6314 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6315 phymask = ptgt->m_addr.mta_phymask;
6316 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6317 (void) sprintf(phy_mask_name, "%x", phymask);
6318 parent = scsi_hba_iport_find(mpt->m_dip,
6319 phy_mask_name);
6320 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6321 if (parent == NULL) {
6322 mptsas_log(mpt, CE_WARN, "Failed to find a "
6323 "iport for PD, should not happen!");
6324 mutex_enter(&mpt->m_mutex);
6325 break;
6326 }
6327 }
6328
6329 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6330 ndi_devi_enter(parent, &circ1);
6331 (void) mptsas_config_raid(parent, topo_node->devhdl,
6332 &lundip);
6333 ndi_devi_exit(parent, circ1);
6334 } else {
6335 /*
6336 * hold nexus for bus configure
6337 */
6338 ndi_devi_enter(scsi_vhci_dip, &circ);
6339 ndi_devi_enter(parent, &circ1);
6340 rval = mptsas_config_target(parent, ptgt);
6341 /*
6342 * release nexus for bus configure
6343 */
6344 ndi_devi_exit(parent, circ1);
6345 ndi_devi_exit(scsi_vhci_dip, circ);
6346
6347 /*
6348 * Add parent's props for SMHBA support
6349 */
6350 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6351 bzero(attached_wwnstr,
6352 sizeof (attached_wwnstr));
6353 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
6354 ptgt->m_addr.mta_wwn);
6355 if (ddi_prop_update_string(DDI_DEV_T_NONE,
6356 parent,
6357 SCSI_ADDR_PROP_ATTACHED_PORT,
6358 attached_wwnstr)
6359 != DDI_PROP_SUCCESS) {
6360 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6361 parent,
6362 SCSI_ADDR_PROP_ATTACHED_PORT);
6363 mptsas_log(mpt, CE_WARN, "Failed to"
6364 "attached-port props");
6365 return;
6366 }
6367 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6368 MPTSAS_NUM_PHYS, 1) !=
6369 DDI_PROP_SUCCESS) {
6370 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6371 parent, MPTSAS_NUM_PHYS);
6372 mptsas_log(mpt, CE_WARN, "Failed to"
6373 " create num-phys props");
6374 return;
6375 }
6376
6377 /*
6378 * Update PHY info for smhba
6379 */
6380 mutex_enter(&mpt->m_mutex);
6381 if (mptsas_smhba_phy_init(mpt)) {
6382 mutex_exit(&mpt->m_mutex);
6383 mptsas_log(mpt, CE_WARN, "mptsas phy"
6384 " update failed");
6385 return;
6386 }
6387 mutex_exit(&mpt->m_mutex);
6388
6389 /*
6390 * topo_node->un.physport is really the PHY#
6391 * for direct attached devices
6392 */
6393 mptsas_smhba_set_one_phy_props(mpt, parent,
6394 topo_node->un.physport, &attached_devhdl);
6395
6396 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6397 MPTSAS_VIRTUAL_PORT, 0) !=
6398 DDI_PROP_SUCCESS) {
6399 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6400 parent, MPTSAS_VIRTUAL_PORT);
6401 mptsas_log(mpt, CE_WARN,
6402 "mptsas virtual-port"
6403 "port prop update failed");
6404 return;
6405 }
6406 }
6407 }
6408 mutex_enter(&mpt->m_mutex);
6409
6410 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6411 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6412 ptgt->m_addr.mta_phymask));
6413 break;
6414 }
6415 case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6416 {
6417 devhdl = topo_node->devhdl;
6418 ptgt = refhash_linear_search(mpt->m_targets,
6419 mptsas_target_eval_devhdl, &devhdl);
6420 if (ptgt == NULL)
6421 break;
6422
6423 sas_wwn = ptgt->m_addr.mta_wwn;
6424 phy = ptgt->m_phynum;
6425
6426 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6427
6428 if (sas_wwn) {
6429 (void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6430 } else {
6431 (void) sprintf(addr, "p%x", phy);
6432 }
6433 ASSERT(ptgt->m_devhdl == devhdl);
6434
6435 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6436 (topo_node->flags ==
6437 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6438 /*
6439 * Get latest RAID info if RAID volume status changes
6440 * or Phys Disk status changes
6441 */
6442 (void) mptsas_get_raid_info(mpt);
6443 }
6444 /*
6445 * Abort all outstanding command on the device
6446 */
6447 rval = mptsas_do_scsi_reset(mpt, devhdl);
6448 if (rval) {
6449 NDBG20(("mptsas%d handle_topo_change to reset target "
6450 "before offline devhdl:%x, phymask:%x, rval:%x",
6451 mpt->m_instance, ptgt->m_devhdl,
6452 ptgt->m_addr.mta_phymask, rval));
6453 }
6454
6455 mutex_exit(&mpt->m_mutex);
6456
6457 ndi_devi_enter(scsi_vhci_dip, &circ);
6458 ndi_devi_enter(parent, &circ1);
6459 rval = mptsas_offline_target(parent, addr);
6460 ndi_devi_exit(parent, circ1);
6461 ndi_devi_exit(scsi_vhci_dip, circ);
6462 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6463 "phymask:%x, rval:%x", mpt->m_instance,
6464 ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6465
6466 kmem_free(addr, SCSI_MAXNAMELEN);
6467
6468 /*
6469 * Clear parent's props for SMHBA support
6470 */
6471 flags = topo_node->flags;
6472 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6473 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6474 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6475 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6476 DDI_PROP_SUCCESS) {
6477 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6478 SCSI_ADDR_PROP_ATTACHED_PORT);
6479 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6480 "prop update failed");
6481 break;
6482 }
6483 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6484 MPTSAS_NUM_PHYS, 0) !=
6485 DDI_PROP_SUCCESS) {
6486 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6487 MPTSAS_NUM_PHYS);
6488 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6489 "prop update failed");
6490 break;
6491 }
6492 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6493 MPTSAS_VIRTUAL_PORT, 1) !=
6494 DDI_PROP_SUCCESS) {
6495 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6496 MPTSAS_VIRTUAL_PORT);
6497 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6498 "prop update failed");
6499 break;
6500 }
6501 }
6502
6503 mutex_enter(&mpt->m_mutex);
6504 ptgt->m_led_status = 0;
6505 (void) mptsas_flush_led_status(mpt, ptgt);
6506 if (rval == DDI_SUCCESS) {
6507 refhash_remove(mpt->m_targets, ptgt);
6508 ptgt = NULL;
6509 } else {
6510 /*
6511 * clean DR_INTRANSITION flag to allow I/O down to
6512 * PHCI driver since failover finished.
6513 * Invalidate the devhdl
6514 */
6515 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6516 ptgt->m_tgt_unconfigured = 0;
6517 mutex_enter(&mpt->m_tx_waitq_mutex);
6518 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6519 mutex_exit(&mpt->m_tx_waitq_mutex);
6520 }
6521
6522 /*
6523 * Send SAS IO Unit Control to free the dev handle
6524 */
6525 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6526 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6527 rval = mptsas_free_devhdl(mpt, devhdl);
6528
6529 NDBG20(("mptsas%d handle_topo_change to remove "
6530 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6531 rval));
6532 }
6533
6534 break;
6535 }
6536 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6537 {
6538 devhdl = topo_node->devhdl;
6539 /*
6540 * If this is the remove handle event, do a reset first.
6541 */
6542 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6543 rval = mptsas_do_scsi_reset(mpt, devhdl);
6544 if (rval) {
6545 NDBG20(("mpt%d reset target before remove "
6546 "devhdl:%x, rval:%x", mpt->m_instance,
6547 devhdl, rval));
6548 }
6549 }
6550
6551 /*
6552 * Send SAS IO Unit Control to free the dev handle
6553 */
6554 rval = mptsas_free_devhdl(mpt, devhdl);
6555 NDBG20(("mptsas%d handle_topo_change to remove "
6556 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6557 rval));
6558 break;
6559 }
6560 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6561 {
6562 mptsas_smp_t smp;
6563 dev_info_t *smpdip;
6564
6565 devhdl = topo_node->devhdl;
6566
6567 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6568 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6569 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6570 if (rval != DDI_SUCCESS) {
6571 mptsas_log(mpt, CE_WARN, "failed to online smp, "
6572 "handle %x", devhdl);
6573 return;
6574 }
6575
6576 psmp = mptsas_smp_alloc(mpt, &smp);
6577 if (psmp == NULL) {
6578 return;
6579 }
6580
6581 mutex_exit(&mpt->m_mutex);
6582 ndi_devi_enter(parent, &circ1);
6583 (void) mptsas_online_smp(parent, psmp, &smpdip);
6584 ndi_devi_exit(parent, circ1);
6585
6586 mutex_enter(&mpt->m_mutex);
6587 break;
6588 }
6589 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6590 {
6591 devhdl = topo_node->devhdl;
6592 uint32_t dev_info;
6593
6594 psmp = refhash_linear_search(mpt->m_smp_targets,
6595 mptsas_smp_eval_devhdl, &devhdl);
6596 if (psmp == NULL)
6597 break;
6598 /*
6599 * The mptsas_smp_t data is released only if the dip is offlined
6600 * successfully.
6601 */
6602 mutex_exit(&mpt->m_mutex);
6603
6604 ndi_devi_enter(parent, &circ1);
6605 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6606 ndi_devi_exit(parent, circ1);
6607
6608 dev_info = psmp->m_deviceinfo;
6609 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6610 DEVINFO_DIRECT_ATTACHED) {
6611 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6612 MPTSAS_VIRTUAL_PORT, 1) !=
6613 DDI_PROP_SUCCESS) {
6614 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6615 MPTSAS_VIRTUAL_PORT);
6616 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6617 "prop update failed");
6618 return;
6619 }
6620 /*
6621 * Check whether the smp connected to the iport,
6622 */
6623 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6624 MPTSAS_NUM_PHYS, 0) !=
6625 DDI_PROP_SUCCESS) {
6626 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6627 MPTSAS_NUM_PHYS);
6628 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6629 "prop update failed");
6630 return;
6631 }
6632 /*
6633 * Clear parent's attached-port props
6634 */
6635 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6636 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6637 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6638 DDI_PROP_SUCCESS) {
6639 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6640 SCSI_ADDR_PROP_ATTACHED_PORT);
6641 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6642 "prop update failed");
6643 return;
6644 }
6645 }
6646
6647 mutex_enter(&mpt->m_mutex);
6648 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6649 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6650 if (rval == DDI_SUCCESS) {
6651 refhash_remove(mpt->m_smp_targets, psmp);
6652 } else {
6653 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6654 }
6655
6656 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6657
6658 break;
6659 }
6660 default:
6661 return;
6662 }
6663 }
6664
6665 /*
6666 * Record the event if its type is enabled in mpt instance by ioctl.
6667 */
6668 static void
6669 mptsas_record_event(void *args)
6670 {
6671 m_replyh_arg_t *replyh_arg;
6672 pMpi2EventNotificationReply_t eventreply;
6673 uint32_t event, rfm;
6674 mptsas_t *mpt;
6675 int i, j;
6676 uint16_t event_data_len;
6677 boolean_t sendAEN = FALSE;
6678
6679 replyh_arg = (m_replyh_arg_t *)args;
6680 rfm = replyh_arg->rfm;
6681 mpt = replyh_arg->mpt;
6682
6683 eventreply = (pMpi2EventNotificationReply_t)
6684 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6685 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6686
6687
6688 /*
6689 * Generate a system event to let anyone who cares know that a
6690 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6691 * event mask is set to.
6692 */
6693 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6694 sendAEN = TRUE;
6695 }
6696
6697 /*
6698 * Record the event only if it is not masked. Determine which dword
6699 * and bit of event mask to test.
6700 */
6701 i = (uint8_t)(event / 32);
6702 j = (uint8_t)(event % 32);
6703 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6704 i = mpt->m_event_index;
6705 mpt->m_events[i].Type = event;
6706 mpt->m_events[i].Number = ++mpt->m_event_number;
6707 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6708 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6709 &eventreply->EventDataLength);
6710
6711 if (event_data_len > 0) {
6712 /*
6713 * Limit data to size in m_event entry
6714 */
6715 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6716 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6717 }
6718 for (j = 0; j < event_data_len; j++) {
6719 mpt->m_events[i].Data[j] =
6720 ddi_get32(mpt->m_acc_reply_frame_hdl,
6721 &(eventreply->EventData[j]));
6722 }
6723
6724 /*
6725 * check for index wrap-around
6726 */
6727 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6728 i = 0;
6729 }
6730 mpt->m_event_index = (uint8_t)i;
6731
6732 /*
6733 * Set flag to send the event.
6734 */
6735 sendAEN = TRUE;
6736 }
6737 }
6738
6739 /*
6740 * Generate a system event if flag is set to let anyone who cares know
6741 * that an event has occurred.
6742 */
6743 if (sendAEN) {
6744 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6745 "SAS", NULL, NULL, DDI_NOSLEEP);
6746 }
6747 }
6748
6749 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6750 /*
6751 * handle sync events from ioc in interrupt
6752 * return value:
6753 * DDI_SUCCESS: The event is handled by this func
6754 * DDI_FAILURE: Event is not handled
6755 */
static int
mptsas_handle_event_sync(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	uint_t				iocstatus;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Locate this event's reply frame inside the reply frame pool. */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);

	/* Intentional assignment-in-condition: log any non-zero IOCStatus. */
	if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
	    &eventreply->IOCStatus)) {
		/*
		 * NOTE(review): this compares the whole status word for
		 * equality with the LOG_INFO_AVAILABLE flag bit rather
		 * than testing the bit with "&" - confirm intended.
		 */
		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
			mptsas_log(mpt, CE_WARN,
			    "!mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		} else {
			mptsas_log(mpt, CE_WARN,
			    "mptsas_handle_event_sync: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x", iocstatus,
			    ddi_get32(mpt->m_acc_reply_frame_hdl,
			    &eventreply->IOCLogInfo));
		}
	}

	/*
	 * figure out what kind of event we got and handle accordingly
	 */
	switch (event) {
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
		uint8_t				num_entries, expstatus, phy;
		uint8_t				phystatus, physport, state, i;
		uint8_t				start_phy_num, link_rate;
		uint16_t			dev_handle, reason_code;
		uint16_t			enc_handle, expd_handle;
		char				string[80], curr[80], prev[80];
		mptsas_topo_change_list_t	*topo_head = NULL;
		mptsas_topo_change_list_t	*topo_tail = NULL;
		mptsas_topo_change_list_t	*topo_node = NULL;
		mptsas_target_t			*ptgt;
		mptsas_smp_t			*psmp;
		uint8_t				flags = 0, exp_flag;
		smhba_info_t			*pSmhba = NULL;

		NDBG20(("mptsas_handle_event_sync: SAS topology change"));

		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
		    eventreply->EventData;

		/* Pull the fixed header fields out of the event data. */
		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->EnclosureHandle);
		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpanderDevHandle);
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->NumEntries);
		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->StartPhyNum);
		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->ExpStatus);
		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &sas_topo_change_list->PhysicalPort);

		string[0] = 0;
		/*
		 * A non-zero expander handle means the change happened
		 * behind an expander; queue expander add/remove work.
		 */
		if (expd_handle) {
			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
			switch (expstatus) {
			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
				(void) sprintf(string, " added");
				/*
				 * New expander device added
				 */
				mpt->m_port_chng = 1;
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
				topo_node->un.physport = physport;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "removed");
				psmp = refhash_linear_search(mpt->m_smp_targets,
				    mptsas_smp_eval_devhdl, &expd_handle);
				if (psmp == NULL)
					break;

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    psmp->m_addr.mta_phymask;
				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
				topo_node->devhdl = expd_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
				break;
			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
				(void) sprintf(string, " not responding, "
				    "delaying removal");
				break;
			default:
				break;
			}
		} else {
			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
		}

		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
		    enc_handle, expd_handle, string));
		/*
		 * Walk the per-PHY entries; each describes one PHY's
		 * change relative to start_phy_num.
		 */
		for (i = 0; i < num_entries; i++) {
			phy = i + start_phy_num;
			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].PhyStatus);
			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
			/*
			 * Filter out processing of Phy Vacant Status unless
			 * the reason code is "Not Responding".  Process all
			 * other combinations of Phy Status and Reason Codes.
			 */
			if ((phystatus &
			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
			    (reason_code !=
			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
				continue;
			}
			curr[0] = 0;
			prev[0] = 0;
			string[0] = 0;
			switch (reason_code) {
			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d added", mpt->m_instance, phy,
				    physport, dev_handle));
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				/* Describe the current link state for NDBG. */
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at 6.0 "
					    "Gbps");
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}
				/*
				 * New target device added into the system.
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				if (expd_handle == 0) {
					/*
					 * Per MPI 2, if expander dev handle
					 * is 0, it's a directly attached
					 * device. So driver use PHY to decide
					 * which iport is associated
					 */
					physport = phy;
					mpt->m_port_chng = 1;
				}
				topo_node->un.physport = physport;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			{
				NDBG20(("mptsas%d phy %d physical_port %d "
				    "dev_handle %d removed", mpt->m_instance,
				    phy, physport, dev_handle));
				/*
				 * Set association flag according to if an
				 * expander is used or not.
				 */
				exp_flag =
				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
				if (flags ==
				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
					flags = exp_flag;
				}
				/*
				 * Target device is removed from the system
				 * Before the device is really offline from
				 * from system.
				 */
				ptgt = refhash_linear_search(mpt->m_targets,
				    mptsas_target_eval_devhdl, &dev_handle);
				/*
				 * If ptgt is NULL here, it means that the
				 * DevHandle is not in the hash table.  This is
				 * reasonable sometimes.  For example, if a
				 * disk was pulled, then added, then pulled
				 * again, the disk will not have been put into
				 * the hash table because the add event will
				 * have an invalid phymask.  BUT, this does not
				 * mean that the DevHandle is invalid.  The
				 * controller will still have a valid DevHandle
				 * that must be removed.  To do this, use the
				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
				 */
				if (ptgt == NULL) {
					topo_node = kmem_zalloc(
					    sizeof (mptsas_topo_change_list_t),
					    KM_SLEEP);
					topo_node->mpt = mpt;
					topo_node->un.phymask = 0;
					topo_node->event =
					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
					topo_node->devhdl = dev_handle;
					topo_node->flags = flags;
					topo_node->object = NULL;
					if (topo_head == NULL) {
						topo_head = topo_tail =
						    topo_node;
					} else {
						topo_tail->next = topo_node;
						topo_tail = topo_node;
					}
					break;
				}

				/*
				 * Update DR flag immediately avoid I/O failure
				 * before failover finish. Pay attention to the
				 * mutex protect, we need grab m_tx_waitq_mutex
				 * during set m_dr_flag because we won't add
				 * the following command into waitq, instead,
				 * we need return TRAN_BUSY in the tran_start
				 * context.
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    ptgt->m_addr.mta_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = dev_handle;
				topo_node->flags = flags;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
				/*
				 * Link rate changed on this PHY; record the
				 * new negotiated rate and emit sysevents, but
				 * no DR (topo list) work is queued.
				 */
				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
				    &sas_topo_change_list->PHY[i].LinkRate);
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
				pSmhba = &mpt->m_phy_info[i].smhba_info;
				pSmhba->negotiated_link_rate = state;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(curr, "is disabled");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_REMOVE,
					    &mpt->m_phy_info[i].smhba_info);
					mpt->m_phy_info[i].smhba_info.
					    negotiated_link_rate
					    = 0x1;
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(curr, "is offline, "
					    "failed speed negotiation");
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_OFFLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(curr, "SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(curr, "SMP reset in "
					    "progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(curr, "is online at "
					    "1.5 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(curr, "is online at 3.0 "
					    "Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(curr, "is online at "
					    "6.0 Gbps");
					if ((expd_handle == 0) &&
					    (enc_handle == 1)) {
						mpt->m_port_chng = 1;
					}
					mptsas_smhba_log_sysevent(mpt,
					    ESC_SAS_PHY_EVENT,
					    SAS_PHY_ONLINE,
					    &mpt->m_phy_info[i].smhba_info);
					break;
				default:
					(void) sprintf(curr, "state is "
					    "unknown");
					break;
				}

				/* Decode the previous link state for NDBG. */
				state = (link_rate &
				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
				switch (state) {
				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
					(void) sprintf(prev, ", was disabled");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
					(void) sprintf(prev, ", was offline, "
					    "failed speed negotiation");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
					(void) sprintf(prev, ", was SATA OOB "
					    "complete");
					break;
				case SMP_RESET_IN_PROGRESS:
					(void) sprintf(prev, ", was SMP reset "
					    "in progress");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
					(void) sprintf(prev, ", was online at "
					    "1.5 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
					(void) sprintf(prev, ", was online at "
					    "3.0 Gbps");
					break;
				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
					(void) sprintf(prev, ", was online at "
					    "6.0 Gbps");
					break;
				default:
					break;
				}
				(void) sprintf(&string[strlen(string)], "link "
				    "changed, ");
				break;
			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
				continue;
			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
				(void) sprintf(&string[strlen(string)],
				    "target not responding, delaying "
				    "removal");
				break;
			}
			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
			    mpt->m_instance, phy, dev_handle, string, curr,
			    prev));
		}
		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
	{
		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
		mptsas_topo_change_list_t		*topo_head = NULL;
		mptsas_topo_change_list_t		*topo_tail = NULL;
		mptsas_topo_change_list_t		*topo_node = NULL;
		mptsas_target_t				*ptgt;
		uint8_t					num_entries, i, reason;
		uint16_t				volhandle, diskhandle;

		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
		    eventreply->EventData;
		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &irChangeList->NumElements);

		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
		    mpt->m_instance));

		/*
		 * Each element describes one RAID volume or physical
		 * disk change; queue matching DR work for each.
		 */
		for (i = 0; i < num_entries; i++) {
			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].ReasonCode);
			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].VolDevHandle);
			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);

			switch (reason) {
			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			{
				NDBG20(("mptsas %d volume added\n",
				    mpt->m_instance));

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);

				topo_node->mpt = mpt;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->un.physport = 0xff;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = NULL;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			{
				NDBG20(("mptsas %d volume deleted\n",
				    mpt->m_instance));
				ptgt = refhash_linear_search(mpt->m_targets,
				    mptsas_target_eval_devhdl, &volhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Clear any flags related to volume
				 */
				(void) mptsas_delete_volume(mpt, volhandle);

				/*
				 * Update DR flag immediately avoid I/O failure
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    ptgt->m_addr.mta_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = volhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			{
				/*
				 * A physical disk was absorbed into a
				 * volume (hidden); offline it as a plain
				 * target.
				 */
				ptgt = refhash_linear_search(mpt->m_targets,
				    mptsas_target_eval_devhdl, &diskhandle);
				if (ptgt == NULL)
					break;

				/*
				 * Update DR flag immediately avoid I/O failure
				 */
				mutex_enter(&mpt->m_tx_waitq_mutex);
				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
				mutex_exit(&mpt->m_tx_waitq_mutex);

				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask =
				    ptgt->m_addr.mta_phymask;
				topo_node->event =
				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = (void *)ptgt;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			{
				/*
				 * The physical drive is released by a IR
				 * volume. But we cannot get the the physport
				 * or phynum from the event data, so we only
				 * can get the physport/phynum after SAS
				 * Device Page0 request for the devhdl.
				 */
				topo_node = kmem_zalloc(
				    sizeof (mptsas_topo_change_list_t),
				    KM_SLEEP);
				topo_node->mpt = mpt;
				topo_node->un.phymask = 0;
				topo_node->event =
				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
				topo_node->devhdl = diskhandle;
				topo_node->flags =
				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
				topo_node->object = NULL;
				mpt->m_port_chng = 1;
				if (topo_head == NULL) {
					topo_head = topo_tail = topo_node;
				} else {
					topo_tail->next = topo_node;
					topo_tail = topo_node;
				}
				break;
			}
			default:
				break;
			}
		}

		if (topo_head != NULL) {
			/*
			 * Launch DR taskq to handle topology change
			 */
			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr, (void *)topo_head,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
				    "for handle SAS DR event failed. \n");
			}
		}
		break;
	}
	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
7412
7413 /*
7414 * handle events from ioc
7415 */
7416 static void
7417 mptsas_handle_event(void *args)
7418 {
7419 m_replyh_arg_t *replyh_arg;
7420 pMpi2EventNotificationReply_t eventreply;
7421 uint32_t event, iocloginfo, rfm;
7422 uint32_t status;
7423 uint8_t port;
7424 mptsas_t *mpt;
7425 uint_t iocstatus;
7426
7427 replyh_arg = (m_replyh_arg_t *)args;
7428 rfm = replyh_arg->rfm;
7429 mpt = replyh_arg->mpt;
7430
7431 mutex_enter(&mpt->m_mutex);
7432 /*
7433 * If HBA is being reset, drop incoming event.
7434 */
7435 if (mpt->m_in_reset) {
7436 NDBG20(("dropping event received prior to reset"));
7437 mutex_exit(&mpt->m_mutex);
7438 return;
7439 }
7440
7441 eventreply = (pMpi2EventNotificationReply_t)
7442 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7443 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7444
7445 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7446 &eventreply->IOCStatus)) {
7447 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7448 mptsas_log(mpt, CE_WARN,
7449 "!mptsas_handle_event: IOCStatus=0x%x, "
7450 "IOCLogInfo=0x%x", iocstatus,
7451 ddi_get32(mpt->m_acc_reply_frame_hdl,
7452 &eventreply->IOCLogInfo));
7453 } else {
7454 mptsas_log(mpt, CE_WARN,
7455 "mptsas_handle_event: IOCStatus=0x%x, "
7456 "IOCLogInfo=0x%x", iocstatus,
7457 ddi_get32(mpt->m_acc_reply_frame_hdl,
7458 &eventreply->IOCLogInfo));
7459 }
7460 }
7461
7462 /*
7463 * figure out what kind of event we got and handle accordingly
7464 */
7465 switch (event) {
7466 case MPI2_EVENT_LOG_ENTRY_ADDED:
7467 break;
7468 case MPI2_EVENT_LOG_DATA:
7469 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7470 &eventreply->IOCLogInfo);
7471 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7472 iocloginfo));
7473 break;
7474 case MPI2_EVENT_STATE_CHANGE:
7475 NDBG20(("mptsas%d state change.", mpt->m_instance));
7476 break;
7477 case MPI2_EVENT_HARD_RESET_RECEIVED:
7478 NDBG20(("mptsas%d event change.", mpt->m_instance));
7479 break;
7480 case MPI2_EVENT_SAS_DISCOVERY:
7481 {
7482 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7483 char string[80];
7484 uint8_t rc;
7485
7486 sasdiscovery =
7487 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7488
7489 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7490 &sasdiscovery->ReasonCode);
7491 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7492 &sasdiscovery->PhysicalPort);
7493 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7494 &sasdiscovery->DiscoveryStatus);
7495
7496 string[0] = 0;
7497 switch (rc) {
7498 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7499 (void) sprintf(string, "STARTING");
7500 break;
7501 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7502 (void) sprintf(string, "COMPLETED");
7503 break;
7504 default:
7505 (void) sprintf(string, "UNKNOWN");
7506 break;
7507 }
7508
7509 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7510 port, status));
7511
7512 break;
7513 }
7514 case MPI2_EVENT_EVENT_CHANGE:
7515 NDBG20(("mptsas%d event change.", mpt->m_instance));
7516 break;
7517 case MPI2_EVENT_TASK_SET_FULL:
7518 {
7519 pMpi2EventDataTaskSetFull_t taskfull;
7520
7521 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7522
7523 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7524 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7525 &taskfull->CurrentDepth)));
7526 break;
7527 }
7528 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7529 {
7530 /*
7531 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7532 * in mptsas_handle_event_sync() of interrupt context
7533 */
7534 break;
7535 }
7536 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7537 {
7538 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7539 uint8_t rc;
7540 char string[80];
7541
7542 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7543 eventreply->EventData;
7544
7545 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7546 &encstatus->ReasonCode);
7547 switch (rc) {
7548 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7549 (void) sprintf(string, "added");
7550 break;
7551 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7552 (void) sprintf(string, ", not responding");
7553 break;
7554 default:
7555 break;
7556 }
7557 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7558 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7559 &encstatus->EnclosureHandle), string));
7560 break;
7561 }
7562
7563 /*
7564 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7565 * mptsas_handle_event_sync,in here just send ack message.
7566 */
7567 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7568 {
7569 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7570 uint8_t rc;
7571 uint16_t devhdl;
7572 uint64_t wwn = 0;
7573 uint32_t wwn_lo, wwn_hi;
7574
7575 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7576 eventreply->EventData;
7577 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7578 &statuschange->ReasonCode);
7579 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7580 (uint32_t *)(void *)&statuschange->SASAddress);
7581 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7582 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7583 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7584 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7585 &statuschange->DevHandle);
7586
7587 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7588 wwn));
7589
7590 switch (rc) {
7591 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7592 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7593 ddi_get8(mpt->m_acc_reply_frame_hdl,
7594 &statuschange->ASC),
7595 ddi_get8(mpt->m_acc_reply_frame_hdl,
7596 &statuschange->ASCQ)));
7597 break;
7598
7599 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7600 NDBG20(("Device not supported"));
7601 break;
7602
7603 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7604 NDBG20(("IOC internally generated the Target Reset "
7605 "for devhdl:%x", devhdl));
7606 break;
7607
7608 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7609 NDBG20(("IOC's internally generated Target Reset "
7610 "completed for devhdl:%x", devhdl));
7611 break;
7612
7613 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7614 NDBG20(("IOC internally generated Abort Task"));
7615 break;
7616
7617 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7618 NDBG20(("IOC's internally generated Abort Task "
7619 "completed"));
7620 break;
7621
7622 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7623 NDBG20(("IOC internally generated Abort Task Set"));
7624 break;
7625
7626 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7627 NDBG20(("IOC internally generated Clear Task Set"));
7628 break;
7629
7630 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7631 NDBG20(("IOC internally generated Query Task"));
7632 break;
7633
7634 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7635 NDBG20(("Device sent an Asynchronous Notification"));
7636 break;
7637
7638 default:
7639 break;
7640 }
7641 break;
7642 }
7643 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7644 {
7645 /*
7646 * IR TOPOLOGY CHANGE LIST Event has already been handled
7647 * in mpt_handle_event_sync() of interrupt context
7648 */
7649 break;
7650 }
7651 case MPI2_EVENT_IR_OPERATION_STATUS:
7652 {
7653 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7654 char reason_str[80];
7655 uint8_t rc, percent;
7656 uint16_t handle;
7657
7658 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7659 eventreply->EventData;
7660 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7661 &irOpStatus->RAIDOperation);
7662 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7663 &irOpStatus->PercentComplete);
7664 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7665 &irOpStatus->VolDevHandle);
7666
7667 switch (rc) {
7668 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7669 (void) sprintf(reason_str, "resync");
7670 break;
7671 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7672 (void) sprintf(reason_str, "online capacity "
7673 "expansion");
7674 break;
7675 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7676 (void) sprintf(reason_str, "consistency check");
7677 break;
7678 default:
7679 (void) sprintf(reason_str, "unknown reason %x",
7680 rc);
7681 }
7682
7683 NDBG20(("mptsas%d raid operational status: (%s)"
7684 "\thandle(0x%04x), percent complete(%d)\n",
7685 mpt->m_instance, reason_str, handle, percent));
7686 break;
7687 }
7688 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7689 {
7690 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7691 uint8_t phy_num;
7692 uint8_t primitive;
7693
7694 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7695 eventreply->EventData;
7696
7697 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7698 &sas_broadcast->PhyNum);
7699 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7700 &sas_broadcast->Primitive);
7701
7702 switch (primitive) {
7703 case MPI2_EVENT_PRIMITIVE_CHANGE:
7704 mptsas_smhba_log_sysevent(mpt,
7705 ESC_SAS_HBA_PORT_BROADCAST,
7706 SAS_PORT_BROADCAST_CHANGE,
7707 &mpt->m_phy_info[phy_num].smhba_info);
7708 break;
7709 case MPI2_EVENT_PRIMITIVE_SES:
7710 mptsas_smhba_log_sysevent(mpt,
7711 ESC_SAS_HBA_PORT_BROADCAST,
7712 SAS_PORT_BROADCAST_SES,
7713 &mpt->m_phy_info[phy_num].smhba_info);
7714 break;
7715 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7716 mptsas_smhba_log_sysevent(mpt,
7717 ESC_SAS_HBA_PORT_BROADCAST,
7718 SAS_PORT_BROADCAST_D01_4,
7719 &mpt->m_phy_info[phy_num].smhba_info);
7720 break;
7721 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7722 mptsas_smhba_log_sysevent(mpt,
7723 ESC_SAS_HBA_PORT_BROADCAST,
7724 SAS_PORT_BROADCAST_D04_7,
7725 &mpt->m_phy_info[phy_num].smhba_info);
7726 break;
7727 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7728 mptsas_smhba_log_sysevent(mpt,
7729 ESC_SAS_HBA_PORT_BROADCAST,
7730 SAS_PORT_BROADCAST_D16_7,
7731 &mpt->m_phy_info[phy_num].smhba_info);
7732 break;
7733 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7734 mptsas_smhba_log_sysevent(mpt,
7735 ESC_SAS_HBA_PORT_BROADCAST,
7736 SAS_PORT_BROADCAST_D29_7,
7737 &mpt->m_phy_info[phy_num].smhba_info);
7738 break;
7739 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7740 mptsas_smhba_log_sysevent(mpt,
7741 ESC_SAS_HBA_PORT_BROADCAST,
7742 SAS_PORT_BROADCAST_D24_0,
7743 &mpt->m_phy_info[phy_num].smhba_info);
7744 break;
7745 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7746 mptsas_smhba_log_sysevent(mpt,
7747 ESC_SAS_HBA_PORT_BROADCAST,
7748 SAS_PORT_BROADCAST_D27_4,
7749 &mpt->m_phy_info[phy_num].smhba_info);
7750 break;
7751 default:
7752 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7753 " %x received",
7754 mpt->m_instance, primitive));
7755 break;
7756 }
7757 NDBG20(("mptsas%d sas broadcast primitive: "
7758 "\tprimitive(0x%04x), phy(%d) complete\n",
7759 mpt->m_instance, primitive, phy_num));
7760 break;
7761 }
7762 case MPI2_EVENT_IR_VOLUME:
7763 {
7764 Mpi2EventDataIrVolume_t *irVolume;
7765 uint16_t devhandle;
7766 uint32_t state;
7767 int config, vol;
7768 uint8_t found = FALSE;
7769
7770 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7771 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7772 &irVolume->NewValue);
7773 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7774 &irVolume->VolDevHandle);
7775
7776 NDBG20(("EVENT_IR_VOLUME event is received"));
7777
7778 /*
7779 * Get latest RAID info and then find the DevHandle for this
7780 * event in the configuration. If the DevHandle is not found
7781 * just exit the event.
7782 */
7783 (void) mptsas_get_raid_info(mpt);
7784 for (config = 0; (config < mpt->m_num_raid_configs) &&
7785 (!found); config++) {
7786 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7787 if (mpt->m_raidconfig[config].m_raidvol[vol].
7788 m_raidhandle == devhandle) {
7789 found = TRUE;
7790 break;
7791 }
7792 }
7793 }
7794 if (!found) {
7795 break;
7796 }
7797
7798 switch (irVolume->ReasonCode) {
7799 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7800 {
7801 uint32_t i;
7802 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7803 state;
7804
7805 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7806 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7807 ", auto-config of hot-swap drives is %s"
7808 ", write caching is %s"
7809 ", hot-spare pool mask is %02x\n",
7810 vol, state &
7811 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7812 ? "disabled" : "enabled",
7813 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7814 ? "controlled by member disks" :
7815 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7816 ? "disabled" :
7817 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7818 ? "enabled" :
7819 "incorrectly set",
7820 (state >> 16) & 0xff);
7821 break;
7822 }
7823 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7824 {
7825 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7826 (uint8_t)state;
7827
7828 mptsas_log(mpt, CE_NOTE,
7829 "Volume %d is now %s\n", vol,
7830 state == MPI2_RAID_VOL_STATE_OPTIMAL
7831 ? "optimal" :
7832 state == MPI2_RAID_VOL_STATE_DEGRADED
7833 ? "degraded" :
7834 state == MPI2_RAID_VOL_STATE_ONLINE
7835 ? "online" :
7836 state == MPI2_RAID_VOL_STATE_INITIALIZING
7837 ? "initializing" :
7838 state == MPI2_RAID_VOL_STATE_FAILED
7839 ? "failed" :
7840 state == MPI2_RAID_VOL_STATE_MISSING
7841 ? "missing" :
7842 "state unknown");
7843 break;
7844 }
7845 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7846 {
7847 mpt->m_raidconfig[config].m_raidvol[vol].
7848 m_statusflags = state;
7849
7850 mptsas_log(mpt, CE_NOTE,
7851 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7852 vol,
7853 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7854 ? ", enabled" : ", disabled",
7855 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7856 ? ", quiesced" : "",
7857 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7858 ? ", inactive" : ", active",
7859 state &
7860 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7861 ? ", bad block table is full" : "",
7862 state &
7863 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7864 ? ", resync in progress" : "",
7865 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7866 ? ", background initialization in progress" : "",
7867 state &
7868 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7869 ? ", capacity expansion in progress" : "",
7870 state &
7871 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7872 ? ", consistency check in progress" : "",
7873 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7874 ? ", data scrub in progress" : "");
7875 break;
7876 }
7877 default:
7878 break;
7879 }
7880 break;
7881 }
7882 case MPI2_EVENT_IR_PHYSICAL_DISK:
7883 {
7884 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7885 uint16_t devhandle, enchandle, slot;
7886 uint32_t status, state;
7887 uint8_t physdisknum, reason;
7888
7889 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7890 eventreply->EventData;
7891 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7892 &irPhysDisk->PhysDiskNum);
7893 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7894 &irPhysDisk->PhysDiskDevHandle);
7895 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7896 &irPhysDisk->EnclosureHandle);
7897 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7898 &irPhysDisk->Slot);
7899 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7900 &irPhysDisk->NewValue);
7901 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7902 &irPhysDisk->ReasonCode);
7903
7904 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7905
7906 switch (reason) {
7907 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7908 mptsas_log(mpt, CE_NOTE,
7909 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7910 "for enclosure with handle 0x%x is now in hot "
7911 "spare pool %d",
7912 physdisknum, devhandle, slot, enchandle,
7913 (state >> 16) & 0xff);
7914 break;
7915
7916 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7917 status = state;
7918 mptsas_log(mpt, CE_NOTE,
7919 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7920 "for enclosure with handle 0x%x is now "
7921 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7922 enchandle,
7923 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7924 ? ", inactive" : ", active",
7925 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7926 ? ", out of sync" : "",
7927 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7928 ? ", quiesced" : "",
7929 status &
7930 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7931 ? ", write cache enabled" : "",
7932 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7933 ? ", capacity expansion target" : "");
7934 break;
7935
7936 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7937 mptsas_log(mpt, CE_NOTE,
7938 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7939 "for enclosure with handle 0x%x is now %s\n",
7940 physdisknum, devhandle, slot, enchandle,
7941 state == MPI2_RAID_PD_STATE_OPTIMAL
7942 ? "optimal" :
7943 state == MPI2_RAID_PD_STATE_REBUILDING
7944 ? "rebuilding" :
7945 state == MPI2_RAID_PD_STATE_DEGRADED
7946 ? "degraded" :
7947 state == MPI2_RAID_PD_STATE_HOT_SPARE
7948 ? "a hot spare" :
7949 state == MPI2_RAID_PD_STATE_ONLINE
7950 ? "online" :
7951 state == MPI2_RAID_PD_STATE_OFFLINE
7952 ? "offline" :
7953 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7954 ? "not compatible" :
7955 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7956 ? "not configured" :
7957 "state unknown");
7958 break;
7959 }
7960 break;
7961 }
7962 default:
7963 NDBG20(("mptsas%d: unknown event %x received",
7964 mpt->m_instance, event));
7965 break;
7966 }
7967
7968 /*
7969 * Return the reply frame to the free queue.
7970 */
7971 ddi_put32(mpt->m_acc_free_queue_hdl,
7972 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7973 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7974 DDI_DMA_SYNC_FORDEV);
7975 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7976 mpt->m_free_index = 0;
7977 }
7978 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7979 mpt->m_free_index);
7980 mutex_exit(&mpt->m_mutex);
7981 }
7982
7983 /*
7984 * invoked from timeout() to restart qfull cmds with throttle == 0
7985 */
7986 static void
7987 mptsas_restart_cmd(void *arg)
7988 {
7989 mptsas_t *mpt = arg;
7990 mptsas_target_t *ptgt = NULL;
7991
7992 mutex_enter(&mpt->m_mutex);
7993
7994 mpt->m_restart_cmd_timeid = 0;
7995
7996 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
7997 ptgt = refhash_next(mpt->m_targets, ptgt)) {
7998 if (ptgt->m_reset_delay == 0) {
7999 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8000 mptsas_set_throttle(mpt, ptgt,
8001 MAX_THROTTLE);
8002 }
8003 }
8004 }
8005 mptsas_restart_hba(mpt);
8006 mutex_exit(&mpt->m_mutex);
8007 }
8008
8009 void
8010 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8011 {
8012 int slot;
8013 mptsas_slots_t *slots = mpt->m_active;
8014 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8015
8016 ASSERT(cmd != NULL);
8017 ASSERT(cmd->cmd_queued == FALSE);
8018
8019 /*
8020 * Task Management cmds are removed in their own routines. Also,
8021 * we don't want to modify timeout based on TM cmds.
8022 */
8023 if (cmd->cmd_flags & CFLAG_TM_CMD) {
8024 return;
8025 }
8026
8027 slot = cmd->cmd_slot;
8028
8029 /*
8030 * remove the cmd.
8031 */
8032 if (cmd == slots->m_slot[slot]) {
8033 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
8034 slots->m_slot[slot] = NULL;
8035 mpt->m_ncmds--;
8036
8037 /*
8038 * only decrement per target ncmds if command
8039 * has a target associated with it.
8040 */
8041 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8042 ptgt->m_t_ncmds--;
8043 /*
8044 * reset throttle if we just ran an untagged command
8045 * to a tagged target
8046 */
8047 if ((ptgt->m_t_ncmds == 0) &&
8048 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8049 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8050 }
8051
8052 /*
8053 * Remove this command from the active queue.
8054 */
8055 if (cmd->cmd_active_expiration != 0) {
8056 TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8057 cmd_active_link);
8058 cmd->cmd_active_expiration = 0;
8059 }
8060 }
8061 }
8062
8063 /*
8064 * This is all we need to do for ioc commands.
8065 */
8066 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8067 mptsas_return_to_pool(mpt, cmd);
8068 return;
8069 }
8070
8071 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8072 }
8073
8074 /*
8075 * accept all cmds on the tx_waitq if any and then
8076 * start a fresh request from the top of the device queue.
8077 *
8078 * since there are always cmds queued on the tx_waitq, and rare cmds on
8079 * the instance waitq, so this function should not be invoked in the ISR,
8080 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
8081 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
8082 */
8083 static void
8084 mptsas_restart_hba(mptsas_t *mpt)
8085 {
8086 ASSERT(mutex_owned(&mpt->m_mutex));
8087
8088 mutex_enter(&mpt->m_tx_waitq_mutex);
8089 if (mpt->m_tx_waitq) {
8090 mptsas_accept_tx_waitq(mpt);
8091 }
8092 mutex_exit(&mpt->m_tx_waitq_mutex);
8093 mptsas_restart_waitq(mpt);
8094 }
8095
8096 /*
8097 * start a fresh request from the top of the device queue
8098 */
8099 static void
8100 mptsas_restart_waitq(mptsas_t *mpt)
8101 {
8102 mptsas_cmd_t *cmd, *next_cmd;
8103 mptsas_target_t *ptgt = NULL;
8104
8105 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8106
8107 ASSERT(mutex_owned(&mpt->m_mutex));
8108
8109 /*
8110 * If there is a reset delay, don't start any cmds. Otherwise, start
8111 * as many cmds as possible.
8112 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8113 * commands is m_max_requests - 2.
8114 */
8115 cmd = mpt->m_waitq;
8116
8117 while (cmd != NULL) {
8118 next_cmd = cmd->cmd_linkp;
8119 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8120 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8121 /*
8122 * passthru command get slot need
8123 * set CFLAG_PREPARED.
8124 */
8125 cmd->cmd_flags |= CFLAG_PREPARED;
8126 mptsas_waitq_delete(mpt, cmd);
8127 mptsas_start_passthru(mpt, cmd);
8128 }
8129 cmd = next_cmd;
8130 continue;
8131 }
8132 if (cmd->cmd_flags & CFLAG_CONFIG) {
8133 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8134 /*
8135 * Send the config page request and delete it
8136 * from the waitq.
8137 */
8138 cmd->cmd_flags |= CFLAG_PREPARED;
8139 mptsas_waitq_delete(mpt, cmd);
8140 mptsas_start_config_page_access(mpt, cmd);
8141 }
8142 cmd = next_cmd;
8143 continue;
8144 }
8145 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8146 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8147 /*
8148 * Send the FW Diag request and delete if from
8149 * the waitq.
8150 */
8151 cmd->cmd_flags |= CFLAG_PREPARED;
8152 mptsas_waitq_delete(mpt, cmd);
8153 mptsas_start_diag(mpt, cmd);
8154 }
8155 cmd = next_cmd;
8156 continue;
8157 }
8158
8159 ptgt = cmd->cmd_tgt_addr;
8160 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8161 (ptgt->m_t_ncmds == 0)) {
8162 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8163 }
8164 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
8165 (ptgt && (ptgt->m_reset_delay == 0)) &&
8166 (ptgt && (ptgt->m_t_ncmds <
8167 ptgt->m_t_throttle))) {
8168 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8169 mptsas_waitq_delete(mpt, cmd);
8170 (void) mptsas_start_cmd(mpt, cmd);
8171 }
8172 }
8173 cmd = next_cmd;
8174 }
8175 }
8176 /*
8177 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
8178 * Accept all those queued cmds before new cmd is accept so that the
8179 * cmds are sent in order.
8180 */
8181 static void
8182 mptsas_accept_tx_waitq(mptsas_t *mpt)
8183 {
8184 mptsas_cmd_t *cmd;
8185
8186 ASSERT(mutex_owned(&mpt->m_mutex));
8187 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8188
8189 /*
8190 * A Bus Reset could occur at any time and flush the tx_waitq,
8191 * so we cannot count on the tx_waitq to contain even one cmd.
8192 * And when the m_tx_waitq_mutex is released and run
8193 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8194 */
8195 cmd = mpt->m_tx_waitq;
8196 for (;;) {
8197 if ((cmd = mpt->m_tx_waitq) == NULL) {
8198 mpt->m_tx_draining = 0;
8199 break;
8200 }
8201 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8202 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8203 }
8204 cmd->cmd_linkp = NULL;
8205 mutex_exit(&mpt->m_tx_waitq_mutex);
8206 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8207 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8208 "to accept cmd on queue\n");
8209 mutex_enter(&mpt->m_tx_waitq_mutex);
8210 }
8211 }
8212
8213
8214 /*
8215 * mpt tag type lookup
8216 */
8217 static char mptsas_tag_lookup[] =
8218 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8219
/*
 * Build a SCSI IO request frame for a command that already owns a slot
 * (SMID == cmd_slot), post it to the request descriptor register, and
 * arm the command's timeout by inserting it into the target's
 * expiration-sorted active command queue.
 *
 * Returns DDI_SUCCESS once the request is posted, DDI_FAILURE if the
 * command must be requeued for untagged draining or an FMA handle
 * check fails after posting.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;
	mptsas_cmd_t		*c;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * Set SMID and increment index.  Rollover to 1 instead of 0 if index
	 * is at the max.  0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Requeue at the head so it goes out first once
			 * the target has drained. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			/* Untagged non-RQSENSE: serialize the target. */
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Locate this SMID's request frame and fill in the SCSI IO header. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data phase: a single zero-length simple SGE. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* Flush the frame to device-visible memory before posting. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
	cmd->cmd_active_expiration =
	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
#ifdef MPTSAS_TEST
	/*
	 * Force timeouts to happen immediately.
	 */
	if (mptsas_test_timeouts)
		cmd->cmd_active_expiration = gethrtime();
#endif
	/*
	 * m_active_cmdq is kept sorted with the latest expiration at the
	 * head; insert this command at its sorted position.
	 */
	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
	if (c == NULL ||
	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
		/*
		 * Common case is that this is the last pending expiration
		 * (or queue is empty). Insert at head of the queue.
		 */
		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
	} else {
		/*
		 * Queue is not empty and first element expires later than
		 * this command. Search for element expiring sooner.
		 */
		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
			if (c->cmd_active_expiration <
			    cmd->cmd_active_expiration) {
				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
				break;
			}
		}
		if (c == NULL) {
			/*
			 * No element found expiring sooner, append to
			 * non-empty queue.
			 */
			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
			    cmd_active_link);
		}
	}

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8408
8409 /*
8410 * Select a helper thread to handle current doneq
8411 */
8412 static void
8413 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8414 {
8415 uint64_t t, i;
8416 uint32_t min = 0xffffffff;
8417 mptsas_doneq_thread_list_t *item;
8418
8419 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8420 item = &mpt->m_doneq_thread_id[i];
8421 /*
8422 * If the completed command on help thread[i] less than
8423 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8424 * pick a thread which has least completed command.
8425 */
8426
8427 mutex_enter(&item->mutex);
8428 if (item->len < mpt->m_doneq_thread_threshold) {
8429 t = i;
8430 mutex_exit(&item->mutex);
8431 break;
8432 }
8433 if (item->len < min) {
8434 min = item->len;
8435 t = i;
8436 }
8437 mutex_exit(&item->mutex);
8438 }
8439 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8440 mptsas_doneq_mv(mpt, t);
8441 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8442 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8443 }
8444
8445 /*
8446 * move the current global doneq to the doneq of thead[t]
8447 */
8448 static void
8449 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8450 {
8451 mptsas_cmd_t *cmd;
8452 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8453
8454 ASSERT(mutex_owned(&item->mutex));
8455 while ((cmd = mpt->m_doneq) != NULL) {
8456 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8457 mpt->m_donetail = &mpt->m_doneq;
8458 }
8459 cmd->cmd_linkp = NULL;
8460 *item->donetail = cmd;
8461 item->donetail = &cmd->cmd_linkp;
8462 mpt->m_doneq_len--;
8463 item->len++;
8464 }
8465 }
8466
8467 void
8468 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8469 {
8470 struct scsi_pkt *pkt = CMD2PKT(cmd);
8471
8472 /* Check all acc and dma handles */
8473 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8474 DDI_SUCCESS) ||
8475 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8476 DDI_SUCCESS) ||
8477 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8478 DDI_SUCCESS) ||
8479 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8480 DDI_SUCCESS) ||
8481 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8482 DDI_SUCCESS) ||
8483 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8484 DDI_SUCCESS) ||
8485 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8486 DDI_SUCCESS)) {
8487 ddi_fm_service_impact(mpt->m_dip,
8488 DDI_SERVICE_UNAFFECTED);
8489 ddi_fm_acc_err_clear(mpt->m_config_handle,
8490 DDI_FME_VER0);
8491 pkt->pkt_reason = CMD_TRAN_ERR;
8492 pkt->pkt_statistics = 0;
8493 }
8494 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8495 DDI_SUCCESS) ||
8496 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8497 DDI_SUCCESS) ||
8498 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8499 DDI_SUCCESS) ||
8500 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8501 DDI_SUCCESS) ||
8502 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8503 DDI_SUCCESS)) {
8504 ddi_fm_service_impact(mpt->m_dip,
8505 DDI_SERVICE_UNAFFECTED);
8506 pkt->pkt_reason = CMD_TRAN_ERR;
8507 pkt->pkt_statistics = 0;
8508 }
8509 if (cmd->cmd_dmahandle &&
8510 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8511 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8512 pkt->pkt_reason = CMD_TRAN_ERR;
8513 pkt->pkt_statistics = 0;
8514 }
8515 if ((cmd->cmd_extra_frames &&
8516 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8517 DDI_SUCCESS) ||
8518 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8519 DDI_SUCCESS)))) {
8520 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8521 pkt->pkt_reason = CMD_TRAN_ERR;
8522 pkt->pkt_statistics = 0;
8523 }
8524 if (cmd->cmd_arqhandle &&
8525 (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8526 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8527 pkt->pkt_reason = CMD_TRAN_ERR;
8528 pkt->pkt_statistics = 0;
8529 }
8530 if (cmd->cmd_ext_arqhandle &&
8531 (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8532 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8533 pkt->pkt_reason = CMD_TRAN_ERR;
8534 pkt->pkt_statistics = 0;
8535 }
8536 }
8537
8538 /*
8539 * These routines manipulate the queue of commands that
8540 * are waiting for their completion routines to be called.
8541 * The queue is usually in FIFO order but on an MP system
8542 * it's possible for the completion routines to get out
8543 * of order. If that's a problem you need to add a global
8544 * mutex around the code that calls the completion routine
8545 * in the interrupt handler.
8546 */
8547 static void
8548 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8549 {
8550 struct scsi_pkt *pkt = CMD2PKT(cmd);
8551
8552 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8553
8554 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8555 cmd->cmd_linkp = NULL;
8556 cmd->cmd_flags |= CFLAG_FINISHED;
8557 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8558
8559 mptsas_fma_check(mpt, cmd);
8560
8561 /*
8562 * only add scsi pkts that have completion routines to
8563 * the doneq. no intr cmds do not have callbacks.
8564 */
8565 if (pkt && (pkt->pkt_comp)) {
8566 *mpt->m_donetail = cmd;
8567 mpt->m_donetail = &cmd->cmd_linkp;
8568 mpt->m_doneq_len++;
8569 }
8570 }
8571
8572 static mptsas_cmd_t *
8573 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8574 {
8575 mptsas_cmd_t *cmd;
8576 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8577
8578 /* pop one off the done queue */
8579 if ((cmd = item->doneq) != NULL) {
8580 /* if the queue is now empty fix the tail pointer */
8581 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8582 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8583 item->donetail = &item->doneq;
8584 }
8585 cmd->cmd_linkp = NULL;
8586 item->len--;
8587 }
8588 return (cmd);
8589 }
8590
8591 static void
8592 mptsas_doneq_empty(mptsas_t *mpt)
8593 {
8594 if (mpt->m_doneq && !mpt->m_in_callback) {
8595 mptsas_cmd_t *cmd, *next;
8596 struct scsi_pkt *pkt;
8597
8598 mpt->m_in_callback = 1;
8599 cmd = mpt->m_doneq;
8600 mpt->m_doneq = NULL;
8601 mpt->m_donetail = &mpt->m_doneq;
8602 mpt->m_doneq_len = 0;
8603
8604 mutex_exit(&mpt->m_mutex);
8605 /*
8606 * run the completion routines of all the
8607 * completed commands
8608 */
8609 while (cmd != NULL) {
8610 next = cmd->cmd_linkp;
8611 cmd->cmd_linkp = NULL;
8612 /* run this command's completion routine */
8613 cmd->cmd_flags |= CFLAG_COMPLETED;
8614 pkt = CMD2PKT(cmd);
8615 mptsas_pkt_comp(pkt, cmd);
8616 cmd = next;
8617 }
8618 mutex_enter(&mpt->m_mutex);
8619 mpt->m_in_callback = 0;
8620 }
8621 }
8622
8623 /*
8624 * These routines manipulate the target's queue of pending requests
8625 */
8626 void
8627 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8628 {
8629 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8630 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8631 cmd->cmd_queued = TRUE;
8632 if (ptgt)
8633 ptgt->m_t_nwait++;
8634 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8635 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8636 mpt->m_waitqtail = &cmd->cmd_linkp;
8637 }
8638 mpt->m_waitq = cmd;
8639 } else {
8640 cmd->cmd_linkp = NULL;
8641 *(mpt->m_waitqtail) = cmd;
8642 mpt->m_waitqtail = &cmd->cmd_linkp;
8643 }
8644 }
8645
8646 static mptsas_cmd_t *
8647 mptsas_waitq_rm(mptsas_t *mpt)
8648 {
8649 mptsas_cmd_t *cmd;
8650 mptsas_target_t *ptgt;
8651 NDBG7(("mptsas_waitq_rm"));
8652
8653 MPTSAS_WAITQ_RM(mpt, cmd);
8654
8655 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8656 if (cmd) {
8657 ptgt = cmd->cmd_tgt_addr;
8658 if (ptgt) {
8659 ptgt->m_t_nwait--;
8660 ASSERT(ptgt->m_t_nwait >= 0);
8661 }
8662 }
8663 return (cmd);
8664 }
8665
8666 /*
8667 * remove specified cmd from the middle of the wait queue.
8668 */
8669 static void
8670 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8671 {
8672 mptsas_cmd_t *prevp = mpt->m_waitq;
8673 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8674
8675 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8676 (void *)mpt, (void *)cmd));
8677 if (ptgt) {
8678 ptgt->m_t_nwait--;
8679 ASSERT(ptgt->m_t_nwait >= 0);
8680 }
8681
8682 if (prevp == cmd) {
8683 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8684 mpt->m_waitqtail = &mpt->m_waitq;
8685
8686 cmd->cmd_linkp = NULL;
8687 cmd->cmd_queued = FALSE;
8688 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8689 (void *)mpt, (void *)cmd));
8690 return;
8691 }
8692
8693 while (prevp != NULL) {
8694 if (prevp->cmd_linkp == cmd) {
8695 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8696 mpt->m_waitqtail = &prevp->cmd_linkp;
8697
8698 cmd->cmd_linkp = NULL;
8699 cmd->cmd_queued = FALSE;
8700 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8701 (void *)mpt, (void *)cmd));
8702 return;
8703 }
8704 prevp = prevp->cmd_linkp;
8705 }
8706 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8707 }
8708
/*
 * Pop the command at the head of the tx_waitq, or NULL if the queue is
 * empty; the dequeue itself is done by the MPTSAS_TX_WAITQ_RM() macro.
 * Callers in this file take m_tx_waitq_mutex around this call.
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
8721
8722 /*
8723 * remove specified cmd from the middle of the tx_waitq.
8724 */
8725 static void
8726 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8727 {
8728 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8729
8730 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8731 (void *)mpt, (void *)cmd));
8732
8733 if (prevp == cmd) {
8734 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8735 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8736
8737 cmd->cmd_linkp = NULL;
8738 cmd->cmd_queued = FALSE;
8739 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8740 (void *)mpt, (void *)cmd));
8741 return;
8742 }
8743
8744 while (prevp != NULL) {
8745 if (prevp->cmd_linkp == cmd) {
8746 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8747 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8748
8749 cmd->cmd_linkp = NULL;
8750 cmd->cmd_queued = FALSE;
8751 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8752 (void *)mpt, (void *)cmd));
8753 return;
8754 }
8755 prevp = prevp->cmd_linkp;
8756 }
8757 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8758 }
8759
8760 /*
8761 * device and bus reset handling
8762 *
8763 * Notes:
8764 * - RESET_ALL: reset the controller
8765 * - RESET_TARGET: reset the target specified in scsi_address
8766 */
8767 static int
8768 mptsas_scsi_reset(struct scsi_address *ap, int level)
8769 {
8770 mptsas_t *mpt = ADDR2MPT(ap);
8771 int rval;
8772 mptsas_tgt_private_t *tgt_private;
8773 mptsas_target_t *ptgt = NULL;
8774
8775 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8776 ptgt = tgt_private->t_private;
8777 if (ptgt == NULL) {
8778 return (FALSE);
8779 }
8780 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8781 level));
8782
8783 mutex_enter(&mpt->m_mutex);
8784 /*
8785 * if we are not in panic set up a reset delay for this target
8786 */
8787 if (!ddi_in_panic()) {
8788 mptsas_setup_bus_reset_delay(mpt);
8789 } else {
8790 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8791 }
8792 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8793 mutex_exit(&mpt->m_mutex);
8794
8795 /*
8796 * The transport layer expect to only see TRUE and
8797 * FALSE. Therefore, we will adjust the return value
8798 * if mptsas_do_scsi_reset returns FAILED.
8799 */
8800 if (rval == FAILED)
8801 rval = FALSE;
8802 return (rval);
8803 }
8804
8805 static int
8806 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8807 {
8808 int rval = FALSE;
8809 uint8_t config, disk;
8810
8811 ASSERT(mutex_owned(&mpt->m_mutex));
8812
8813 if (mptsas_debug_resets) {
8814 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8815 devhdl);
8816 }
8817
8818 /*
8819 * Issue a Target Reset message to the target specified but not to a
8820 * disk making up a raid volume. Just look through the RAID config
8821 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8822 * list, then don't reset this target.
8823 */
8824 for (config = 0; config < mpt->m_num_raid_configs; config++) {
8825 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8826 if (devhdl == mpt->m_raidconfig[config].
8827 m_physdisk_devhdl[disk]) {
8828 return (TRUE);
8829 }
8830 }
8831 }
8832
8833 rval = mptsas_ioc_task_management(mpt,
8834 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8835
8836 mptsas_doneq_empty(mpt);
8837 return (rval);
8838 }
8839
/*
 * (*tran_reset_notify) entry point: register or cancel a bus-reset
 * notification callback via the common scsi_hba_reset_notify_setup()
 * implementation, keyed off m_mutex and the per-instance notify list.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
8851
8852 static int
8853 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8854 {
8855 dev_info_t *lun_dip = NULL;
8856
8857 ASSERT(sd != NULL);
8858 ASSERT(name != NULL);
8859 lun_dip = sd->sd_dev;
8860 ASSERT(lun_dip != NULL);
8861
8862 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8863 return (1);
8864 } else {
8865 return (0);
8866 }
8867 }
8868
/*
 * (*tran_get_bus_addr): the bus address is the same string produced by
 * mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8874
8875 void
8876 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8877 {
8878
8879 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8880
8881 /*
8882 * if the bus is draining/quiesced, no changes to the throttles
8883 * are allowed. Not allowing change of throttles during draining
8884 * limits error recovery but will reduce draining time
8885 *
8886 * all throttles should have been set to HOLD_THROTTLE
8887 */
8888 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8889 return;
8890 }
8891
8892 if (what == HOLD_THROTTLE) {
8893 ptgt->m_t_throttle = HOLD_THROTTLE;
8894 } else if (ptgt->m_reset_delay == 0) {
8895 ptgt->m_t_throttle = what;
8896 }
8897 }
8898
8899 /*
8900 * Clean up from a device reset.
8901 * For the case of target reset, this function clears the waitq of all
8902 * commands for a particular target. For the case of abort task set, this
8903 * function clears the waitq of all commonds for a particular target/lun.
8904 */
8905 static void
8906 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
8907 {
8908 mptsas_slots_t *slots = mpt->m_active;
8909 mptsas_cmd_t *cmd, *next_cmd;
8910 int slot;
8911 uchar_t reason;
8912 uint_t stat;
8913 hrtime_t timestamp;
8914
8915 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
8916
8917 timestamp = gethrtime();
8918
8919 /*
8920 * Make sure the I/O Controller has flushed all cmds
8921 * that are associated with this target for a target reset
8922 * and target/lun for abort task set.
8923 * Account for TM requests, which use the last SMID.
8924 */
8925 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
8926 if ((cmd = slots->m_slot[slot]) == NULL)
8927 continue;
8928 reason = CMD_RESET;
8929 stat = STAT_DEV_RESET;
8930 switch (tasktype) {
8931 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8932 if (Tgt(cmd) == target) {
8933 if (cmd->cmd_active_expiration <= timestamp) {
8934 /*
8935 * When timeout requested, propagate
8936 * proper reason and statistics to
8937 * target drivers.
8938 */
8939 reason = CMD_TIMEOUT;
8940 stat |= STAT_TIMEOUT;
8941 }
8942 NDBG25(("mptsas_flush_target discovered non-"
8943 "NULL cmd in slot %d, tasktype 0x%x", slot,
8944 tasktype));
8945 mptsas_dump_cmd(mpt, cmd);
8946 mptsas_remove_cmd(mpt, cmd);
8947 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
8948 mptsas_doneq_add(mpt, cmd);
8949 }
8950 break;
8951 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8952 reason = CMD_ABORTED;
8953 stat = STAT_ABORTED;
8954 /*FALLTHROUGH*/
8955 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8956 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8957
8958 NDBG25(("mptsas_flush_target discovered non-"
8959 "NULL cmd in slot %d, tasktype 0x%x", slot,
8960 tasktype));
8961 mptsas_dump_cmd(mpt, cmd);
8962 mptsas_remove_cmd(mpt, cmd);
8963 mptsas_set_pkt_reason(mpt, cmd, reason,
8964 stat);
8965 mptsas_doneq_add(mpt, cmd);
8966 }
8967 break;
8968 default:
8969 break;
8970 }
8971 }
8972
8973 /*
8974 * Flush the waitq and tx_waitq of this target's cmds
8975 */
8976 cmd = mpt->m_waitq;
8977
8978 reason = CMD_RESET;
8979 stat = STAT_DEV_RESET;
8980
8981 switch (tasktype) {
8982 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8983 while (cmd != NULL) {
8984 next_cmd = cmd->cmd_linkp;
8985 if (Tgt(cmd) == target) {
8986 mptsas_waitq_delete(mpt, cmd);
8987 mptsas_set_pkt_reason(mpt, cmd,
8988 reason, stat);
8989 mptsas_doneq_add(mpt, cmd);
8990 }
8991 cmd = next_cmd;
8992 }
8993 mutex_enter(&mpt->m_tx_waitq_mutex);
8994 cmd = mpt->m_tx_waitq;
8995 while (cmd != NULL) {
8996 next_cmd = cmd->cmd_linkp;
8997 if (Tgt(cmd) == target) {
8998 mptsas_tx_waitq_delete(mpt, cmd);
8999 mutex_exit(&mpt->m_tx_waitq_mutex);
9000 mptsas_set_pkt_reason(mpt, cmd,
9001 reason, stat);
9002 mptsas_doneq_add(mpt, cmd);
9003 mutex_enter(&mpt->m_tx_waitq_mutex);
9004 }
9005 cmd = next_cmd;
9006 }
9007 mutex_exit(&mpt->m_tx_waitq_mutex);
9008 break;
9009 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9010 reason = CMD_ABORTED;
9011 stat = STAT_ABORTED;
9012 /*FALLTHROUGH*/
9013 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9014 while (cmd != NULL) {
9015 next_cmd = cmd->cmd_linkp;
9016 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9017 mptsas_waitq_delete(mpt, cmd);
9018 mptsas_set_pkt_reason(mpt, cmd,
9019 reason, stat);
9020 mptsas_doneq_add(mpt, cmd);
9021 }
9022 cmd = next_cmd;
9023 }
9024 mutex_enter(&mpt->m_tx_waitq_mutex);
9025 cmd = mpt->m_tx_waitq;
9026 while (cmd != NULL) {
9027 next_cmd = cmd->cmd_linkp;
9028 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9029 mptsas_tx_waitq_delete(mpt, cmd);
9030 mutex_exit(&mpt->m_tx_waitq_mutex);
9031 mptsas_set_pkt_reason(mpt, cmd,
9032 reason, stat);
9033 mptsas_doneq_add(mpt, cmd);
9034 mutex_enter(&mpt->m_tx_waitq_mutex);
9035 }
9036 cmd = next_cmd;
9037 }
9038 mutex_exit(&mpt->m_tx_waitq_mutex);
9039 break;
9040 default:
9041 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9042 tasktype);
9043 break;
9044 }
9045 }
9046
9047 /*
9048 * Clean up hba state, abort all outstanding command and commands in waitq
9049 * reset timeout of all targets.
9050 */
9051 static void
9052 mptsas_flush_hba(mptsas_t *mpt)
9053 {
9054 mptsas_slots_t *slots = mpt->m_active;
9055 mptsas_cmd_t *cmd;
9056 int slot;
9057
9058 NDBG25(("mptsas_flush_hba"));
9059
9060 /*
9061 * The I/O Controller should have already sent back
9062 * all commands via the scsi I/O reply frame. Make
9063 * sure all commands have been flushed.
9064 * Account for TM request, which use the last SMID.
9065 */
9066 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9067 if ((cmd = slots->m_slot[slot]) == NULL)
9068 continue;
9069
9070 if (cmd->cmd_flags & CFLAG_CMDIOC) {
9071 /*
9072 * Need to make sure to tell everyone that might be
9073 * waiting on this command that it's going to fail. If
9074 * we get here, this command will never timeout because
9075 * the active command table is going to be re-allocated,
9076 * so there will be nothing to check against a time out.
9077 * Instead, mark the command as failed due to reset.
9078 */
9079 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
9080 STAT_BUS_RESET);
9081 if ((cmd->cmd_flags &
9082 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
9083 cmd->cmd_flags |= CFLAG_FINISHED;
9084 cv_broadcast(&mpt->m_passthru_cv);
9085 cv_broadcast(&mpt->m_config_cv);
9086 cv_broadcast(&mpt->m_fw_diag_cv);
9087 }
9088 continue;
9089 }
9090
9091 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9092 slot));
9093 mptsas_dump_cmd(mpt, cmd);
9094
9095 mptsas_remove_cmd(mpt, cmd);
9096 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9097 mptsas_doneq_add(mpt, cmd);
9098 }
9099
9100 /*
9101 * Flush the waitq.
9102 */
9103 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
9104 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9105 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9106 (cmd->cmd_flags & CFLAG_CONFIG) ||
9107 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9108 cmd->cmd_flags |= CFLAG_FINISHED;
9109 cv_broadcast(&mpt->m_passthru_cv);
9110 cv_broadcast(&mpt->m_config_cv);
9111 cv_broadcast(&mpt->m_fw_diag_cv);
9112 } else {
9113 mptsas_doneq_add(mpt, cmd);
9114 }
9115 }
9116
9117 /*
9118 * Flush the tx_waitq
9119 */
9120 mutex_enter(&mpt->m_tx_waitq_mutex);
9121 while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
9122 mutex_exit(&mpt->m_tx_waitq_mutex);
9123 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9124 mptsas_doneq_add(mpt, cmd);
9125 mutex_enter(&mpt->m_tx_waitq_mutex);
9126 }
9127 mutex_exit(&mpt->m_tx_waitq_mutex);
9128
9129 /*
9130 * Drain the taskqs prior to reallocating resources.
9131 */
9132 mutex_exit(&mpt->m_mutex);
9133 ddi_taskq_wait(mpt->m_event_taskq);
9134 ddi_taskq_wait(mpt->m_dr_taskq);
9135 mutex_enter(&mpt->m_mutex);
9136 }
9137
9138 /*
9139 * set pkt_reason and OR in pkt_statistics flag
9140 */
9141 static void
9142 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9143 uint_t stat)
9144 {
9145 #ifndef __lock_lint
9146 _NOTE(ARGUNUSED(mpt))
9147 #endif
9148
9149 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9150 (void *)cmd, reason, stat));
9151
9152 if (cmd) {
9153 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9154 cmd->cmd_pkt->pkt_reason = reason;
9155 }
9156 cmd->cmd_pkt->pkt_statistics |= stat;
9157 }
9158 }
9159
/*
 * Arm the global reset-delay watch timeout if it is not already
 * pending and timeouts are enabled.  The handler
 * (mptsas_watch_reset_delay) re-arms itself while any instance still
 * has an active reset delay.
 */
static void
mptsas_start_watch_reset_delay()
{
	NDBG22(("mptsas_start_watch_reset_delay"));

	mutex_enter(&mptsas_global_mutex);
	/* Only one watch timeout may be outstanding at a time. */
	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
		    drv_usectohz((clock_t)
		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
		ASSERT(mptsas_reset_watch != NULL);
	}
	mutex_exit(&mptsas_global_mutex);
}
9174
/*
 * Hold the throttle of every known target and start its scsi reset
 * delay countdown, then make sure the reset-delay watch timeout is
 * running so the delays are counted down and throttles restored.
 */
static void
mptsas_setup_bus_reset_delay(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG22(("mptsas_setup_bus_reset_delay"));
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
	}

	mptsas_start_watch_reset_delay();
}
9191
9192 /*
9193 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9194 * mpt instance for active reset delays
9195 */
9196 static void
9197 mptsas_watch_reset_delay(void *arg)
9198 {
9199 #ifndef __lock_lint
9200 _NOTE(ARGUNUSED(arg))
9201 #endif
9202
9203 mptsas_t *mpt;
9204 int not_done = 0;
9205
9206 NDBG22(("mptsas_watch_reset_delay"));
9207
9208 mutex_enter(&mptsas_global_mutex);
9209 mptsas_reset_watch = 0;
9210 mutex_exit(&mptsas_global_mutex);
9211 rw_enter(&mptsas_global_rwlock, RW_READER);
9212 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9213 if (mpt->m_tran == 0) {
9214 continue;
9215 }
9216 mutex_enter(&mpt->m_mutex);
9217 not_done += mptsas_watch_reset_delay_subr(mpt);
9218 mutex_exit(&mpt->m_mutex);
9219 }
9220 rw_exit(&mptsas_global_rwlock);
9221
9222 if (not_done) {
9223 mptsas_start_watch_reset_delay();
9224 }
9225 }
9226
9227 static int
9228 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9229 {
9230 int done = 0;
9231 int restart = 0;
9232 mptsas_target_t *ptgt = NULL;
9233
9234 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9235
9236 ASSERT(mutex_owned(&mpt->m_mutex));
9237
9238 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9239 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9240 if (ptgt->m_reset_delay != 0) {
9241 ptgt->m_reset_delay -=
9242 MPTSAS_WATCH_RESET_DELAY_TICK;
9243 if (ptgt->m_reset_delay <= 0) {
9244 ptgt->m_reset_delay = 0;
9245 mptsas_set_throttle(mpt, ptgt,
9246 MAX_THROTTLE);
9247 restart++;
9248 } else {
9249 done = -1;
9250 }
9251 }
9252 }
9253
9254 if (restart > 0) {
9255 mptsas_restart_hba(mpt);
9256 }
9257 return (done);
9258 }
9259
9260 #ifdef MPTSAS_TEST
9261 static void
9262 mptsas_test_reset(mptsas_t *mpt, int target)
9263 {
9264 mptsas_target_t *ptgt = NULL;
9265
9266 if (mptsas_rtest == target) {
9267 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9268 mptsas_rtest = -1;
9269 }
9270 if (mptsas_rtest == -1) {
9271 NDBG22(("mptsas_test_reset success"));
9272 }
9273 }
9274 }
9275 #endif
9276
9277 /*
9278 * abort handling:
9279 *
9280 * Notes:
9281 * - if pkt is not NULL, abort just that command
9282 * - if pkt is NULL, abort all outstanding commands for target
9283 */
9284 static int
9285 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9286 {
9287 mptsas_t *mpt = ADDR2MPT(ap);
9288 int rval;
9289 mptsas_tgt_private_t *tgt_private;
9290 int target, lun;
9291
9292 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9293 tran_tgt_private;
9294 ASSERT(tgt_private != NULL);
9295 target = tgt_private->t_private->m_devhdl;
9296 lun = tgt_private->t_lun;
9297
9298 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9299
9300 mutex_enter(&mpt->m_mutex);
9301 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9302 mutex_exit(&mpt->m_mutex);
9303 return (rval);
9304 }
9305
/*
 * Abort a single packet or, when pkt is NULL, the whole task set for
 * target/lun.  Returns TRUE on success, FALSE otherwise.  Called with
 * m_mutex held; always drains the doneq before returning.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/* Still on the waitq: just pull it off and fail it. */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Complete anything queued for completion by the work above. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9385
9386 /*
9387 * capability handling:
9388 * (*tran_getcap). Get the capability named, and return its value.
9389 */
9390 static int
9391 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9392 {
9393 mptsas_t *mpt = ADDR2MPT(ap);
9394 int ckey;
9395 int rval = FALSE;
9396
9397 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9398 ap->a_target, cap, tgtonly));
9399
9400 mutex_enter(&mpt->m_mutex);
9401
9402 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9403 mutex_exit(&mpt->m_mutex);
9404 return (UNDEFINED);
9405 }
9406
9407 switch (ckey) {
9408 case SCSI_CAP_DMA_MAX:
9409 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9410 break;
9411 case SCSI_CAP_ARQ:
9412 rval = TRUE;
9413 break;
9414 case SCSI_CAP_MSG_OUT:
9415 case SCSI_CAP_PARITY:
9416 case SCSI_CAP_UNTAGGED_QING:
9417 rval = TRUE;
9418 break;
9419 case SCSI_CAP_TAGGED_QING:
9420 rval = TRUE;
9421 break;
9422 case SCSI_CAP_RESET_NOTIFICATION:
9423 rval = TRUE;
9424 break;
9425 case SCSI_CAP_LINKED_CMDS:
9426 rval = FALSE;
9427 break;
9428 case SCSI_CAP_QFULL_RETRIES:
9429 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9430 tran_tgt_private))->t_private->m_qfull_retries;
9431 break;
9432 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9433 rval = drv_hztousec(((mptsas_tgt_private_t *)
9434 (ap->a_hba_tran->tran_tgt_private))->
9435 t_private->m_qfull_retry_interval) / 1000;
9436 break;
9437 case SCSI_CAP_CDB_LEN:
9438 rval = CDB_GROUP4;
9439 break;
9440 case SCSI_CAP_INTERCONNECT_TYPE:
9441 rval = INTERCONNECT_SAS;
9442 break;
9443 case SCSI_CAP_TRAN_LAYER_RETRIES:
9444 if (mpt->m_ioc_capabilities &
9445 MPI2_IOCFACTS_CAPABILITY_TLR)
9446 rval = TRUE;
9447 else
9448 rval = FALSE;
9449 break;
9450 default:
9451 rval = UNDEFINED;
9452 break;
9453 }
9454
9455 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9456
9457 mutex_exit(&mpt->m_mutex);
9458 return (rval);
9459 }
9460
9461 /*
9462 * (*tran_setcap). Set the capability named to the value given.
9463 */
9464 static int
9465 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9466 {
9467 mptsas_t *mpt = ADDR2MPT(ap);
9468 int ckey;
9469 int rval = FALSE;
9470
9471 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9472 ap->a_target, cap, value, tgtonly));
9473
9474 if (!tgtonly) {
9475 return (rval);
9476 }
9477
9478 mutex_enter(&mpt->m_mutex);
9479
9480 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9481 mutex_exit(&mpt->m_mutex);
9482 return (UNDEFINED);
9483 }
9484
9485 switch (ckey) {
9486 case SCSI_CAP_DMA_MAX:
9487 case SCSI_CAP_MSG_OUT:
9488 case SCSI_CAP_PARITY:
9489 case SCSI_CAP_INITIATOR_ID:
9490 case SCSI_CAP_LINKED_CMDS:
9491 case SCSI_CAP_UNTAGGED_QING:
9492 case SCSI_CAP_RESET_NOTIFICATION:
9493 /*
9494 * None of these are settable via
9495 * the capability interface.
9496 */
9497 break;
9498 case SCSI_CAP_ARQ:
9499 /*
9500 * We cannot turn off arq so return false if asked to
9501 */
9502 if (value) {
9503 rval = TRUE;
9504 } else {
9505 rval = FALSE;
9506 }
9507 break;
9508 case SCSI_CAP_TAGGED_QING:
9509 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9510 (ap->a_hba_tran->tran_tgt_private))->t_private,
9511 MAX_THROTTLE);
9512 rval = TRUE;
9513 break;
9514 case SCSI_CAP_QFULL_RETRIES:
9515 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9516 t_private->m_qfull_retries = (uchar_t)value;
9517 rval = TRUE;
9518 break;
9519 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9520 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9521 t_private->m_qfull_retry_interval =
9522 drv_usectohz(value * 1000);
9523 rval = TRUE;
9524 break;
9525 default:
9526 rval = UNDEFINED;
9527 break;
9528 }
9529 mutex_exit(&mpt->m_mutex);
9530 return (rval);
9531 }
9532
9533 /*
9534 * Utility routine for mptsas_ifsetcap/ifgetcap
9535 */
9536 /*ARGSUSED*/
9537 static int
9538 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9539 {
9540 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9541
9542 if (!cap)
9543 return (FALSE);
9544
9545 *cidxp = scsi_hba_lookup_capstr(cap);
9546 return (TRUE);
9547 }
9548
9549 static int
9550 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9551 {
9552 mptsas_slots_t *old_active = mpt->m_active;
9553 mptsas_slots_t *new_active;
9554 size_t size;
9555
9556 /*
9557 * if there are active commands, then we cannot
9558 * change size of active slots array.
9559 */
9560 ASSERT(mpt->m_ncmds == 0);
9561
9562 size = MPTSAS_SLOTS_SIZE(mpt);
9563 new_active = kmem_zalloc(size, flag);
9564 if (new_active == NULL) {
9565 NDBG1(("new active alloc failed"));
9566 return (-1);
9567 }
9568 /*
9569 * Since SMID 0 is reserved and the TM slot is reserved, the
9570 * number of slots that can be used at any one time is
9571 * m_max_requests - 2.
9572 */
9573 new_active->m_n_normal = (mpt->m_max_requests - 2);
9574 new_active->m_size = size;
9575 new_active->m_rotor = 1;
9576 if (old_active)
9577 mptsas_free_active_slots(mpt);
9578 mpt->m_active = new_active;
9579
9580 return (0);
9581 }
9582
9583 static void
9584 mptsas_free_active_slots(mptsas_t *mpt)
9585 {
9586 mptsas_slots_t *active = mpt->m_active;
9587 size_t size;
9588
9589 if (active == NULL)
9590 return;
9591 size = active->m_size;
9592 kmem_free(active, size);
9593 mpt->m_active = NULL;
9594 }
9595
9596 /*
9597 * Error logging, printing, and debug print routines.
9598 */
/* Driver name used as the label prefix in scsi_log()/prom_printf() output. */
static char *mptsas_label = "mpt_sas";
9600
9601 /*PRINTFLIKE3*/
9602 void
9603 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9604 {
9605 dev_info_t *dev;
9606 va_list ap;
9607
9608 if (mpt) {
9609 dev = mpt->m_dip;
9610 } else {
9611 dev = 0;
9612 }
9613
9614 mutex_enter(&mptsas_log_mutex);
9615
9616 va_start(ap, fmt);
9617 (void) vsprintf(mptsas_log_buf, fmt, ap);
9618 va_end(ap);
9619
9620 if (level == CE_CONT) {
9621 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9622 } else {
9623 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9624 }
9625
9626 mutex_exit(&mptsas_log_mutex);
9627 }
9628
#ifdef MPTSAS_DEBUG
/*
 * Debug-build-only printf: formats into the shared log buffer under
 * mptsas_log_mutex and emits via prom_printf() or scsi_log(), depending
 * on the PROM_PRINTF build option.
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	/*
	 * NOTE(review): vsprintf() is unbounded; assumes debug messages
	 * stay shorter than mptsas_log_buf -- confirm buffer size.
	 */
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	/* "!" prefix directs the message to the log only, not the console. */
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
#endif
9651
9652 /*
9653 * timeout handling
9654 */
9655 static void
9656 mptsas_watch(void *arg)
9657 {
9658 #ifndef __lock_lint
9659 _NOTE(ARGUNUSED(arg))
9660 #endif
9661
9662 mptsas_t *mpt;
9663 uint32_t doorbell;
9664
9665 NDBG30(("mptsas_watch"));
9666
9667 rw_enter(&mptsas_global_rwlock, RW_READER);
9668 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9669
9670 mutex_enter(&mpt->m_mutex);
9671
9672 /* Skip device if not powered on */
9673 if (mpt->m_options & MPTSAS_OPT_PM) {
9674 if (mpt->m_power_level == PM_LEVEL_D0) {
9675 (void) pm_busy_component(mpt->m_dip, 0);
9676 mpt->m_busy = 1;
9677 } else {
9678 mutex_exit(&mpt->m_mutex);
9679 continue;
9680 }
9681 }
9682
9683 /*
9684 * Check if controller is in a FAULT state. If so, reset it.
9685 */
9686 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9687 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9688 doorbell &= MPI2_DOORBELL_DATA_MASK;
9689 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9690 "code: %04x", doorbell);
9691 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9692 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9693 mptsas_log(mpt, CE_WARN, "Reset failed"
9694 "after fault was detected");
9695 }
9696 }
9697
9698 /*
9699 * For now, always call mptsas_watchsubr.
9700 */
9701 mptsas_watchsubr(mpt);
9702
9703 if (mpt->m_options & MPTSAS_OPT_PM) {
9704 mpt->m_busy = 0;
9705 (void) pm_idle_component(mpt->m_dip, 0);
9706 }
9707
9708 mutex_exit(&mpt->m_mutex);
9709 }
9710 rw_exit(&mptsas_global_rwlock);
9711
9712 mutex_enter(&mptsas_global_mutex);
9713 if (mptsas_timeouts_enabled)
9714 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9715 mutex_exit(&mptsas_global_mutex);
9716 }
9717
/*
 * Per-instance watchdog body, called from mptsas_watch() with m_mutex
 * held.  Scans the active-slot array for expired commands and each
 * target's active command queue for timeouts, draining or restoring
 * throttles and initiating recovery (mptsas_cmd_timeout) as needed.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int i;
	mptsas_cmd_t *cmd;
	mptsas_target_t *ptgt = NULL;
	/* Snapshot the clock once; all expirations compare against it. */
	hrtime_t timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 * (Hence "<=" rather than "<": one extra slot past m_n_normal.)
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 * Mark finished/timed-out and wake any
					 * thread blocked on the result.
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		/* The queue tail carries the latest expiration for the tgt. */
		cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
		if (cmd == NULL)
			continue;

		if (cmd->cmd_active_expiration <= timestamp) {
			/*
			 * Earliest command timeout expired. Drain throttle.
			 */
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

			/*
			 * Check for remaining commands.
			 */
			cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
			if (cmd->cmd_active_expiration > timestamp) {
				/*
				 * Wait for remaining commands to complete or
				 * time out.
				 */
				NDBG23(("command timed out, pending drain"));
				continue;
			}

			/*
			 * All command timeouts expired.
			 */
			mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
			    "expired with %d commands on target %d lun %d.",
			    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
			    ptgt->m_devhdl, Lun(cmd));

			mptsas_cmd_timeout(mpt, ptgt);
		} else if (cmd->cmd_active_expiration <=
		    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
			/* Due within the next watchdog tick: start draining. */
			NDBG23(("pending timeout"));
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
		}
	}
}
9819
9820 /*
9821 * timeout recovery
9822 */
9823 static void
9824 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
9825 {
9826 uint16_t devhdl;
9827 uint64_t sas_wwn;
9828 uint8_t phy;
9829 char wwn_str[MPTSAS_WWN_STRLEN];
9830
9831 devhdl = ptgt->m_devhdl;
9832 sas_wwn = ptgt->m_addr.mta_wwn;
9833 phy = ptgt->m_phynum;
9834 if (sas_wwn == 0) {
9835 (void) sprintf(wwn_str, "p%x", phy);
9836 } else {
9837 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
9838 }
9839
9840 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9841 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9842 "target %d %s.", devhdl, wwn_str);
9843
9844 /*
9845 * Abort all outstanding commands on the device.
9846 */
9847 NDBG29(("mptsas_cmd_timeout: device reset"));
9848 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9849 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9850 "recovery failed!", devhdl);
9851 }
9852 }
9853
9854 /*
9855 * Device / Hotplug control
9856 */
9857 static int
9858 mptsas_scsi_quiesce(dev_info_t *dip)
9859 {
9860 mptsas_t *mpt;
9861 scsi_hba_tran_t *tran;
9862
9863 tran = ddi_get_driver_private(dip);
9864 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9865 return (-1);
9866
9867 return (mptsas_quiesce_bus(mpt));
9868 }
9869
9870 static int
9871 mptsas_scsi_unquiesce(dev_info_t *dip)
9872 {
9873 mptsas_t *mpt;
9874 scsi_hba_tran_t *tran;
9875
9876 tran = ddi_get_driver_private(dip);
9877 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9878 return (-1);
9879
9880 return (mptsas_unquiesce_bus(mpt));
9881 }
9882
/*
 * Quiesce the bus: hold every target's throttle and wait
 * (interruptibly) for outstanding commands to drain.  Returns 0 once
 * quiesced, -1 if the wait was interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		/* Periodic callback signals m_cv when the queue drains. */
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Reopen throttles and restart I/O before bailing. */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel any pending drain-check callback; drop the
			 * mutex first since untimeout() may block on a
			 * callback that needs it.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9935
9936 static int
9937 mptsas_unquiesce_bus(mptsas_t *mpt)
9938 {
9939 mptsas_target_t *ptgt = NULL;
9940
9941 NDBG28(("mptsas_unquiesce_bus"));
9942 mutex_enter(&mpt->m_mutex);
9943 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9944 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9945 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9946 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9947 }
9948 mptsas_restart_hba(mpt);
9949 mutex_exit(&mpt->m_mutex);
9950 return (0);
9951 }
9952
/*
 * timeout(9F) callback armed by mptsas_quiesce_bus().  If a quiesce is
 * still in progress, signal the waiter when the queue has drained;
 * otherwise re-hold throttles (a bus reset may have reopened them) and
 * re-arm itself.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		/* This invocation has fired; clear the id before re-arming. */
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
			}

			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
9982
9983 /*ARGSUSED*/
9984 static void
9985 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9986 {
9987 int i;
9988 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9989 char buf[128];
9990
9991 buf[0] = '\0';
9992 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9993 Tgt(cmd), Lun(cmd)));
9994 (void) sprintf(&buf[0], "\tcdb=[");
9995 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9996 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9997 }
9998 (void) sprintf(&buf[strlen(buf)], " ]");
9999 NDBG25(("?%s\n", buf));
10000 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10001 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10002 cmd->cmd_pkt->pkt_state));
10003 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10004 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10005 }
10006
/*
 * Build the MPI2 simple 64-bit SGEs for a passthrough request: an
 * optional host-to-IOC SGE for the data-out buffer, followed by the
 * final SGE for the data buffer whose direction flag depends on
 * pt->direction.  An SGE is always written, even for zero length.
 */
static void
mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2SGESimple64_t sgep)
{
	uint32_t	sge_flags;
	uint32_t	data_size, dataout_size;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/* Data-out buffer (write direction) gets its own leading SGE. */
	if (dataout_size) {
		sge_flags = dataout_size |
		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
		/* 64-bit DMA address split into low/high 32-bit halves. */
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress & 0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress >> 32));
		sgep++;
	}
	/* Final SGE: terminates the list; direction chosen below. */
	sge_flags = data_size;
	sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
	    MPI2_SGE_FLAGS_SHIFT);
	if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
		    MPI2_SGE_FLAGS_SHIFT);
	} else {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
		    MPI2_SGE_FLAGS_SHIFT);
	}
	ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
	ddi_put32(acc_hdl, &sgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
	ddi_put32(acc_hdl, &sgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
}
10055
/*
 * Build the MPI2.5 IEEE simple 64-bit SGEs for a passthrough request
 * (SAS3 controllers): an optional SGE for the data-out buffer, then the
 * final data SGE.  IEEE SGEs carry no direction bits; only the last one
 * is marked END_OF_LIST (note sge_flags is reused, so the flag is OR'd
 * onto the base flags written for the first SGE).
 */
static void
mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2IeeeSgeSimple64_t ieeesgep)
{
	uint8_t		sge_flags;
	uint32_t	data_size, dataout_size;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	/* Data-out buffer gets its own leading SGE. */
	if (dataout_size) {
		ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
		ddi_put32(acc_hdl, &ieeesgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &ieeesgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress >> 32));
		ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
		ieeesgep++;
	}
	/* Final SGE terminates the chain. */
	sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
	ddi_put32(acc_hdl, &ieeesgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
	ddi_put32(acc_hdl, &ieeesgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
	ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
}
10090
/*
 * Copy a prepared passthrough request into the request frame for the
 * command's slot, append its SGEs (IEEE form on MPI2.5 unless forced
 * simple), patch up sense buffer and descriptor fields for SCSI IO
 * requests, and post the request descriptor to the controller.
 * Called with the slot already allocated (cmd->cmd_slot valid).
 */
static void
mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	caddr_t			memp;
	pMPI2RequestHeader_t	request_hdrp;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
	uint32_t		request_size;
	uint32_t		request_desc_low, request_desc_high = 0;
	uint64_t		sense_bufp;
	uint8_t			desc_type;
	uint8_t			*request, function;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;

	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	request = pt->request;
	request_size = pt->request_size;

	/*
	 * Store the passthrough message in memory location
	 * corresponding to our slot number
	 */
	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
	request_hdrp = (pMPI2RequestHeader_t)memp;
	bzero(memp, mpt->m_req_frame_size);

	bcopy(request, memp, request_size);

	NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
	    "size=%d, in %d, out %d", request_hdrp->Function,
	    request_hdrp->MsgFlags, request_size,
	    pt->data_size, pt->dataout_size));

	/*
	 * Add an SGE, even if the length is zero.
	 */
	if (mpt->m_MPI25 && pt->simple == 0) {
		mptsas_passthru_ieee_sge(acc_hdl, pt,
		    (pMpi2IeeeSgeSimple64_t)
		    ((uint8_t *)request_hdrp + pt->sgl_offset));
	} else {
		mptsas_passthru_sge(acc_hdl, pt,
		    (pMpi2SGESimple64_t)
		    ((uint8_t *)request_hdrp + pt->sgl_offset));
	}

	function = request_hdrp->Function;
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		pMpi2SCSIIORequest_t	scsi_io_req;

		NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
		/*
		 * Put SGE for data and data_out buffer at the end of
		 * scsi_io_request message header.(64 bytes in total)
		 * Following above SGEs, the residual space will be
		 * used by sense data.
		 */
		ddi_put8(acc_hdl,
		    &scsi_io_req->SenseBufferLength,
		    (uint8_t)(request_size - 64));

		/*
		 * NOTE(review): due to operator precedence the
		 * "& 0xffffffffull" binds only to the slot-offset term,
		 * not the full sum; the (uint32_t) cast truncates to the
		 * low 32 bits either way, so the stored value is the
		 * same.  SenseBufferLowAddress is only 32 bits wide --
		 * presumably the request frames live in the low 4GB of
		 * DMA space; confirm against the frame allocation.
		 */
		sense_bufp = (uint32_t)(mpt->m_req_frame_dma_addr +
		    (mpt->m_req_frame_size * cmd->cmd_slot) & 0xffffffffull);
		sense_bufp += 64;
		ddi_put32(acc_hdl,
		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);

		/*
		 * Set SGLOffset0 value
		 */
		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);

		/*
		 * Setup descriptor info.  RAID passthrough must use the
		 * default request descriptor which is already set, so if this
		 * is a SCSI IO request, change the descriptor to SCSI IO.
		 */
		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
			request_desc_high = (ddi_get16(acc_hdl,
			    &scsi_io_req->DevHandle) << 16);
		}
	}

	/*
	 * We must wait till the message has been completed before
	 * beginning the next message so we wait for this one to
	 * finish.
	 */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10194
10195 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10196 static mptsas_pre_f mpi_pre_ioc_facts;
10197 static mptsas_pre_f mpi_pre_port_facts;
10198 static mptsas_pre_f mpi_pre_fw_download;
10199 static mptsas_pre_f mpi_pre_fw_25_download;
10200 static mptsas_pre_f mpi_pre_fw_upload;
10201 static mptsas_pre_f mpi_pre_fw_25_upload;
10202 static mptsas_pre_f mpi_pre_sata_passthrough;
10203 static mptsas_pre_f mpi_pre_smp_passthrough;
10204 static mptsas_pre_f mpi_pre_config;
10205 static mptsas_pre_f mpi_pre_sas_io_unit_control;
10206 static mptsas_pre_f mpi_pre_scsi_io_req;
10207
10208 /*
10209 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10210 */
/*
 * Prepare the pt for a SAS2 FW_DOWNLOAD request: validate the caller's
 * transaction context element and place the SGL immediately after it.
 * Size mismatches are logged (debug builds) but not treated as fatal.
 */
static void
mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWDownloadTCSGE_t tcsge;
	pMpi2FWDownloadRequest req;

	/*
	 * If SAS3, call separate function.
	 */
	if (mpt->m_MPI25) {
		mpi_pre_fw_25_download(mpt, pt);
		return;
	}

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. Putting the
	 * SGL after that seems to work, but don't really know
	 * why. Other drivers tend to create an extra SGL and
	 * refer to the TCE through that.
	 */
	req = (pMpi2FWDownloadRequest)pt->request;
	tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
	}

	/* SGL follows the fixed request header plus the TCE. */
	pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
	    sizeof (*tcsge);
	if (pt->request_size != pt->sgl_offset)
		NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    (int)pt->request_size, (int)pt->sgl_offset,
		    (int)pt->dataout_size));
	if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
		NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
}
10251
10252 /*
10253 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10254 */
10255 static void
10256 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10257 {
10258 pMpi2FWDownloadTCSGE_t tcsge;
10259 pMpi2FWDownloadRequest req2;
10260 pMpi25FWDownloadRequest req25;
10261
10262 /*
10263 * User requests should come in with the Transaction
10264 * context element where the SGL will go. The new firmware
10265 * Doesn't use TCE and has space in the main request for
10266 * this information. So move to the right place.
10267 */
10268 req2 = (pMpi2FWDownloadRequest)pt->request;
10269 req25 = (pMpi25FWDownloadRequest)pt->request;
10270 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10271 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10272 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10273 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10274 }
10275 req25->ImageOffset = tcsge->ImageOffset;
10276 req25->ImageSize = tcsge->ImageSize;
10277
10278 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10279 if (pt->request_size != pt->sgl_offset)
10280 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10281 "0x%x, should be 0x%x, dataoutsz 0x%x",
10282 pt->request_size, pt->sgl_offset,
10283 pt->dataout_size));
10284 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10285 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10286 "0x%x, should be 0x%x", pt->data_size,
10287 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10288 }
10289
10290 /*
10291 * Prepare the pt for a SAS2 FW_UPLOAD request.
10292 */
/*
 * Prepare the pt for a SAS2 FW_UPLOAD request: validate the caller's
 * transaction context element and place the SGL immediately after it.
 * Size mismatches are logged (debug builds) but not treated as fatal.
 */
static void
mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWUploadTCSGE_t tcsge;
	pMpi2FWUploadRequest_t req;

	/*
	 * If SAS3, call separate function.
	 */
	if (mpt->m_MPI25) {
		mpi_pre_fw_25_upload(mpt, pt);
		return;
	}

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. Putting the
	 * SGL after that seems to work, but don't really know
	 * why. Other drivers tend to create an extra SGL and
	 * refer to the TCE through that.
	 */
	req = (pMpi2FWUploadRequest_t)pt->request;
	tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
	}

	/* SGL follows the fixed request header plus the TCE. */
	pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
	    sizeof (*tcsge);
	if (pt->request_size != pt->sgl_offset)
		NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    pt->request_size, pt->sgl_offset,
		    pt->dataout_size));
	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
		NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
}
10333
10334 /*
10335 * Prepare the pt a SAS3 FW_UPLOAD request.
10336 */
/*
 * Prepare the pt for a SAS3 (MPI2.5) FW_UPLOAD request.  The SAS3
 * request has no transaction context element; copy the image
 * offset/size out of the caller-supplied TCE into the MPI2.5 request
 * fields, then place the SGL at its fixed offset.
 */
static void
mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWUploadTCSGE_t tcsge;
	pMpi2FWUploadRequest_t req2;
	pMpi25FWUploadRequest_t req25;

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. The new firmware
	 * Doesn't use TCE and has space in the main request for
	 * this information. So move to the right place.
	 */
	req2 = (pMpi2FWUploadRequest_t)pt->request;
	req25 = (pMpi25FWUploadRequest_t)pt->request;
	tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
	}
	req25->ImageOffset = tcsge->ImageOffset;
	req25->ImageSize = tcsge->ImageSize;

	pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
	if (pt->request_size != pt->sgl_offset)
		NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    pt->request_size, pt->sgl_offset,
		    pt->dataout_size));
	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
		NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
}
10371
10372 /*
10373 * Prepare the pt for an IOC_FACTS request.
10374 */
10375 static void
10376 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10377 {
10378 #ifndef __lock_lint
10379 _NOTE(ARGUNUSED(mpt))
10380 #endif
10381 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST))
10382 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10383 "0x%x, should be 0x%x, dataoutsz 0x%x",
10384 pt->request_size,
10385 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10386 pt->dataout_size));
10387 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY))
10388 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10389 "0x%x, should be 0x%x", pt->data_size,
10390 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10391 pt->sgl_offset = (uint16_t)pt->request_size;
10392 }
10393
10394 /*
10395 * Prepare the pt for a PORT_FACTS request.
10396 */
10397 static void
10398 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10399 {
10400 #ifndef __lock_lint
10401 _NOTE(ARGUNUSED(mpt))
10402 #endif
10403 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST))
10404 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10405 "0x%x, should be 0x%x, dataoutsz 0x%x",
10406 pt->request_size,
10407 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10408 pt->dataout_size));
10409 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY))
10410 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10411 "0x%x, should be 0x%x", pt->data_size,
10412 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10413 pt->sgl_offset = (uint16_t)pt->request_size;
10414 }
10415
10416 /*
10417 * Prepare pt for a SATA_PASSTHROUGH request.
10418 */
10419 static void
10420 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10421 {
10422 #ifndef __lock_lint
10423 _NOTE(ARGUNUSED(mpt))
10424 #endif
10425 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10426 if (pt->request_size != pt->sgl_offset)
10427 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10428 "0x%x, should be 0x%x, dataoutsz 0x%x",
10429 pt->request_size, pt->sgl_offset,
10430 pt->dataout_size));
10431 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY))
10432 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10433 "0x%x, should be 0x%x", pt->data_size,
10434 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10435 }
10436
10437 static void
10438 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10439 {
10440 #ifndef __lock_lint
10441 _NOTE(ARGUNUSED(mpt))
10442 #endif
10443 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10444 if (pt->request_size != pt->sgl_offset)
10445 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10446 "0x%x, should be 0x%x, dataoutsz 0x%x",
10447 pt->request_size, pt->sgl_offset,
10448 pt->dataout_size));
10449 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY))
10450 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10451 "0x%x, should be 0x%x", pt->data_size,
10452 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10453 }
10454
10455 /*
10456 * Prepare pt for a CONFIG request.
10457 */
10458 static void
10459 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10460 {
10461 #ifndef __lock_lint
10462 _NOTE(ARGUNUSED(mpt))
10463 #endif
10464 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10465 if (pt->request_size != pt->sgl_offset)
10466 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10467 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10468 pt->sgl_offset, pt->dataout_size));
10469 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY))
10470 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10471 "should be 0x%x", pt->data_size,
10472 (int)sizeof (MPI2_CONFIG_REPLY)));
10473 pt->simple = 1;
10474 }
10475
10476 /*
10477 * Prepare pt for a SCSI_IO_REQ request.
10478 */
10479 static void
10480 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10481 {
10482 #ifndef __lock_lint
10483 _NOTE(ARGUNUSED(mpt))
10484 #endif
10485 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10486 if (pt->request_size != pt->sgl_offset)
10487 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10488 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10489 pt->sgl_offset,
10490 pt->dataout_size));
10491 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY))
10492 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10493 "should be 0x%x", pt->data_size,
10494 (int)sizeof (MPI2_SCSI_IO_REPLY)));
10495 }
10496
10497 /*
10498 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
10499 */
static void
mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(mpt))
#endif
	/* No SGL in this request; the "offset" is the end of the request. */
	pt->sgl_offset = (uint16_t)pt->request_size;
}
10508
10509 /*
10510 * A set of functions to prepare an mptsas_cmd for the various
10511 * supported requests.
10512 */
/*
 * Dispatch table mapping MPI2 function codes to their request-prep
 * routines; scanned linearly by mptsas_prep_sgl_offset() and
 * terminated by the f_pre == NULL sentinel entry.
 */
static struct mptsas_func {
	U8		Function;	/* MPI2_FUNCTION_* code */
	char		*Name;		/* human-readable name for debug */
	mptsas_pre_f	*f_pre;		/* prep routine for this function */
} mptsas_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS",		mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS",	mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD",	mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD",		mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
	    mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
	    mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
	    mpi_pre_scsi_io_req},
	{ MPI2_FUNCTION_CONFIG, "CONFIG",		mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
	    mpi_pre_sas_io_unit_control },
	{ 0xFF, NULL,				NULL } /* list end */
};
10533
10534 static void
10535 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
10536 {
10537 pMPI2RequestHeader_t hdr;
10538 struct mptsas_func *f;
10539
10540 hdr = (pMPI2RequestHeader_t)pt->request;
10541
10542 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
10543 if (hdr->Function == f->Function) {
10544 f->f_pre(mpt, pt);
10545 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
10546 " sgl_offset 0x%x", f->Name,
10547 pt->sgl_offset));
10548 return;
10549 }
10550 }
10551 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
10552 " returning req_size 0x%x for sgl_offset",
10553 hdr->Function, pt->request_size));
10554 pt->sgl_offset = (uint16_t)pt->request_size;
10555 }
10556
10557
/*
 * Execute a user-supplied MPI pass-through request on behalf of an ioctl.
 *
 * The caller holds mpt->m_mutex on entry and still holds it on return.
 * The mutex is temporarily dropped around every ddi_copyin()/ddi_copyout()
 * since those may fault/sleep.
 *
 * request/reply/data/dataout are user-space addresses (copied with the
 * supplied ddi 'mode'); *_size are their byte counts.  'direction' is one
 * of the MPTSAS_PASS_THRU_DIRECTION_* values; 'timeout' is in seconds.
 *
 * Returns 0 on success or an errno value (EFAULT, ENOMEM, EAGAIN, EIO,
 * ETIMEDOUT) on failure.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint8_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t pt;
	mptsas_dma_alloc_state_t data_dma_state;
	mptsas_dma_alloc_state_t dataout_dma_state;
	caddr_t memp;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	uint32_t reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t request_hdrp;
	pMPI2RequestHeader_t request_msg;
	pMPI2DefaultReply_t reply_msg;
	Mpi2SCSIIOReply_t rep_msg;
	int i, status = 0, pt_flags = 0, rv = 0;
	int rvalue;
	uint8_t function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Default reply points at a zeroed on-stack frame; it is redirected
	 * to the real reply frame below only if an ADDRESS reply arrives.
	 */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task-management requests are handled by a dedicated path and do
	 * not go through the normal pass-through command machinery.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/*
	 * Allocate a DMA buffer for the data-in side and, for WRITE
	 * requests, pre-fill it from user space.
	 */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			/* Byte-at-a-time copy; any fault aborts the request */
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}
	else
		bzero(&data_dma_state, sizeof (data_dma_state));

	/*
	 * Same for the separate data-out buffer (used by BOTH-direction
	 * requests); always copied in from user space.
	 */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}
	else
		bzero(&dataout_dma_state, sizeof (dataout_dma_state));

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * Describe the pass-through: the copied-in request frame, the data
	 * DMA cookies, and the SGL offset derived from the MPI function.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.simple = 0;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;
	mptsas_prep_sgl_offset(mpt, &pt);

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until completion/timeout is signalled by the reply path. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	/*
	 * request_hdrp points into the hardware request frame for this
	 * slot; only valid while CFLAG_PREPARED is set (checked again
	 * before every later use).
	 */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/*
	 * Copy the reply (and, for SCSI IO functions, trailing sense data
	 * taken from the request frame at a fixed 64-byte offset) back to
	 * user space.  NOTE(review): sense_len = reply_size - reply_len
	 * assumes reply_size >= sizeof (MPI2_SCSI_IO_REPLY); presumably
	 * validated by the ioctl caller — confirm.
	 */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* Copy DMA'd read data back to the user buffer. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * A prepared cmd was removed from its slot; clearing
	 * MPTSAS_REQUEST_POOL_CMD here because mptsas_remove_cmd() already
	 * returned it to the pool.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	/* Check DMA handles for FMA faults before freeing them. */
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out command leaves the IOC in an unknown state: reset it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
10869
10870 static int
10871 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10872 {
10873 /*
10874 * If timeout is 0, set timeout to default of 60 seconds.
10875 */
10876 if (data->Timeout == 0) {
10877 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10878 }
10879
10880 if (((data->DataSize == 0) &&
10881 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10882 ((data->DataSize != 0) &&
10883 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10884 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10885 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10886 (data->DataOutSize != 0))))) {
10887 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10888 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10889 } else {
10890 data->DataOutSize = 0;
10891 }
10892 /*
10893 * Send passthru request messages
10894 */
10895 return (mptsas_do_passthru(mpt,
10896 (uint8_t *)((uintptr_t)data->PtrRequest),
10897 (uint8_t *)((uintptr_t)data->PtrReply),
10898 (uint8_t *)((uintptr_t)data->PtrData),
10899 data->RequestSize, data->ReplySize,
10900 data->DataSize, (uint8_t)data->DataDirection,
10901 (uint8_t *)((uintptr_t)data->PtrDataOut),
10902 data->DataOutSize, data->Timeout, mode));
10903 } else {
10904 return (EINVAL);
10905 }
10906 }
10907
10908 static uint8_t
10909 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10910 {
10911 uint8_t index;
10912
10913 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10914 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10915 return (index);
10916 }
10917 }
10918
10919 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10920 }
10921
/*
 * Build a FW diagnostic request (buffer POST or RELEASE, selected by the
 * mptsas_diag_request_t hung off pkt_ha_private) directly into the
 * command's hardware request frame, then ring the doorbell to start it.
 * Called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t pDiag_post_msg;
	pMpi2DiagReleaseRequest_t pDiag_release_msg;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	mptsas_diag_request_t *diag = pkt->pkt_ha_private;
	uint32_t request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* All fields are written through the DDI access handle. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE request: only function and buffer type needed. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	/* NOTE(review): cmd_rfm is used as an integer frame address
	 * elsewhere; assigning NULL here relies on NULL == 0 — confirm. */
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10993
/*
 * Post a FW diagnostic buffer to the IOC so firmware can start filling it.
 * On success the buffer is marked valid and owned by firmware and
 * *return_code is MPTSAS_FW_DIAG_ERROR_SUCCESS; on failure *return_code
 * holds an MPTSAS_FW_DIAG_ERROR_* value.  Returns DDI_SUCCESS/DDI_FAILURE.
 * Called with m_mutex held (cv_wait below relies on it).
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t diag;
	int status, slot_num, post_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagBufferPostReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* diag lives on this stack frame; mptsas_start_diag() reads it. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Wait for the reply path to signal completion or timeout. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* remove_cmd also returns the cmd to the pool; avoid doing it twice */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11143
/*
 * Ask firmware to release a previously posted diagnostic buffer.
 * diag_type distinguishes a plain RELEASE from an UNREGISTER (the latter
 * also clears the buffer's unique ID on success).  *return_code carries an
 * MPTSAS_FW_DIAG_ERROR_* value for the caller; returns DDI_SUCCESS or
 * DDI_FAILURE.  Called with m_mutex held.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t diag;
	int status, slot_num, rel_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagReleaseReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* diag lives on this stack frame; mptsas_start_diag() reads it. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Wait for the reply path to signal completion or timeout. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 * NOTE(review): owned_by_firmware was cleared above before
		 * the request was issued; presumably it can be re-set by the
		 * reply path — confirm, otherwise this check is dead.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* remove_cmd also returns the cmd to the pool; avoid doing it twice */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11302
/*
 * Register a FW diagnostic buffer: validate the requested type and unique
 * ID, allocate a contiguous DMA buffer of the requested size, and post it
 * to firmware.  On any failure after allocation the DMA buffer is freed.
 * *return_code carries an MPTSAS_FW_DIAG_ERROR_* value; returns
 * DDI_SUCCESS or DDI_FAILURE.  Called with m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t *pBuffer;
	uint8_t extended_type, buffer_type, i;
	uint32_t buffer_size;
	uint32_t unique_id;
	int status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID. The unique ID
	 * should not be found. If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled. The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	/* Product-specific words only apply to TRACE buffers. */
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	/* FMA check of the new DMA handle; a fault fails the register. */
	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11409
11410 static int
11411 mptsas_diag_unregister(mptsas_t *mpt,
11412 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11413 {
11414 mptsas_fw_diagnostic_buffer_t *pBuffer;
11415 uint8_t i;
11416 uint32_t unique_id;
11417 int status;
11418
11419 ASSERT(mutex_owned(&mpt->m_mutex));
11420
11421 unique_id = diag_unregister->UniqueId;
11422
11423 /*
11424 * Get the current buffer and look up the unique ID. The unique ID
11425 * should be there.
11426 */
11427 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11428 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11429 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11430 return (DDI_FAILURE);
11431 }
11432
11433 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11434
11435 /*
11436 * Try to release the buffer from FW before freeing it. If release
11437 * fails, don't free the DMA buffer in case FW tries to access it
11438 * later. If buffer is not owned by firmware, can't release it.
11439 */
11440 if (!pBuffer->owned_by_firmware) {
11441 status = DDI_SUCCESS;
11442 } else {
11443 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
11444 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
11445 }
11446
11447 /*
11448 * At this point, return the current status no matter what happens with
11449 * the DMA buffer.
11450 */
11451 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11452 if (status == DDI_SUCCESS) {
11453 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11454 DDI_SUCCESS) {
11455 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
11456 "in mptsas_diag_unregister.");
11457 ddi_fm_service_impact(mpt->m_dip,
11458 DDI_SERVICE_UNAFFECTED);
11459 }
11460 mptsas_dma_free(&pBuffer->buffer_data);
11461 }
11462
11463 return (status);
11464 }
11465
11466 static int
11467 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
11468 uint32_t *return_code)
11469 {
11470 mptsas_fw_diagnostic_buffer_t *pBuffer;
11471 uint8_t i;
11472 uint32_t unique_id;
11473
11474 ASSERT(mutex_owned(&mpt->m_mutex));
11475
11476 unique_id = diag_query->UniqueId;
11477
11478 /*
11479 * If ID is valid, query on ID.
11480 * If ID is invalid, query on buffer type.
11481 */
11482 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
11483 i = diag_query->BufferType;
11484 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
11485 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11486 return (DDI_FAILURE);
11487 }
11488 } else {
11489 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11490 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11491 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11492 return (DDI_FAILURE);
11493 }
11494 }
11495
11496 /*
11497 * Fill query structure with the diag buffer info.
11498 */
11499 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11500 diag_query->BufferType = pBuffer->buffer_type;
11501 diag_query->ExtendedType = pBuffer->extended_type;
11502 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
11503 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
11504 i++) {
11505 diag_query->ProductSpecific[i] =
11506 pBuffer->product_specific[i];
11507 }
11508 }
11509 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
11510 diag_query->DriverAddedBufferSize = 0;
11511 diag_query->UniqueId = pBuffer->unique_id;
11512 diag_query->ApplicationFlags = 0;
11513 diag_query->DiagnosticFlags = 0;
11514
11515 /*
11516 * Set/Clear application flags
11517 */
11518 if (pBuffer->immediate) {
11519 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11520 } else {
11521 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11522 }
11523 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
11524 diag_query->ApplicationFlags |=
11525 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11526 } else {
11527 diag_query->ApplicationFlags &=
11528 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11529 }
11530 if (pBuffer->owned_by_firmware) {
11531 diag_query->ApplicationFlags |=
11532 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11533 } else {
11534 diag_query->ApplicationFlags &=
11535 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11536 }
11537
11538 return (DDI_SUCCESS);
11539 }
11540
11541 static int
11542 mptsas_diag_read_buffer(mptsas_t *mpt,
11543 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11544 uint32_t *return_code, int ioctl_mode)
11545 {
11546 mptsas_fw_diagnostic_buffer_t *pBuffer;
11547 uint8_t i, *pData;
11548 uint32_t unique_id, byte;
11549 int status;
11550
11551 ASSERT(mutex_owned(&mpt->m_mutex));
11552
11553 unique_id = diag_read_buffer->UniqueId;
11554
11555 /*
11556 * Get the current buffer and look up the unique ID. The unique ID
11557 * should be there.
11558 */
11559 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11560 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11561 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11562 return (DDI_FAILURE);
11563 }
11564
11565 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11566
11567 /*
11568 * Make sure requested read is within limits
11569 */
11570 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11571 pBuffer->buffer_data.size) {
11572 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11573 return (DDI_FAILURE);
11574 }
11575
11576 /*
11577 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11578 * buffer that was allocated is one contiguous buffer.
11579 */
11580 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11581 diag_read_buffer->StartingOffset);
11582 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11583 DDI_DMA_SYNC_FORCPU);
11584 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11585 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11586 != 0) {
11587 return (DDI_FAILURE);
11588 }
11589 }
11590 diag_read_buffer->Status = 0;
11591
11592 /*
11593 * Set or clear the Force Release flag.
11594 */
11595 if (pBuffer->force_release) {
11596 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11597 } else {
11598 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11599 }
11600
11601 /*
11602 * If buffer is to be reregistered, make sure it's not already owned by
11603 * firmware first.
11604 */
11605 status = DDI_SUCCESS;
11606 if (!pBuffer->owned_by_firmware) {
11607 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11608 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11609 return_code);
11610 }
11611 }
11612
11613 return (status);
11614 }
11615
11616 static int
11617 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11618 uint32_t *return_code)
11619 {
11620 mptsas_fw_diagnostic_buffer_t *pBuffer;
11621 uint8_t i;
11622 uint32_t unique_id;
11623 int status;
11624
11625 ASSERT(mutex_owned(&mpt->m_mutex));
11626
11627 unique_id = diag_release->UniqueId;
11628
11629 /*
11630 * Get the current buffer and look up the unique ID. The unique ID
11631 * should be there.
11632 */
11633 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11634 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11635 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11636 return (DDI_FAILURE);
11637 }
11638
11639 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11640
11641 /*
11642 * If buffer is not owned by firmware, it's already been released.
11643 */
11644 if (!pBuffer->owned_by_firmware) {
11645 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11646 return (DDI_FAILURE);
11647 }
11648
11649 /*
11650 * Release the buffer.
11651 */
11652 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11653 MPTSAS_FW_DIAG_TYPE_RELEASE);
11654 return (status);
11655 }
11656
11657 static int
11658 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11659 uint32_t length, uint32_t *return_code, int ioctl_mode)
11660 {
11661 mptsas_fw_diag_register_t diag_register;
11662 mptsas_fw_diag_unregister_t diag_unregister;
11663 mptsas_fw_diag_query_t diag_query;
11664 mptsas_diag_read_buffer_t diag_read_buffer;
11665 mptsas_fw_diag_release_t diag_release;
11666 int status = DDI_SUCCESS;
11667 uint32_t original_return_code, read_buf_len;
11668
11669 ASSERT(mutex_owned(&mpt->m_mutex));
11670
11671 original_return_code = *return_code;
11672 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11673
11674 switch (action) {
11675 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11676 if (!length) {
11677 *return_code =
11678 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11679 status = DDI_FAILURE;
11680 break;
11681 }
11682 if (ddi_copyin(diag_action, &diag_register,
11683 sizeof (diag_register), ioctl_mode) != 0) {
11684 return (DDI_FAILURE);
11685 }
11686 status = mptsas_diag_register(mpt, &diag_register,
11687 return_code);
11688 break;
11689
11690 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11691 if (length < sizeof (diag_unregister)) {
11692 *return_code =
11693 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11694 status = DDI_FAILURE;
11695 break;
11696 }
11697 if (ddi_copyin(diag_action, &diag_unregister,
11698 sizeof (diag_unregister), ioctl_mode) != 0) {
11699 return (DDI_FAILURE);
11700 }
11701 status = mptsas_diag_unregister(mpt, &diag_unregister,
11702 return_code);
11703 break;
11704
11705 case MPTSAS_FW_DIAG_TYPE_QUERY:
11706 if (length < sizeof (diag_query)) {
11707 *return_code =
11708 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11709 status = DDI_FAILURE;
11710 break;
11711 }
11712 if (ddi_copyin(diag_action, &diag_query,
11713 sizeof (diag_query), ioctl_mode) != 0) {
11714 return (DDI_FAILURE);
11715 }
11716 status = mptsas_diag_query(mpt, &diag_query,
11717 return_code);
11718 if (status == DDI_SUCCESS) {
11719 if (ddi_copyout(&diag_query, diag_action,
11720 sizeof (diag_query), ioctl_mode) != 0) {
11721 return (DDI_FAILURE);
11722 }
11723 }
11724 break;
11725
11726 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11727 if (ddi_copyin(diag_action, &diag_read_buffer,
11728 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11729 return (DDI_FAILURE);
11730 }
11731 read_buf_len = sizeof (diag_read_buffer) -
11732 sizeof (diag_read_buffer.DataBuffer) +
11733 diag_read_buffer.BytesToRead;
11734 if (length < read_buf_len) {
11735 *return_code =
11736 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11737 status = DDI_FAILURE;
11738 break;
11739 }
11740 status = mptsas_diag_read_buffer(mpt,
11741 &diag_read_buffer, diag_action +
11742 sizeof (diag_read_buffer) - 4, return_code,
11743 ioctl_mode);
11744 if (status == DDI_SUCCESS) {
11745 if (ddi_copyout(&diag_read_buffer, diag_action,
11746 sizeof (diag_read_buffer) - 4, ioctl_mode)
11747 != 0) {
11748 return (DDI_FAILURE);
11749 }
11750 }
11751 break;
11752
11753 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11754 if (length < sizeof (diag_release)) {
11755 *return_code =
11756 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11757 status = DDI_FAILURE;
11758 break;
11759 }
11760 if (ddi_copyin(diag_action, &diag_release,
11761 sizeof (diag_release), ioctl_mode) != 0) {
11762 return (DDI_FAILURE);
11763 }
11764 status = mptsas_diag_release(mpt, &diag_release,
11765 return_code);
11766 break;
11767
11768 default:
11769 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11770 status = DDI_FAILURE;
11771 break;
11772 }
11773
11774 if ((status == DDI_FAILURE) &&
11775 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11776 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11777 status = DDI_SUCCESS;
11778 }
11779
11780 return (status);
11781 }
11782
11783 static int
11784 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11785 {
11786 int status;
11787 mptsas_diag_action_t driver_data;
11788
11789 ASSERT(mutex_owned(&mpt->m_mutex));
11790
11791 /*
11792 * Copy the user data to a driver data buffer.
11793 */
11794 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11795 mode) == 0) {
11796 /*
11797 * Send diag action request if Action is valid
11798 */
11799 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11800 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11801 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11802 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11803 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11804 status = mptsas_do_diag_action(mpt, driver_data.Action,
11805 (void *)(uintptr_t)driver_data.PtrDiagAction,
11806 driver_data.Length, &driver_data.ReturnCode,
11807 mode);
11808 if (status == DDI_SUCCESS) {
11809 if (ddi_copyout(&driver_data.ReturnCode,
11810 &user_data->ReturnCode,
11811 sizeof (user_data->ReturnCode), mode)
11812 != 0) {
11813 status = EFAULT;
11814 } else {
11815 status = 0;
11816 }
11817 } else {
11818 status = EIO;
11819 }
11820 } else {
11821 status = EINVAL;
11822 }
11823 } else {
11824 status = EFAULT;
11825 }
11826
11827 return (status);
11828 }
11829
11830 /*
11831 * This routine handles the "event query" ioctl.
11832 */
11833 static int
11834 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11835 int *rval)
11836 {
11837 int status;
11838 mptsas_event_query_t driverdata;
11839 uint8_t i;
11840
11841 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11842
11843 mutex_enter(&mpt->m_mutex);
11844 for (i = 0; i < 4; i++) {
11845 driverdata.Types[i] = mpt->m_event_mask[i];
11846 }
11847 mutex_exit(&mpt->m_mutex);
11848
11849 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11850 status = EFAULT;
11851 } else {
11852 *rval = MPTIOCTL_STATUS_GOOD;
11853 status = 0;
11854 }
11855
11856 return (status);
11857 }
11858
11859 /*
11860 * This routine handles the "event enable" ioctl.
11861 */
11862 static int
11863 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11864 int *rval)
11865 {
11866 int status;
11867 mptsas_event_enable_t driverdata;
11868 uint8_t i;
11869
11870 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11871 mutex_enter(&mpt->m_mutex);
11872 for (i = 0; i < 4; i++) {
11873 mpt->m_event_mask[i] = driverdata.Types[i];
11874 }
11875 mutex_exit(&mpt->m_mutex);
11876
11877 *rval = MPTIOCTL_STATUS_GOOD;
11878 status = 0;
11879 } else {
11880 status = EFAULT;
11881 }
11882 return (status);
11883 }
11884
11885 /*
11886 * This routine handles the "event report" ioctl.
11887 */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int status;
	mptsas_event_report_t driverdata;

	mutex_enter(&mpt->m_mutex);

	/*
	 * Only the Size field is copied in; it tells us how much room the
	 * caller has provided for the cached event log.
	 */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			/*
			 * The caller's buffer can hold the whole log; copy
			 * out every cached event.
			 */
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * If the caller over-sized the buffer, write
				 * back the actual log size so it knows how
				 * much of its buffer is valid.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			/*
			 * Buffer too small: not an error at the ioctl level;
			 * the status word tells the caller to retry bigger.
			 */
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11932
11933 static void
11934 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11935 {
11936 int *reg_data;
11937 uint_t reglen;
11938
11939 /*
11940 * Lookup the 'reg' property and extract the other data
11941 */
11942 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11943 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11944 DDI_PROP_SUCCESS) {
11945 /*
11946 * Extract the PCI data from the 'reg' property first DWORD.
11947 * The entry looks like the following:
11948 * First DWORD:
11949 * Bits 0 - 7 8-bit Register number
11950 * Bits 8 - 10 3-bit Function number
11951 * Bits 11 - 15 5-bit Device number
11952 * Bits 16 - 23 8-bit Bus number
11953 * Bits 24 - 25 2-bit Address Space type identifier
11954 *
11955 */
11956 adapter_data->PciInformation.u.bits.BusNumber =
11957 (reg_data[0] & 0x00FF0000) >> 16;
11958 adapter_data->PciInformation.u.bits.DeviceNumber =
11959 (reg_data[0] & 0x0000F800) >> 11;
11960 adapter_data->PciInformation.u.bits.FunctionNumber =
11961 (reg_data[0] & 0x00000700) >> 8;
11962 ddi_prop_free((void *)reg_data);
11963 } else {
11964 /*
11965 * If we can't determine the PCI data then we fill in FF's for
11966 * the data to indicate this.
11967 */
11968 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11969 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11970 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11971 }
11972
11973 /*
11974 * Saved in the mpt->m_fwversion
11975 */
11976 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11977 }
11978
11979 static void
11980 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11981 {
11982 char *driver_verstr = MPTSAS_MOD_STRING;
11983
11984 mptsas_lookup_pci_data(mpt, adapter_data);
11985 adapter_data->AdapterType = mpt->m_MPI25 ?
11986 MPTIOCTL_ADAPTER_TYPE_SAS3 :
11987 MPTIOCTL_ADAPTER_TYPE_SAS2;
11988 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11989 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11990 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11991 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11992 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11993 adapter_data->BiosVersion = 0;
11994 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11995 }
11996
11997 static void
11998 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11999 {
12000 int *reg_data, i;
12001 uint_t reglen;
12002
12003 /*
12004 * Lookup the 'reg' property and extract the other data
12005 */
12006 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12007 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12008 DDI_PROP_SUCCESS) {
12009 /*
12010 * Extract the PCI data from the 'reg' property first DWORD.
12011 * The entry looks like the following:
12012 * First DWORD:
12013 * Bits 8 - 10 3-bit Function number
12014 * Bits 11 - 15 5-bit Device number
12015 * Bits 16 - 23 8-bit Bus number
12016 */
12017 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12018 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12019 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12020 ddi_prop_free((void *)reg_data);
12021 } else {
12022 /*
12023 * If we can't determine the PCI info then we fill in FF's for
12024 * the data to indicate this.
12025 */
12026 pci_info->BusNumber = 0xFFFFFFFF;
12027 pci_info->DeviceNumber = 0xFF;
12028 pci_info->FunctionNumber = 0xFF;
12029 }
12030
12031 /*
12032 * Now get the interrupt vector and the pci header. The vector can
12033 * only be 0 right now. The header is the first 256 bytes of config
12034 * space.
12035 */
12036 pci_info->InterruptVector = 0;
12037 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12038 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12039 i);
12040 }
12041 }
12042
/*
 * Handle the REG_ACCESS ioctl: read or write a single 32-bit chip
 * register on behalf of a (privileged) user.  Only memory-mapped
 * access is supported; I/O-space access returns EINVAL.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int status = 0;
	mptsas_reg_access_t driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
		/*
		 * IO access is not supported.
		 */
		case REG_IO_READ:
		case REG_IO_WRITE:
			mptsas_log(mpt, CE_WARN, "IO access is not "
			    "supported.  Use memory access.");
			status = EINVAL;
			break;

		case REG_MEM_READ:
			/*
			 * RegOffset is a 32-bit-word index into the mapped
			 * register space (uint32_t pointer arithmetic).
			 * NOTE(review): RegOffset is not range-checked
			 * against the size of the mapping — confirm the
			 * privileged-only ioctl path makes this acceptable.
			 */
			driverdata.RegData = ddi_get32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset);
			if (ddi_copyout(&driverdata.RegData,
			    &data->RegData,
			    sizeof (driverdata.RegData), mode) != 0) {
				mptsas_log(mpt, CE_WARN, "Register "
				    "Read Failed");
				status = EFAULT;
			}
			break;

		case REG_MEM_WRITE:
			/* Same word-indexed addressing as the read case. */
			ddi_put32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset,
			    driverdata.RegData);
			break;

		default:
			status = EINVAL;
			break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
12093
12094 static int
12095 led_control(mptsas_t *mpt, intptr_t data, int mode)
12096 {
12097 int ret = 0;
12098 mptsas_led_control_t lc;
12099 mptsas_target_t *ptgt;
12100
12101 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12102 return (EFAULT);
12103 }
12104
12105 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12106 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12107 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12108 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12109 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12110 lc.LedStatus != 1)) {
12111 return (EINVAL);
12112 }
12113
12114 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12115 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12116 return (EACCES);
12117
12118 /* Locate the target we're interrogating... */
12119 mutex_enter(&mpt->m_mutex);
12120 ptgt = refhash_linear_search(mpt->m_targets,
12121 mptsas_target_eval_slot, &lc);
12122 if (ptgt == NULL) {
12123 /* We could not find a target for that enclosure/slot. */
12124 mutex_exit(&mpt->m_mutex);
12125 return (ENOENT);
12126 }
12127
12128 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12129 /* Update our internal LED state. */
12130 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
12131 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
12132
12133 /* Flush it to the controller. */
12134 ret = mptsas_flush_led_status(mpt, ptgt);
12135 mutex_exit(&mpt->m_mutex);
12136 return (ret);
12137 }
12138
12139 /* Return our internal LED state. */
12140 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
12141 mutex_exit(&mpt->m_mutex);
12142
12143 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12144 return (EFAULT);
12145 }
12146
12147 return (0);
12148 }
12149
/*
 * Handle the GET_DISK_INFO ioctl: report the number of known targets
 * and, if the caller supplied an array pointer and enough space, copy
 * out one mptsas_disk_info_t per target.  The STRUCT_* macros size the
 * request structure for the caller's data model (32- vs 64-bit).
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t i = 0;
	uint16_t count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	/*
	 * Build the disk-info array in a private buffer first; the lock
	 * was dropped above, so the target set may have changed since we
	 * counted.
	 */
	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* 'i' may be less than 'count' if targets disappeared meanwhile. */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12236
/*
 * Top-level ioctl entry point for the mptsas driver.  Requires the
 * sys_config privilege.  Requests arriving on an iport minor node are
 * forwarded to scsi_hba_ioctl() (with OK2RM LED bookkeeping for
 * online/offline); controller-node requests are dispatched by command
 * code below.  Before touching hardware the device is raised to D0 if
 * power management is enabled.
 *
 * NOTE(review): pm_busy_component() is called on the PM path but no
 * matching pm_idle_component() is visible at 'out' or on the success
 * paths here, and the MPTIOCTL_DIAG_ACTION EBUSY case returns without
 * reaching 'out' at all — confirm the busy count is balanced elsewhere.
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;
	struct devctl_iocdata *dcp = NULL;
	char *addr = NULL;
	mptsas_target_t *ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	/* All mptsas ioctls are privileged operations. */
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			/* pm_raise_power() may block, so drop the lock. */
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	if (iport_flag) {
		/* iport nodes are serviced by the SCSA HBA framework. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		if (status != 0) {
			goto out;
		}
		/*
		 * The following code control the OK2RM LED, it doesn't affect
		 * the ioctl return status.
		 */
		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
			if (ndi_dc_allochdl((void *)data, &dcp) !=
			    NDI_SUCCESS) {
				goto out;
			}
			addr = ndi_dc_getaddr(dcp);
			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
			if (ptgt == NULL) {
				NDBG14(("mptsas_ioctl led control: tgt %s not "
				    "found", addr));
				ndi_dc_freehdl(dcp);
				goto out;
			}
			mutex_enter(&mpt->m_mutex);
			if (cmd == DEVCTL_DEVICE_ONLINE) {
				ptgt->m_tgt_unconfigured = 0;
			} else if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_tgt_unconfigured = 1;
			}
			/* OK2RM LED on when offline, off when online. */
			if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_led_status |=
				    (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			} else {
				ptgt->m_led_status &=
				    ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			}
			(void) mptsas_flush_led_status(mpt, ptgt);
			mutex_exit(&mpt->m_mutex);
			ndi_dc_freehdl(dcp);
		}
		goto out;
	}
	switch (cmd) {
	case MPTIOCTL_GET_DISK_INFO:
		status = get_disk_info(mpt, data, mode);
		break;
	case MPTIOCTL_LED_CONTROL:
		status = led_control(mpt, data, mode);
		break;
	case MPTIOCTL_UPDATE_FLASH:
		if (ddi_copyin((void *)data, &flashdata,
		    sizeof (struct mptsas_update_flash), mode)) {
			status = EFAULT;
			break;
		}

		mutex_enter(&mpt->m_mutex);
		if (mptsas_update_flash(mpt,
		    (caddr_t)(long)flashdata.PtrBuffer,
		    flashdata.ImageSize, flashdata.ImageType, mode)) {
			status = EFAULT;
		}

		/*
		 * Reset the chip to start using the new
		 * firmware.  Reset if failed also.
		 */
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_PASS_THRU:
		/*
		 * The user has requested to pass through a command to
		 * be executed by the MPT firmware.  Call our routine
		 * which does this.  Only allow one passthru IOCTL at
		 * one time. Other threads will block on
		 * m_passthru_mutex, which is of adaptive variant.
		 */
		if (ddi_copyin((void *)data, &passthru_data,
		    sizeof (mptsas_pass_thru_t), mode)) {
			status = EFAULT;
			break;
		}
		mutex_enter(&mpt->m_passthru_mutex);
		mutex_enter(&mpt->m_mutex);
		status = mptsas_pass_thru(mpt, &passthru_data, mode);
		mutex_exit(&mpt->m_mutex);
		mutex_exit(&mpt->m_passthru_mutex);

		break;
	case MPTIOCTL_GET_ADAPTER_DATA:
		/*
		 * The user has requested to read adapter data.  Call
		 * our routine which does this.
		 */
		bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
		if (ddi_copyin((void *)data, (void *)&adapter_data,
		    sizeof (mptsas_adapter_data_t), mode)) {
			status = EFAULT;
			break;
		}
		if (adapter_data.StructureLength >=
		    sizeof (mptsas_adapter_data_t)) {
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (mptsas_adapter_data_t);
			mutex_enter(&mpt->m_mutex);
			mptsas_read_adapter_data(mpt, &adapter_data);
			mutex_exit(&mpt->m_mutex);
		} else {
			/*
			 * Caller's structure is too small: copy back only
			 * the required length so it can retry.
			 */
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (adapter_data.StructureLength);
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
		}
		if (ddi_copyout((void *)(&adapter_data), (void *)data,
		    copylen, mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_GET_PCI_INFO:
		/*
		 * The user has requested to read pci info.  Call
		 * our routine which does this.
		 */
		bzero(&pci_info, sizeof (mptsas_pci_info_t));
		mutex_enter(&mpt->m_mutex);
		mptsas_read_pci_info(mpt, &pci_info);
		mutex_exit(&mpt->m_mutex);
		if (ddi_copyout((void *)(&pci_info), (void *)data,
		    sizeof (mptsas_pci_info_t), mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_RESET_ADAPTER:
		mutex_enter(&mpt->m_mutex);
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
			    "failed");
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_DIAG_ACTION:
		/*
		 * The user has done a diag buffer action.  Call our
		 * routine which does this.  Only allow one diag action
		 * at one time.
		 */
		mutex_enter(&mpt->m_mutex);
		if (mpt->m_diag_action_in_progress) {
			mutex_exit(&mpt->m_mutex);
			return (EBUSY);
		}
		mpt->m_diag_action_in_progress = 1;
		status = mptsas_diag_action(mpt,
		    (mptsas_diag_action_t *)data, mode);
		mpt->m_diag_action_in_progress = 0;
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_EVENT_QUERY:
		/*
		 * The user has done an event query.  Call our routine
		 * which does this.
		 */
		status = mptsas_event_query(mpt,
		    (mptsas_event_query_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_ENABLE:
		/*
		 * The user has done an event enable.  Call our routine
		 * which does this.
		 */
		status = mptsas_event_enable(mpt,
		    (mptsas_event_enable_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_REPORT:
		/*
		 * The user has done an event report.  Call our routine
		 * which does this.
		 */
		status = mptsas_event_report(mpt,
		    (mptsas_event_report_t *)data, mode, rval);
		break;
	case MPTIOCTL_REG_ACCESS:
		/*
		 * The user has requested register access.  Call our
		 * routine which does this.
		 */
		status = mptsas_reg_access(mpt,
		    (mptsas_reg_access_t *)data, mode);
		break;
	default:
		/* Anything we don't recognize goes to the framework. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
		    rval);
		break;
	}

out:
	return (status);
}
12499
/*
 * Perform a full restart of the IOC: quiesce and flush all I/O,
 * re-initialize the chip, then resume.  Called with m_mutex held
 * (e.g. after a firmware update or the RESET_ADAPTER ioctl).
 * Returns DDI_SUCCESS or DDI_FAILURE from the chip re-init; I/O is
 * restarted in either case.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/* Complete anything queued for completion, then resume I/O. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
12578
/*
 * Bring the IOC to an operational state: reset the chip, (re)allocate
 * the IOC-facts-sized DMA resources, run IOC init, seed the reply free
 * and post queues, enable ports and event notification, and verify all
 * DMA/access handles.  'first_time' is passed through to the reset
 * logic; on a later call a successful message-unit reset skips the
 * reallocation and jumps straight to 'mur'.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/* A message-unit reset on a restart keeps existing resources. */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	/*
	 * NOTE(review): this creates a fresh target hash on every full
	 * (non-MUR) init; if m_targets was already populated from a prior
	 * init, the old hash appears to be overwritten without a matching
	 * destroy — confirm it is torn down elsewhere on the restart path.
	 */
	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link),
	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* Verify every DMA handle used above is still healthy (FMA). */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
12762
12763 static int
12764 mptsas_get_pci_cap(mptsas_t *mpt)
12765 {
12766 ushort_t caps_ptr, cap, cap_count;
12767
12768 if (mpt->m_config_handle == NULL)
12769 return (FALSE);
12770 /*
12771 * Check if capabilities list is supported and if so,
12772 * get initial capabilities pointer and clear bits 0,1.
12773 */
12774 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
12775 & PCI_STAT_CAP) {
12776 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12777 PCI_CONF_CAP_PTR), 4);
12778 } else {
12779 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
12780 }
12781
12782 /*
12783 * Walk capabilities if supported.
12784 */
12785 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
12786
12787 /*
12788 * Check that we haven't exceeded the maximum number of
12789 * capabilities and that the pointer is in a valid range.
12790 */
12791 if (++cap_count > 48) {
12792 mptsas_log(mpt, CE_WARN,
12793 "too many device capabilities.\n");
12794 break;
12795 }
12796 if (caps_ptr < 64) {
12797 mptsas_log(mpt, CE_WARN,
12798 "capabilities pointer 0x%x out of range.\n",
12799 caps_ptr);
12800 break;
12801 }
12802
12803 /*
12804 * Get next capability and check that it is valid.
12805 * For now, we only support power management.
12806 */
12807 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
12808 switch (cap) {
12809 case PCI_CAP_ID_PM:
12810 mptsas_log(mpt, CE_NOTE,
12811 "?mptsas%d supports power management.\n",
12812 mpt->m_instance);
12813 mpt->m_options |= MPTSAS_OPT_PM;
12814
12815 /* Save PMCSR offset */
12816 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
12817 break;
12818 /*
12819 * The following capabilities are valid. Any others
12820 * will cause a message to be logged.
12821 */
12822 case PCI_CAP_ID_VPD:
12823 case PCI_CAP_ID_MSI:
12824 case PCI_CAP_ID_PCIX:
12825 case PCI_CAP_ID_PCI_E:
12826 case PCI_CAP_ID_MSI_X:
12827 break;
12828 default:
12829 mptsas_log(mpt, CE_NOTE,
12830 "?mptsas%d unrecognized capability "
12831 "0x%x.\n", mpt->m_instance, cap);
12832 break;
12833 }
12834
12835 /*
12836 * Get next capabilities pointer and clear bits 0,1.
12837 */
12838 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12839 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
12840 }
12841 return (TRUE);
12842 }
12843
12844 static int
12845 mptsas_init_pm(mptsas_t *mpt)
12846 {
12847 char pmc_name[16];
12848 char *pmc[] = {
12849 NULL,
12850 "0=Off (PCI D3 State)",
12851 "3=On (PCI D0 State)",
12852 NULL
12853 };
12854 uint16_t pmcsr_stat;
12855
12856 if (mptsas_get_pci_cap(mpt) == FALSE) {
12857 return (DDI_FAILURE);
12858 }
12859 /*
12860 * If PCI's capability does not support PM, then don't need
12861 * to registe the pm-components
12862 */
12863 if (!(mpt->m_options & MPTSAS_OPT_PM))
12864 return (DDI_SUCCESS);
12865 /*
12866 * If power management is supported by this chip, create
12867 * pm-components property for the power management framework
12868 */
12869 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12870 pmc[0] = pmc_name;
12871 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12872 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12873 mpt->m_options &= ~MPTSAS_OPT_PM;
12874 mptsas_log(mpt, CE_WARN,
12875 "mptsas%d: pm-component property creation failed.",
12876 mpt->m_instance);
12877 return (DDI_FAILURE);
12878 }
12879
12880 /*
12881 * Power on device.
12882 */
12883 (void) pm_busy_component(mpt->m_dip, 0);
12884 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12885 mpt->m_pmcsr_offset);
12886 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12887 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12888 mpt->m_instance);
12889 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12890 PCI_PMCSR_D0);
12891 }
12892 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12893 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12894 return (DDI_FAILURE);
12895 }
12896 mpt->m_power_level = PM_LEVEL_D0;
12897 /*
12898 * Set pm idle delay.
12899 */
12900 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12901 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12902
12903 return (DDI_SUCCESS);
12904 }
12905
12906 static int
12907 mptsas_register_intrs(mptsas_t *mpt)
12908 {
12909 dev_info_t *dip;
12910 int intr_types;
12911
12912 dip = mpt->m_dip;
12913
12914 /* Get supported interrupt types */
12915 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12916 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
12917 "failed\n");
12918 return (FALSE);
12919 }
12920
12921 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12922
12923 /*
12924 * Try MSI, but fall back to FIXED
12925 */
12926 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12927 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12928 NDBG0(("Using MSI interrupt type"));
12929 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12930 return (TRUE);
12931 }
12932 }
12933 if (intr_types & DDI_INTR_TYPE_FIXED) {
12934 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12935 NDBG0(("Using FIXED interrupt type"));
12936 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12937 return (TRUE);
12938 } else {
12939 NDBG0(("FIXED interrupt registration failed"));
12940 return (FALSE);
12941 }
12942 }
12943
12944 return (FALSE);
12945 }
12946
/*
 * mptsas_unregister_intrs:
 *
 * Undo mptsas_register_intrs(): disable, remove and free all
 * interrupts previously set up for this instance.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12952
12953 /*
12954 * mptsas_add_intrs:
12955 *
12956 * Register FIXED or MSI interrupts.
12957 */
12958 static int
12959 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12960 {
12961 dev_info_t *dip = mpt->m_dip;
12962 int avail, actual, count = 0;
12963 int i, flag, ret;
12964
12965 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12966
12967 /* Get number of interrupts */
12968 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12969 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12970 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12971 "ret %d count %d\n", ret, count);
12972
12973 return (DDI_FAILURE);
12974 }
12975
12976 /* Get number of available interrupts */
12977 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12978 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12979 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12980 "ret %d avail %d\n", ret, avail);
12981
12982 return (DDI_FAILURE);
12983 }
12984
12985 if (avail < count) {
12986 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
12987 "navail() returned %d", count, avail);
12988 }
12989
12990 /* Mpt only have one interrupt routine */
12991 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12992 count = 1;
12993 }
12994
12995 /* Allocate an array of interrupt handles */
12996 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12997 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12998
12999 flag = DDI_INTR_ALLOC_NORMAL;
13000
13001 /* call ddi_intr_alloc() */
13002 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13003 count, &actual, flag);
13004
13005 if ((ret != DDI_SUCCESS) || (actual == 0)) {
13006 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13007 ret);
13008 kmem_free(mpt->m_htable, mpt->m_intr_size);
13009 return (DDI_FAILURE);
13010 }
13011
13012 /* use interrupt count returned or abort? */
13013 if (actual < count) {
13014 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13015 count, actual);
13016 }
13017
13018 mpt->m_intr_cnt = actual;
13019
13020 /*
13021 * Get priority for first msi, assume remaining are all the same
13022 */
13023 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13024 &mpt->m_intr_pri)) != DDI_SUCCESS) {
13025 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13026
13027 /* Free already allocated intr */
13028 for (i = 0; i < actual; i++) {
13029 (void) ddi_intr_free(mpt->m_htable[i]);
13030 }
13031
13032 kmem_free(mpt->m_htable, mpt->m_intr_size);
13033 return (DDI_FAILURE);
13034 }
13035
13036 /* Test for high level mutex */
13037 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13038 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13039 "Hi level interrupt not supported\n");
13040
13041 /* Free already allocated intr */
13042 for (i = 0; i < actual; i++) {
13043 (void) ddi_intr_free(mpt->m_htable[i]);
13044 }
13045
13046 kmem_free(mpt->m_htable, mpt->m_intr_size);
13047 return (DDI_FAILURE);
13048 }
13049
13050 /* Call ddi_intr_add_handler() */
13051 for (i = 0; i < actual; i++) {
13052 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13053 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13054 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13055 "failed %d\n", ret);
13056
13057 /* Free already allocated intr */
13058 for (i = 0; i < actual; i++) {
13059 (void) ddi_intr_free(mpt->m_htable[i]);
13060 }
13061
13062 kmem_free(mpt->m_htable, mpt->m_intr_size);
13063 return (DDI_FAILURE);
13064 }
13065 }
13066
13067 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13068 != DDI_SUCCESS) {
13069 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13070
13071 /* Free already allocated intr */
13072 for (i = 0; i < actual; i++) {
13073 (void) ddi_intr_free(mpt->m_htable[i]);
13074 }
13075
13076 kmem_free(mpt->m_htable, mpt->m_intr_size);
13077 return (DDI_FAILURE);
13078 }
13079
13080 /*
13081 * Enable interrupts
13082 */
13083 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13084 /* Call ddi_intr_block_enable() for MSI interrupts */
13085 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13086 } else {
13087 /* Call ddi_intr_enable for MSI or FIXED interrupts */
13088 for (i = 0; i < mpt->m_intr_cnt; i++) {
13089 (void) ddi_intr_enable(mpt->m_htable[i]);
13090 }
13091 }
13092 return (DDI_SUCCESS);
13093 }
13094
13095 /*
13096 * mptsas_rem_intrs:
13097 *
13098 * Unregister FIXED or MSI interrupts
13099 */
13100 static void
13101 mptsas_rem_intrs(mptsas_t *mpt)
13102 {
13103 int i;
13104
13105 NDBG6(("mptsas_rem_intrs"));
13106
13107 /* Disable all interrupts */
13108 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13109 /* Call ddi_intr_block_disable() */
13110 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13111 } else {
13112 for (i = 0; i < mpt->m_intr_cnt; i++) {
13113 (void) ddi_intr_disable(mpt->m_htable[i]);
13114 }
13115 }
13116
13117 /* Call ddi_intr_remove_handler() */
13118 for (i = 0; i < mpt->m_intr_cnt; i++) {
13119 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
13120 (void) ddi_intr_free(mpt->m_htable[i]);
13121 }
13122
13123 kmem_free(mpt->m_htable, mpt->m_intr_size);
13124 }
13125
13126 /*
13127 * The IO fault service error handling callback function
13128 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 * Posting the ereport here records the PCI error with the FM
	 * framework before control returns to the fault service.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
13140
13141 /*
13142 * mptsas_fm_init - initialize fma capabilities and register with IO
13143 * fault services.
13144 */
13145 static void
13146 mptsas_fm_init(mptsas_t *mpt)
13147 {
13148 /*
13149 * Need to change iblock to priority for new MSI intr
13150 */
13151 ddi_iblock_cookie_t fm_ibc;
13152
13153 /* Only register with IO Fault Services if we have some capability */
13154 if (mpt->m_fm_capabilities) {
13155 /* Adjust access and dma attributes for FMA */
13156 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13157 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13158 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13159
13160 /*
13161 * Register capabilities with IO Fault Services.
13162 * mpt->m_fm_capabilities will be updated to indicate
13163 * capabilities actually supported (not requested.)
13164 */
13165 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
13166
13167 /*
13168 * Initialize pci ereport capabilities if ereport
13169 * capable (should always be.)
13170 */
13171 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13172 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13173 pci_ereport_setup(mpt->m_dip);
13174 }
13175
13176 /*
13177 * Register error callback if error callback capable.
13178 */
13179 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13180 ddi_fm_handler_register(mpt->m_dip,
13181 mptsas_fm_error_cb, (void *) mpt);
13182 }
13183 }
13184 }
13185
13186 /*
13187 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13188 * fault services.
13189 *
13190 */
13191 static void
13192 mptsas_fm_fini(mptsas_t *mpt)
13193 {
13194 /* Only unregister FMA capabilities if registered */
13195 if (mpt->m_fm_capabilities) {
13196
13197 /*
13198 * Un-register error callback if error callback capable.
13199 */
13200
13201 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13202 ddi_fm_handler_unregister(mpt->m_dip);
13203 }
13204
13205 /*
13206 * Release any resources allocated by pci_ereport_setup()
13207 */
13208
13209 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13210 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13211 pci_ereport_teardown(mpt->m_dip);
13212 }
13213
13214 /* Unregister from IO Fault Services */
13215 ddi_fm_fini(mpt->m_dip);
13216
13217 /* Adjust access and dma attributes for FMA */
13218 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13219 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13220 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13221
13222 }
13223 }
13224
13225 int
13226 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13227 {
13228 ddi_fm_error_t de;
13229
13230 if (handle == NULL)
13231 return (DDI_FAILURE);
13232 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13233 return (de.fme_status);
13234 }
13235
13236 int
13237 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13238 {
13239 ddi_fm_error_t de;
13240
13241 if (handle == NULL)
13242 return (DDI_FAILURE);
13243 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13244 return (de.fme_status);
13245 }
13246
13247 void
13248 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13249 {
13250 uint64_t ena;
13251 char buf[FM_MAX_CLASS];
13252
13253 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13254 ena = fm_ena_generate(0, FM_ENA_FMT1);
13255 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13256 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13257 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13258 }
13259 }
13260
13261 static int
13262 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13263 uint16_t *dev_handle, mptsas_target_t **pptgt)
13264 {
13265 int rval;
13266 uint32_t dev_info;
13267 uint64_t sas_wwn;
13268 mptsas_phymask_t phymask;
13269 uint8_t physport, phynum, config, disk;
13270 uint64_t devicename;
13271 uint16_t pdev_hdl;
13272 mptsas_target_t *tmp_tgt = NULL;
13273 uint16_t bay_num, enclosure;
13274
13275 ASSERT(*pptgt == NULL);
13276
13277 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13278 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13279 &bay_num, &enclosure);
13280 if (rval != DDI_SUCCESS) {
13281 rval = DEV_INFO_FAIL_PAGE0;
13282 return (rval);
13283 }
13284
13285 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13286 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13287 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13288 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13289 return (rval);
13290 }
13291
13292 /*
13293 * Check if the dev handle is for a Phys Disk. If so, set return value
13294 * and exit. Don't add Phys Disks to hash.
13295 */
13296 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13297 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13298 if (*dev_handle == mpt->m_raidconfig[config].
13299 m_physdisk_devhdl[disk]) {
13300 rval = DEV_INFO_PHYS_DISK;
13301 return (rval);
13302 }
13303 }
13304 }
13305
13306 /*
13307 * Get SATA Device Name from SAS device page0 for
13308 * sata device, if device name doesn't exist, set mta_wwn to
13309 * 0 for direct attached SATA. For the device behind the expander
13310 * we still can use STP address assigned by expander.
13311 */
13312 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13313 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13314 mutex_exit(&mpt->m_mutex);
13315 /* alloc a tmp_tgt to send the cmd */
13316 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13317 KM_SLEEP);
13318 tmp_tgt->m_devhdl = *dev_handle;
13319 tmp_tgt->m_deviceinfo = dev_info;
13320 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13321 tmp_tgt->m_qfull_retry_interval =
13322 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13323 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13324 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13325 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13326 mutex_enter(&mpt->m_mutex);
13327 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13328 sas_wwn = devicename;
13329 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13330 sas_wwn = 0;
13331 }
13332 }
13333
13334 phymask = mptsas_physport_to_phymask(mpt, physport);
13335 *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
13336 dev_info, phymask, phynum);
13337 if (*pptgt == NULL) {
13338 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13339 "structure!");
13340 rval = DEV_INFO_FAIL_ALLOC;
13341 return (rval);
13342 }
13343 (*pptgt)->m_enclosure = enclosure;
13344 (*pptgt)->m_slot_num = bay_num;
13345 return (DEV_INFO_SUCCESS);
13346 }
13347
/*
 * Read VPD page 0x83 (device identification) from a SATA device and
 * return its NAA world-wide name, or 0 if no NAA-format GUID can be
 * obtained.  Retries the inquiry up to three times, one second apart,
 * in case the page data is not yet ready.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	/* EVPD inquiry for page 0x83, the device identification page. */
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	if ((dblk[1] & 0x30) != 0) {
		/* Association bits != 00b: not a LUN descriptor. */
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	if ((dblk[4] & 0xf0) == 0x50) {
		/* NAA 0x5 identifier: byte-swap from big-endian wire form. */
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* ATA vendor-id string descriptor, no NAA GUID present. */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13396
13397 static int
13398 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
13399 unsigned char *buf, int len, int *reallen, uchar_t evpd)
13400 {
13401 uchar_t cdb[CDB_GROUP0];
13402 struct scsi_address ap;
13403 struct buf *data_bp = NULL;
13404 int resid = 0;
13405 int ret = DDI_FAILURE;
13406
13407 ASSERT(len <= 0xffff);
13408
13409 ap.a_target = MPTSAS_INVALID_DEVHDL;
13410 ap.a_lun = (uchar_t)(lun);
13411 ap.a_hba_tran = mpt->m_tran;
13412
13413 data_bp = scsi_alloc_consistent_buf(&ap,
13414 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
13415 if (data_bp == NULL) {
13416 return (ret);
13417 }
13418 bzero(cdb, CDB_GROUP0);
13419 cdb[0] = SCMD_INQUIRY;
13420 cdb[1] = evpd;
13421 cdb[2] = page;
13422 cdb[3] = (len & 0xff00) >> 8;
13423 cdb[4] = (len & 0x00ff);
13424 cdb[5] = 0;
13425
13426 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
13427 &resid);
13428 if (ret == DDI_SUCCESS) {
13429 if (reallen) {
13430 *reallen = len - resid;
13431 }
13432 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
13433 }
13434 if (data_bp) {
13435 scsi_free_consistent_buf(data_bp);
13436 }
13437 return (ret);
13438 }
13439
/*
 * Send a polled SCSI command (built from cdb/cdblen) to the target,
 * using a cloned scsi_hba_tran_t so the target address can be smuggled
 * through tran_tgt_private to scsi_init_pkt() just as sd would.  When
 * resid is non-NULL it receives the residual byte count on success.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * NOTE(review): ap->a_hba_tran is pointed at the clone and is NOT
 * restored before the clone is freed; callers must treat ap as dead
 * after this returns (current callers use a stack-local ap) -- confirm
 * before adding new callers.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL; check is defensive. */
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL; check is defensive. */
	if (tgt_private == NULL) {
		goto out;
	}
	/* Point the clone at our target so scsi_init_pkt can find it. */
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Run the command synchronously. */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A check condition in the status byte means the command failed. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Common cleanup path for success and every failure above. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
/*
 * Parse a unit-address of the form "wWWID,LUN" or "pPHYID,LUN" (both
 * numbers in hex).  For the 'w' form *wwid is set; for the 'p' form
 * *phy is set; *lun is always set on success.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char	*cp = NULL;
	char	*ptr = NULL;
	size_t	s = 0;
	char	*wwid_str = NULL;
	char	*lun_str = NULL;
	long	lunnum;
	long	phyid = -1;
	int	rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	/* The WWID/phy part must be separated from the LUN by a comma. */
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	/* Copy out the WWID (or phy id) substring and NUL-terminate it. */
	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/*
	 * NOTE(review): strchr(ptr, '\0') returns the terminating NUL,
	 * never NULL, so this check cannot fail; it just locates the
	 * end of the LUN substring.
	 */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	/* 'p' addresses carry a hex phy id, 'w' addresses a WWN string. */
	if (name[0] == 'p') {
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	/*
	 * NOTE(review): ddi_strtol returns 0/errno while
	 * scsi_wwnstr_to_wwn returns DDI_SUCCESS/DDI_FAILURE; both use 0
	 * for success, which is what this comparison relies on.
	 */
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	/* The LUN is parsed as hex as well. */
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
13565
13566 /*
13567 * mptsas_parse_smp_name() is to parse sas wwn string
13568 * which format is "wWWN"
13569 */
13570 static int
13571 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13572 {
13573 char *ptr = name;
13574
13575 if (*ptr != 'w') {
13576 return (DDI_FAILURE);
13577 }
13578
13579 ptr++;
13580 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13581 return (DDI_FAILURE);
13582 }
13583 return (DDI_SUCCESS);
13584 }
13585
/*
 * Bus-config entry point for the iport node.  BUS_CONFIG_ONE configures
 * a single named child (SMP device, or a disk addressed by WWN 'w' or
 * phy 'p'); BUS_CONFIG_DRIVER/BUS_CONFIG_ALL enumerate everything
 * visible beneath this iport.  Returns an NDI_* status.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (which have a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* Append ",0" to form a canonical name. */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			/* Neither an SMP nor a w/p-addressed device. */
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the generic framework attach the configured node(s). */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13703
13704 static int
13705 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13706 mptsas_target_t *ptgt)
13707 {
13708 int rval = DDI_FAILURE;
13709 struct scsi_inquiry *sd_inq = NULL;
13710 mptsas_t *mpt = DIP2MPT(pdip);
13711
13712 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13713
13714 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13715 SUN_INQSIZE, 0, (uchar_t)0);
13716
13717 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13718 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13719 } else {
13720 rval = DDI_FAILURE;
13721 }
13722
13723 kmem_free(sd_inq, SUN_INQSIZE);
13724 return (rval);
13725 }
13726
/*
 * Configure the child node for the device with SAS address sasaddr and
 * the given LUN under this iport.  On success *lundip points at the
 * child's dev_info node.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		/*
		 * NOTE(review): ptgt was just looked up by sasaddr, so
		 * this comparison appears always false -- confirm whether
		 * mptsas_wwid_to_ptgt can return a target whose
		 * m_addr.mta_wwn differs from the search key.
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
13787
13788 static int
13789 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13790 dev_info_t **lundip)
13791 {
13792 int rval;
13793 mptsas_t *mpt = DIP2MPT(pdip);
13794 mptsas_phymask_t phymask;
13795 mptsas_target_t *ptgt = NULL;
13796
13797 /*
13798 * Get the physical port associated to the iport
13799 */
13800 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13801 "phymask", 0);
13802
13803 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13804 if (ptgt == NULL) {
13805 /*
13806 * didn't match any device by searching
13807 */
13808 return (DDI_FAILURE);
13809 }
13810
13811 /*
13812 * If the LUN already exists and the status is online,
13813 * we just return the pointer to dev_info_t directly.
13814 * For the mdi_pathinfo node, we'll handle it in
13815 * mptsas_create_virt_lun().
13816 */
13817
13818 *lundip = mptsas_find_child_phy(pdip, phy);
13819 if (*lundip != NULL) {
13820 return (DDI_SUCCESS);
13821 }
13822
13823 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13824
13825 return (rval);
13826 }
13827
13828 static int
13829 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13830 uint8_t *lun_addr_type)
13831 {
13832 uint32_t lun_idx = 0;
13833
13834 ASSERT(lun_num != NULL);
13835 ASSERT(lun_addr_type != NULL);
13836
13837 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13838 /* determine report luns addressing type */
13839 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13840 /*
13841 * Vendors in the field have been found to be concatenating
13842 * bus/target/lun to equal the complete lun value instead
13843 * of switching to flat space addressing
13844 */
13845 /* 00b - peripheral device addressing method */
13846 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13847 /* FALLTHRU */
13848 /* 10b - logical unit addressing method */
13849 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13850 /* FALLTHRU */
13851 /* 01b - flat space addressing method */
13852 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13853 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13854 *lun_addr_type = (buf[lun_idx] &
13855 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13856 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13857 *lun_num |= buf[lun_idx + 1];
13858 return (DDI_SUCCESS);
13859 default:
13860 return (DDI_FAILURE);
13861 }
13862 }
13863
13864 static int
13865 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13866 {
13867 struct buf *repluns_bp = NULL;
13868 struct scsi_address ap;
13869 uchar_t cdb[CDB_GROUP5];
13870 int ret = DDI_FAILURE;
13871 int retry = 0;
13872 int lun_list_len = 0;
13873 uint16_t lun_num = 0;
13874 uint8_t lun_addr_type = 0;
13875 uint32_t lun_cnt = 0;
13876 uint32_t lun_total = 0;
13877 dev_info_t *cdip = NULL;
13878 uint16_t *saved_repluns = NULL;
13879 char *buffer = NULL;
13880 int buf_len = 128;
13881 mptsas_t *mpt = DIP2MPT(pdip);
13882 uint64_t sas_wwn = 0;
13883 uint8_t phy = 0xFF;
13884 uint32_t dev_info = 0;
13885
13886 mutex_enter(&mpt->m_mutex);
13887 sas_wwn = ptgt->m_addr.mta_wwn;
13888 phy = ptgt->m_phynum;
13889 dev_info = ptgt->m_deviceinfo;
13890 mutex_exit(&mpt->m_mutex);
13891
13892 if (sas_wwn == 0) {
13893 /*
13894 * It's a SATA without Device Name
13895 * So don't try multi-LUNs
13896 */
13897 if (mptsas_find_child_phy(pdip, phy)) {
13898 return (DDI_SUCCESS);
13899 } else {
13900 /*
13901 * need configure and create node
13902 */
13903 return (DDI_FAILURE);
13904 }
13905 }
13906
13907 /*
13908 * WWN (SAS address or Device Name exist)
13909 */
13910 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13911 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13912 /*
13913 * SATA device with Device Name
13914 * So don't try multi-LUNs
13915 */
13916 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
13917 return (DDI_SUCCESS);
13918 } else {
13919 return (DDI_FAILURE);
13920 }
13921 }
13922
13923 do {
13924 ap.a_target = MPTSAS_INVALID_DEVHDL;
13925 ap.a_lun = 0;
13926 ap.a_hba_tran = mpt->m_tran;
13927 repluns_bp = scsi_alloc_consistent_buf(&ap,
13928 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13929 if (repluns_bp == NULL) {
13930 retry++;
13931 continue;
13932 }
13933 bzero(cdb, CDB_GROUP5);
13934 cdb[0] = SCMD_REPORT_LUNS;
13935 cdb[6] = (buf_len & 0xff000000) >> 24;
13936 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13937 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13938 cdb[9] = (buf_len & 0x000000ff);
13939
13940 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13941 repluns_bp, NULL);
13942 if (ret != DDI_SUCCESS) {
13943 scsi_free_consistent_buf(repluns_bp);
13944 retry++;
13945 continue;
13946 }
13947 lun_list_len = BE_32(*(int *)((void *)(
13948 repluns_bp->b_un.b_addr)));
13949 if (buf_len >= lun_list_len + 8) {
13950 ret = DDI_SUCCESS;
13951 break;
13952 }
13953 scsi_free_consistent_buf(repluns_bp);
13954 buf_len = lun_list_len + 8;
13955
13956 } while (retry < 3);
13957
13958 if (ret != DDI_SUCCESS)
13959 return (ret);
13960 buffer = (char *)repluns_bp->b_un.b_addr;
13961 /*
13962 * find out the number of luns returned by the SCSI ReportLun call
13963 * and allocate buffer space
13964 */
13965 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13966 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13967 if (saved_repluns == NULL) {
13968 scsi_free_consistent_buf(repluns_bp);
13969 return (DDI_FAILURE);
13970 }
13971 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13972 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13973 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13974 continue;
13975 }
13976 saved_repluns[lun_cnt] = lun_num;
13977 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13978 ret = DDI_SUCCESS;
13979 else
13980 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13981 ptgt);
13982 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13983 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13984 MPTSAS_DEV_GONE);
13985 }
13986 }
13987 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13988 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13989 scsi_free_consistent_buf(repluns_bp);
13990 return (DDI_SUCCESS);
13991 }
13992
13993 static int
13994 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13995 {
13996 int rval = DDI_FAILURE;
13997 struct scsi_inquiry *sd_inq = NULL;
13998 mptsas_t *mpt = DIP2MPT(pdip);
13999 mptsas_target_t *ptgt = NULL;
14000
14001 mutex_enter(&mpt->m_mutex);
14002 ptgt = refhash_linear_search(mpt->m_targets,
14003 mptsas_target_eval_devhdl, &target);
14004 mutex_exit(&mpt->m_mutex);
14005 if (ptgt == NULL) {
14006 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14007 "not found.", target);
14008 return (rval);
14009 }
14010
14011 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14012 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14013 SUN_INQSIZE, 0, (uchar_t)0);
14014
14015 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14016 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14017 0);
14018 } else {
14019 rval = DDI_FAILURE;
14020 }
14021
14022 kmem_free(sd_inq, SUN_INQSIZE);
14023 return (rval);
14024 }
14025
14026 /*
14027 * configure all RAID volumes for virtual iport
14028 */
14029 static void
14030 mptsas_config_all_viport(dev_info_t *pdip)
14031 {
14032 mptsas_t *mpt = DIP2MPT(pdip);
14033 int config, vol;
14034 int target;
14035 dev_info_t *lundip = NULL;
14036
14037 /*
14038 * Get latest RAID info and search for any Volume DevHandles. If any
14039 * are found, configure the volume.
14040 */
14041 mutex_enter(&mpt->m_mutex);
14042 for (config = 0; config < mpt->m_num_raid_configs; config++) {
14043 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
14044 if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
14045 == 1) {
14046 target = mpt->m_raidconfig[config].
14047 m_raidvol[vol].m_raidhandle;
14048 mutex_exit(&mpt->m_mutex);
14049 (void) mptsas_config_raid(pdip, target,
14050 &lundip);
14051 mutex_enter(&mpt->m_mutex);
14052 }
14053 }
14054 }
14055 mutex_exit(&mpt->m_mutex);
14056 }
14057
/*
 * Offline every child node and mdi_pathinfo node of pdip that belongs
 * to target ptgt (matched by WWN) but whose LUN is absent from the
 * repluns[] array (the LUNs most recently reported by REPORT LUNS).
 * Called from mptsas_config_luns() after a fresh LUN scan.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * First pass: direct child dev_info nodes.  The sibling pointer
	 * is fetched BEFORE the current node may be offlined/removed,
	 * so the walk stays valid.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP (expander) nodes are not LUNs; skip them. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only consider nodes belonging to this target's WWN. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Second pass: mdi_pathinfo (multipath) nodes, same save-then-
	 * advance pattern as above.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
14154
14155 void
14156 mptsas_update_hashtab(struct mptsas *mpt)
14157 {
14158 uint32_t page_address;
14159 int rval = 0;
14160 uint16_t dev_handle;
14161 mptsas_target_t *ptgt = NULL;
14162 mptsas_smp_t smp_node;
14163
14164 /*
14165 * Get latest RAID info.
14166 */
14167 (void) mptsas_get_raid_info(mpt);
14168
14169 dev_handle = mpt->m_smp_devhdl;
14170 for (; mpt->m_done_traverse_smp == 0; ) {
14171 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
14172 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
14173 if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
14174 != DDI_SUCCESS) {
14175 break;
14176 }
14177 mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
14178 (void) mptsas_smp_alloc(mpt, &smp_node);
14179 }
14180
14181 /*
14182 * Config target devices
14183 */
14184 dev_handle = mpt->m_dev_handle;
14185
14186 /*
14187 * Do loop to get sas device page 0 by GetNextHandle till the
14188 * the last handle. If the sas device is a SATA/SSP target,
14189 * we try to config it.
14190 */
14191 for (; mpt->m_done_traverse_dev == 0; ) {
14192 ptgt = NULL;
14193 page_address =
14194 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14195 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14196 (uint32_t)dev_handle;
14197 rval = mptsas_get_target_device_info(mpt, page_address,
14198 &dev_handle, &ptgt);
14199 if ((rval == DEV_INFO_FAIL_PAGE0) ||
14200 (rval == DEV_INFO_FAIL_ALLOC)) {
14201 break;
14202 }
14203
14204 mpt->m_dev_handle = dev_handle;
14205 }
14206
14207 }
14208
14209 void
14210 mptsas_update_driver_data(struct mptsas *mpt)
14211 {
14212 mptsas_target_t *tp;
14213 mptsas_smp_t *sp;
14214
14215 ASSERT(MUTEX_HELD(&mpt->m_mutex));
14216
14217 /*
14218 * TODO after hard reset, update the driver data structures
14219 * 1. update port/phymask mapping table mpt->m_phy_info
14220 * 2. invalid all the entries in hash table
14221 * m_devhdl = 0xffff and m_deviceinfo = 0
14222 * 3. call sas_device_page/expander_page to update hash table
14223 */
14224 mptsas_update_phymask(mpt);
14225 /*
14226 * Invalid the existing entries
14227 *
14228 * XXX - It seems like we should just delete everything here. We are
14229 * holding the lock and are about to refresh all the targets in both
14230 * hashes anyway. Given the path we're in, what outstanding async
14231 * event could possibly be trying to reference one of these things
14232 * without taking the lock, and how would that be useful anyway?
14233 */
14234 for (tp = refhash_first(mpt->m_targets); tp != NULL;
14235 tp = refhash_next(mpt->m_targets, tp)) {
14236 tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14237 tp->m_deviceinfo = 0;
14238 tp->m_dr_flag = MPTSAS_DR_INACTIVE;
14239 }
14240 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
14241 sp = refhash_next(mpt->m_smp_targets, sp)) {
14242 sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14243 sp->m_deviceinfo = 0;
14244 }
14245 mpt->m_done_traverse_dev = 0;
14246 mpt->m_done_traverse_smp = 0;
14247 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
14248 mptsas_update_hashtab(mpt);
14249 }
14250
14251 static void
14252 mptsas_config_all(dev_info_t *pdip)
14253 {
14254 dev_info_t *smpdip = NULL;
14255 mptsas_t *mpt = DIP2MPT(pdip);
14256 int phymask = 0;
14257 mptsas_phymask_t phy_mask;
14258 mptsas_target_t *ptgt = NULL;
14259 mptsas_smp_t *psmp;
14260
14261 /*
14262 * Get the phymask associated to the iport
14263 */
14264 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14265 "phymask", 0);
14266
14267 /*
14268 * Enumerate RAID volumes here (phymask == 0).
14269 */
14270 if (phymask == 0) {
14271 mptsas_config_all_viport(pdip);
14272 return;
14273 }
14274
14275 mutex_enter(&mpt->m_mutex);
14276
14277 if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
14278 mptsas_update_hashtab(mpt);
14279 }
14280
14281 for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
14282 psmp = refhash_next(mpt->m_smp_targets, psmp)) {
14283 phy_mask = psmp->m_addr.mta_phymask;
14284 if (phy_mask == phymask) {
14285 smpdip = NULL;
14286 mutex_exit(&mpt->m_mutex);
14287 (void) mptsas_online_smp(pdip, psmp, &smpdip);
14288 mutex_enter(&mpt->m_mutex);
14289 }
14290 }
14291
14292 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
14293 ptgt = refhash_next(mpt->m_targets, ptgt)) {
14294 phy_mask = ptgt->m_addr.mta_phymask;
14295 if (phy_mask == phymask) {
14296 mutex_exit(&mpt->m_mutex);
14297 (void) mptsas_config_target(pdip, ptgt);
14298 mutex_enter(&mpt->m_mutex);
14299 }
14300 }
14301 mutex_exit(&mpt->m_mutex);
14302 }
14303
14304 static int
14305 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14306 {
14307 int rval = DDI_FAILURE;
14308 dev_info_t *tdip;
14309
14310 rval = mptsas_config_luns(pdip, ptgt);
14311 if (rval != DDI_SUCCESS) {
14312 /*
14313 * The return value means the SCMD_REPORT_LUNS
14314 * did not execute successfully. The target maybe
14315 * doesn't support such command.
14316 */
14317 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14318 }
14319 return (rval);
14320 }
14321
14322 /*
14323 * Return fail if not all the childs/paths are freed.
14324 * if there is any path under the HBA, the return value will be always fail
14325 * because we didn't call mdi_pi_free for path
14326 */
14327 static int
14328 mptsas_offline_target(dev_info_t *pdip, char *name)
14329 {
14330 dev_info_t *child = NULL, *prechild = NULL;
14331 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
14332 int tmp_rval, rval = DDI_SUCCESS;
14333 char *addr, *cp;
14334 size_t s;
14335 mptsas_t *mpt = DIP2MPT(pdip);
14336
14337 child = ddi_get_child(pdip);
14338 while (child) {
14339 addr = ddi_get_name_addr(child);
14340 prechild = child;
14341 child = ddi_get_next_sibling(child);
14342
14343 if (addr == NULL) {
14344 continue;
14345 }
14346 if ((cp = strchr(addr, ',')) == NULL) {
14347 continue;
14348 }
14349
14350 s = (uintptr_t)cp - (uintptr_t)addr;
14351
14352 if (strncmp(addr, name, s) != 0) {
14353 continue;
14354 }
14355
14356 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
14357 NDI_DEVI_REMOVE);
14358 if (tmp_rval != DDI_SUCCESS) {
14359 rval = DDI_FAILURE;
14360 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14361 prechild, MPTSAS_DEV_GONE) !=
14362 DDI_PROP_SUCCESS) {
14363 mptsas_log(mpt, CE_WARN, "mptsas driver "
14364 "unable to create property for "
14365 "SAS %s (MPTSAS_DEV_GONE)", addr);
14366 }
14367 }
14368 }
14369
14370 pip = mdi_get_next_client_path(pdip, NULL);
14371 while (pip) {
14372 addr = MDI_PI(pip)->pi_addr;
14373 savepip = pip;
14374 pip = mdi_get_next_client_path(pdip, pip);
14375 if (addr == NULL) {
14376 continue;
14377 }
14378
14379 if ((cp = strchr(addr, ',')) == NULL) {
14380 continue;
14381 }
14382
14383 s = (uintptr_t)cp - (uintptr_t)addr;
14384
14385 if (strncmp(addr, name, s) != 0) {
14386 continue;
14387 }
14388
14389 (void) mptsas_offline_lun(pdip, NULL, savepip,
14390 NDI_DEVI_REMOVE);
14391 /*
14392 * driver will not invoke mdi_pi_free, so path will not
14393 * be freed forever, return DDI_FAILURE.
14394 */
14395 rval = DDI_FAILURE;
14396 }
14397 return (rval);
14398 }
14399
14400 static int
14401 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
14402 mdi_pathinfo_t *rpip, uint_t flags)
14403 {
14404 int rval = DDI_FAILURE;
14405 char *devname;
14406 dev_info_t *cdip, *parent;
14407
14408 if (rpip != NULL) {
14409 parent = scsi_vhci_dip;
14410 cdip = mdi_pi_get_client(rpip);
14411 } else if (rdip != NULL) {
14412 parent = pdip;
14413 cdip = rdip;
14414 } else {
14415 return (DDI_FAILURE);
14416 }
14417
14418 /*
14419 * Make sure node is attached otherwise
14420 * it won't have related cache nodes to
14421 * clean up. i_ddi_devi_attached is
14422 * similiar to i_ddi_node_state(cdip) >=
14423 * DS_ATTACHED.
14424 */
14425 if (i_ddi_devi_attached(cdip)) {
14426
14427 /* Get full devname */
14428 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14429 (void) ddi_deviname(cdip, devname);
14430 /* Clean cache */
14431 (void) devfs_clean(parent, devname + 1,
14432 DV_CLEAN_FORCE);
14433 kmem_free(devname, MAXNAMELEN + 1);
14434 }
14435 if (rpip != NULL) {
14436 if (MDI_PI_IS_OFFLINE(rpip)) {
14437 rval = DDI_SUCCESS;
14438 } else {
14439 rval = mdi_pi_offline(rpip, 0);
14440 }
14441 } else {
14442 rval = ndi_devi_offline(cdip, flags);
14443 }
14444
14445 return (rval);
14446 }
14447
14448 static dev_info_t *
14449 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14450 {
14451 dev_info_t *child = NULL;
14452 char *smp_wwn = NULL;
14453
14454 child = ddi_get_child(parent);
14455 while (child) {
14456 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14457 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14458 != DDI_SUCCESS) {
14459 child = ddi_get_next_sibling(child);
14460 continue;
14461 }
14462
14463 if (strcmp(smp_wwn, str_wwn) == 0) {
14464 ddi_prop_free(smp_wwn);
14465 break;
14466 }
14467 child = ddi_get_next_sibling(child);
14468 ddi_prop_free(smp_wwn);
14469 }
14470 return (child);
14471 }
14472
14473 static int
14474 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14475 {
14476 int rval = DDI_FAILURE;
14477 char *devname;
14478 char wwn_str[MPTSAS_WWN_STRLEN];
14479 dev_info_t *cdip;
14480
14481 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
14482
14483 cdip = mptsas_find_smp_child(pdip, wwn_str);
14484
14485 if (cdip == NULL)
14486 return (DDI_SUCCESS);
14487
14488 /*
14489 * Make sure node is attached otherwise
14490 * it won't have related cache nodes to
14491 * clean up. i_ddi_devi_attached is
14492 * similiar to i_ddi_node_state(cdip) >=
14493 * DS_ATTACHED.
14494 */
14495 if (i_ddi_devi_attached(cdip)) {
14496
14497 /* Get full devname */
14498 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14499 (void) ddi_deviname(cdip, devname);
14500 /* Clean cache */
14501 (void) devfs_clean(pdip, devname + 1,
14502 DV_CLEAN_FORCE);
14503 kmem_free(devname, MAXNAMELEN + 1);
14504 }
14505
14506 rval = ndi_devi_offline(cdip, flags);
14507
14508 return (rval);
14509 }
14510
14511 static dev_info_t *
14512 mptsas_find_child(dev_info_t *pdip, char *name)
14513 {
14514 dev_info_t *child = NULL;
14515 char *rname = NULL;
14516 int rval = DDI_FAILURE;
14517
14518 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14519
14520 child = ddi_get_child(pdip);
14521 while (child) {
14522 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14523 if (rval != DDI_SUCCESS) {
14524 child = ddi_get_next_sibling(child);
14525 bzero(rname, SCSI_MAXNAMELEN);
14526 continue;
14527 }
14528
14529 if (strcmp(rname, name) == 0) {
14530 break;
14531 }
14532 child = ddi_get_next_sibling(child);
14533 bzero(rname, SCSI_MAXNAMELEN);
14534 }
14535
14536 kmem_free(rname, SCSI_MAXNAMELEN);
14537
14538 return (child);
14539 }
14540
14541
14542 static dev_info_t *
14543 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14544 {
14545 dev_info_t *child = NULL;
14546 char *name = NULL;
14547 char *addr = NULL;
14548
14549 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14550 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14551 (void) sprintf(name, "%016"PRIx64, sasaddr);
14552 (void) sprintf(addr, "w%s,%x", name, lun);
14553 child = mptsas_find_child(pdip, addr);
14554 kmem_free(name, SCSI_MAXNAMELEN);
14555 kmem_free(addr, SCSI_MAXNAMELEN);
14556 return (child);
14557 }
14558
14559 static dev_info_t *
14560 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14561 {
14562 dev_info_t *child;
14563 char *addr;
14564
14565 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14566 (void) sprintf(addr, "p%x,0", phy);
14567 child = mptsas_find_child(pdip, addr);
14568 kmem_free(addr, SCSI_MAXNAMELEN);
14569 return (child);
14570 }
14571
14572 static mdi_pathinfo_t *
14573 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14574 {
14575 mdi_pathinfo_t *path;
14576 char *addr = NULL;
14577
14578 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14579 (void) sprintf(addr, "p%x,0", phy);
14580 path = mdi_pi_find(pdip, NULL, addr);
14581 kmem_free(addr, SCSI_MAXNAMELEN);
14582 return (path);
14583 }
14584
14585 static mdi_pathinfo_t *
14586 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14587 {
14588 mdi_pathinfo_t *path;
14589 char *name = NULL;
14590 char *addr = NULL;
14591
14592 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14593 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14594 (void) sprintf(name, "%016"PRIx64, sasaddr);
14595 (void) sprintf(addr, "w%s,%x", name, lun);
14596 path = mdi_pi_find(parent, NULL, addr);
14597 kmem_free(name, SCSI_MAXNAMELEN);
14598 kmem_free(addr, SCSI_MAXNAMELEN);
14599
14600 return (path);
14601 }
14602
/*
 * Create a child node for a LUN of target ptgt.  An Inquiry VPD page
 * 0x83 is fetched (with retries) to derive a devid/GUID; when a GUID
 * is obtained and MPXIO is enabled, a virtual (scsi_vhci) LUN is
 * created, otherwise a physical LUN node under pdip.  Returns the
 * result of the node creation, or DDI_FAILURE if page 0x83 could not
 * be obtained (unless mptsas_physical_bind_failed_page_83 is set, in
 * which case the physical node is still created).
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int		i = 0;
	uchar_t		*inq83 = NULL;
	int		inq83_len1 = 0xFF;
	int		inq83_len = 0;
	int		rval = DDI_FAILURE;
	ddi_devid_t	devid;
	char		*guid = NULL;
	int		target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	/* Retry loop: one inquiry attempt per second, up to the timeout. */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: bind the physical node
			 * anyway when page 0x83 can't be read at all.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* Prefer a virtual (vhci) LUN when a GUID exists and MPXIO is on. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical LUN node under pdip. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
14726
14727 static int
14728 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14729 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14730 {
14731 int target;
14732 char *nodename = NULL;
14733 char **compatible = NULL;
14734 int ncompatible = 0;
14735 int mdi_rtn = MDI_FAILURE;
14736 int rval = DDI_FAILURE;
14737 char *old_guid = NULL;
14738 mptsas_t *mpt = DIP2MPT(pdip);
14739 char *lun_addr = NULL;
14740 char *wwn_str = NULL;
14741 char *attached_wwn_str = NULL;
14742 char *component = NULL;
14743 uint8_t phy = 0xFF;
14744 uint64_t sas_wwn;
14745 int64_t lun64 = 0;
14746 uint32_t devinfo;
14747 uint16_t dev_hdl;
14748 uint16_t pdev_hdl;
14749 uint64_t dev_sas_wwn;
14750 uint64_t pdev_sas_wwn;
14751 uint32_t pdev_info;
14752 uint8_t physport;
14753 uint8_t phy_id;
14754 uint32_t page_address;
14755 uint16_t bay_num, enclosure;
14756 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14757 uint32_t dev_info;
14758
14759 mutex_enter(&mpt->m_mutex);
14760 target = ptgt->m_devhdl;
14761 sas_wwn = ptgt->m_addr.mta_wwn;
14762 devinfo = ptgt->m_deviceinfo;
14763 phy = ptgt->m_phynum;
14764 mutex_exit(&mpt->m_mutex);
14765
14766 if (sas_wwn) {
14767 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14768 } else {
14769 *pip = mptsas_find_path_phy(pdip, phy);
14770 }
14771
14772 if (*pip != NULL) {
14773 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14774 ASSERT(*lun_dip != NULL);
14775 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14776 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14777 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14778 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14779 /*
14780 * Same path back online again.
14781 */
14782 (void) ddi_prop_free(old_guid);
14783 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14784 (!MDI_PI_IS_STANDBY(*pip)) &&
14785 (ptgt->m_tgt_unconfigured == 0)) {
14786 rval = mdi_pi_online(*pip, 0);
14787 mutex_enter(&mpt->m_mutex);
14788 ptgt->m_led_status = 0;
14789 (void) mptsas_flush_led_status(mpt,
14790 ptgt);
14791 mutex_exit(&mpt->m_mutex);
14792 } else {
14793 rval = DDI_SUCCESS;
14794 }
14795 if (rval != DDI_SUCCESS) {
14796 mptsas_log(mpt, CE_WARN, "path:target: "
14797 "%x, lun:%x online failed!", target,
14798 lun);
14799 *pip = NULL;
14800 *lun_dip = NULL;
14801 }
14802 return (rval);
14803 } else {
14804 /*
14805 * The GUID of the LUN has changed which maybe
14806 * because customer mapped another volume to the
14807 * same LUN.
14808 */
14809 mptsas_log(mpt, CE_WARN, "The GUID of the "
14810 "target:%x, lun:%x was changed, maybe "
14811 "because someone mapped another volume "
14812 "to the same LUN", target, lun);
14813 (void) ddi_prop_free(old_guid);
14814 if (!MDI_PI_IS_OFFLINE(*pip)) {
14815 rval = mdi_pi_offline(*pip, 0);
14816 if (rval != MDI_SUCCESS) {
14817 mptsas_log(mpt, CE_WARN, "path:"
14818 "target:%x, lun:%x offline "
14819 "failed!", target, lun);
14820 *pip = NULL;
14821 *lun_dip = NULL;
14822 return (DDI_FAILURE);
14823 }
14824 }
14825 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14826 mptsas_log(mpt, CE_WARN, "path:target:"
14827 "%x, lun:%x free failed!", target,
14828 lun);
14829 *pip = NULL;
14830 *lun_dip = NULL;
14831 return (DDI_FAILURE);
14832 }
14833 }
14834 } else {
14835 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14836 "property for path:target:%x, lun:%x", target, lun);
14837 *pip = NULL;
14838 *lun_dip = NULL;
14839 return (DDI_FAILURE);
14840 }
14841 }
14842 scsi_hba_nodename_compatible_get(inq, NULL,
14843 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14844
14845 /*
14846 * if nodename can't be determined then print a message and skip it
14847 */
14848 if (nodename == NULL) {
14849 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14850 "driver for target%d lun %d dtype:0x%02x", target, lun,
14851 inq->inq_dtype);
14852 return (DDI_FAILURE);
14853 }
14854
14855 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14856 /* The property is needed by MPAPI */
14857 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14858
14859 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14860 if (guid) {
14861 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14862 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14863 } else {
14864 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14865 (void) sprintf(wwn_str, "p%x", phy);
14866 }
14867
14868 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14869 guid, lun_addr, compatible, ncompatible,
14870 0, pip);
14871 if (mdi_rtn == MDI_SUCCESS) {
14872
14873 if (mdi_prop_update_string(*pip, MDI_GUID,
14874 guid) != DDI_SUCCESS) {
14875 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14876 "create prop for target %d lun %d (MDI_GUID)",
14877 target, lun);
14878 mdi_rtn = MDI_FAILURE;
14879 goto virt_create_done;
14880 }
14881
14882 if (mdi_prop_update_int(*pip, LUN_PROP,
14883 lun) != DDI_SUCCESS) {
14884 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14885 "create prop for target %d lun %d (LUN_PROP)",
14886 target, lun);
14887 mdi_rtn = MDI_FAILURE;
14888 goto virt_create_done;
14889 }
14890 lun64 = (int64_t)lun;
14891 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14892 lun64) != DDI_SUCCESS) {
14893 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14894 "create prop for target %d (LUN64_PROP)",
14895 target);
14896 mdi_rtn = MDI_FAILURE;
14897 goto virt_create_done;
14898 }
14899 if (mdi_prop_update_string_array(*pip, "compatible",
14900 compatible, ncompatible) !=
14901 DDI_PROP_SUCCESS) {
14902 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14903 "create prop for target %d lun %d (COMPATIBLE)",
14904 target, lun);
14905 mdi_rtn = MDI_FAILURE;
14906 goto virt_create_done;
14907 }
14908 if (sas_wwn && (mdi_prop_update_string(*pip,
14909 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14910 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14911 "create prop for target %d lun %d "
14912 "(target-port)", target, lun);
14913 mdi_rtn = MDI_FAILURE;
14914 goto virt_create_done;
14915 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14916 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14917 /*
14918 * Direct attached SATA device without DeviceName
14919 */
14920 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14921 "create prop for SAS target %d lun %d "
14922 "(sata-phy)", target, lun);
14923 mdi_rtn = MDI_FAILURE;
14924 goto virt_create_done;
14925 }
14926 mutex_enter(&mpt->m_mutex);
14927
14928 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14929 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14930 (uint32_t)ptgt->m_devhdl;
14931 rval = mptsas_get_sas_device_page0(mpt, page_address,
14932 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14933 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14934 if (rval != DDI_SUCCESS) {
14935 mutex_exit(&mpt->m_mutex);
14936 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14937 "parent device for handle %d", page_address);
14938 mdi_rtn = MDI_FAILURE;
14939 goto virt_create_done;
14940 }
14941
14942 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14943 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14944 rval = mptsas_get_sas_device_page0(mpt, page_address,
14945 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14946 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14947 if (rval != DDI_SUCCESS) {
14948 mutex_exit(&mpt->m_mutex);
14949 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14950 "device info for handle %d", page_address);
14951 mdi_rtn = MDI_FAILURE;
14952 goto virt_create_done;
14953 }
14954
14955 mutex_exit(&mpt->m_mutex);
14956
14957 /*
14958 * If this device direct attached to the controller
14959 * set the attached-port to the base wwid
14960 */
14961 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14962 != DEVINFO_DIRECT_ATTACHED) {
14963 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14964 pdev_sas_wwn);
14965 } else {
14966 /*
14967 * Update the iport's attached-port to guid
14968 */
14969 if (sas_wwn == 0) {
14970 (void) sprintf(wwn_str, "p%x", phy);
14971 } else {
14972 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14973 }
14974 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14975 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14976 DDI_PROP_SUCCESS) {
14977 mptsas_log(mpt, CE_WARN,
14978 "mptsas unable to create "
14979 "property for iport target-port"
14980 " %s (sas_wwn)",
14981 wwn_str);
14982 mdi_rtn = MDI_FAILURE;
14983 goto virt_create_done;
14984 }
14985
14986 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14987 mpt->un.m_base_wwid);
14988 }
14989
14990 if (mdi_prop_update_string(*pip,
14991 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14992 DDI_PROP_SUCCESS) {
14993 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14994 "property for iport attached-port %s (sas_wwn)",
14995 attached_wwn_str);
14996 mdi_rtn = MDI_FAILURE;
14997 goto virt_create_done;
14998 }
14999
15000
15001 if (inq->inq_dtype == 0) {
15002 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15003 /*
15004 * set obp path for pathinfo
15005 */
15006 (void) snprintf(component, MAXPATHLEN,
15007 "disk@%s", lun_addr);
15008
15009 if (mdi_pi_pathname_obp_set(*pip, component) !=
15010 DDI_SUCCESS) {
15011 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15012 "unable to set obp-path for object %s",
15013 component);
15014 mdi_rtn = MDI_FAILURE;
15015 goto virt_create_done;
15016 }
15017 }
15018
15019 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15020 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15021 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15022 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15023 "pm-capable", 1)) !=
15024 DDI_PROP_SUCCESS) {
15025 mptsas_log(mpt, CE_WARN, "mptsas driver"
15026 "failed to create pm-capable "
15027 "property, target %d", target);
15028 mdi_rtn = MDI_FAILURE;
15029 goto virt_create_done;
15030 }
15031 }
15032 /*
15033 * Create the phy-num property
15034 */
15035 if (mdi_prop_update_int(*pip, "phy-num",
15036 ptgt->m_phynum) != DDI_SUCCESS) {
15037 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15038 "create phy-num property for target %d lun %d",
15039 target, lun);
15040 mdi_rtn = MDI_FAILURE;
15041 goto virt_create_done;
15042 }
15043 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15044 mdi_rtn = mdi_pi_online(*pip, 0);
15045 if (mdi_rtn == MDI_SUCCESS) {
15046 mutex_enter(&mpt->m_mutex);
15047 ptgt->m_led_status = 0;
15048 (void) mptsas_flush_led_status(mpt, ptgt);
15049 mutex_exit(&mpt->m_mutex);
15050 }
15051 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15052 mdi_rtn = MDI_FAILURE;
15053 }
15054 virt_create_done:
15055 if (*pip && mdi_rtn != MDI_SUCCESS) {
15056 (void) mdi_pi_free(*pip, 0);
15057 *pip = NULL;
15058 *lun_dip = NULL;
15059 }
15060 }
15061
15062 scsi_hba_nodename_compatible_free(nodename, compatible);
15063 if (lun_addr != NULL) {
15064 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15065 }
15066 if (wwn_str != NULL) {
15067 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15068 }
15069 if (component != NULL) {
15070 kmem_free(component, MAXPATHLEN);
15071 }
15072
15073 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15074 }
15075
15076 static int
15077 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15078 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15079 {
15080 int target;
15081 int rval;
15082 int ndi_rtn = NDI_FAILURE;
15083 uint64_t be_sas_wwn;
15084 char *nodename = NULL;
15085 char **compatible = NULL;
15086 int ncompatible = 0;
15087 int instance = 0;
15088 mptsas_t *mpt = DIP2MPT(pdip);
15089 char *wwn_str = NULL;
15090 char *component = NULL;
15091 char *attached_wwn_str = NULL;
15092 uint8_t phy = 0xFF;
15093 uint64_t sas_wwn;
15094 uint32_t devinfo;
15095 uint16_t dev_hdl;
15096 uint16_t pdev_hdl;
15097 uint64_t pdev_sas_wwn;
15098 uint64_t dev_sas_wwn;
15099 uint32_t pdev_info;
15100 uint8_t physport;
15101 uint8_t phy_id;
15102 uint32_t page_address;
15103 uint16_t bay_num, enclosure;
15104 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15105 uint32_t dev_info;
15106 int64_t lun64 = 0;
15107
15108 mutex_enter(&mpt->m_mutex);
15109 target = ptgt->m_devhdl;
15110 sas_wwn = ptgt->m_addr.mta_wwn;
15111 devinfo = ptgt->m_deviceinfo;
15112 phy = ptgt->m_phynum;
15113 mutex_exit(&mpt->m_mutex);
15114
15115 /*
15116 * generate compatible property with binding-set "mpt"
15117 */
15118 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15119 &nodename, &compatible, &ncompatible);
15120
15121 /*
15122 * if nodename can't be determined then print a message and skip it
15123 */
15124 if (nodename == NULL) {
15125 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15126 "for target %d lun %d", target, lun);
15127 return (DDI_FAILURE);
15128 }
15129
15130 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15131 DEVI_SID_NODEID, lun_dip);
15132
15133 /*
15134 * if lun alloc success, set props
15135 */
15136 if (ndi_rtn == NDI_SUCCESS) {
15137
15138 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15139 *lun_dip, LUN_PROP, lun) !=
15140 DDI_PROP_SUCCESS) {
15141 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15142 "property for target %d lun %d (LUN_PROP)",
15143 target, lun);
15144 ndi_rtn = NDI_FAILURE;
15145 goto phys_create_done;
15146 }
15147
15148 lun64 = (int64_t)lun;
15149 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15150 *lun_dip, LUN64_PROP, lun64) !=
15151 DDI_PROP_SUCCESS) {
15152 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15153 "property for target %d lun64 %d (LUN64_PROP)",
15154 target, lun);
15155 ndi_rtn = NDI_FAILURE;
15156 goto phys_create_done;
15157 }
15158 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15159 *lun_dip, "compatible", compatible, ncompatible)
15160 != DDI_PROP_SUCCESS) {
15161 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15162 "property for target %d lun %d (COMPATIBLE)",
15163 target, lun);
15164 ndi_rtn = NDI_FAILURE;
15165 goto phys_create_done;
15166 }
15167
15168 /*
15169 * We need the SAS WWN for non-multipath devices, so
15170 * we'll use the same property as that multipathing
15171 * devices need to present for MPAPI. If we don't have
15172 * a WWN (e.g. parallel SCSI), don't create the prop.
15173 */
15174 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15175 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15176 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15177 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15178 != DDI_PROP_SUCCESS) {
15179 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15180 "create property for SAS target %d lun %d "
15181 "(target-port)", target, lun);
15182 ndi_rtn = NDI_FAILURE;
15183 goto phys_create_done;
15184 }
15185
15186 be_sas_wwn = BE_64(sas_wwn);
15187 if (sas_wwn && ndi_prop_update_byte_array(
15188 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15189 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15190 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15191 "create property for SAS target %d lun %d "
15192 "(port-wwn)", target, lun);
15193 ndi_rtn = NDI_FAILURE;
15194 goto phys_create_done;
15195 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15196 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15197 DDI_PROP_SUCCESS)) {
15198 /*
15199 * Direct attached SATA device without DeviceName
15200 */
15201 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15202 "create property for SAS target %d lun %d "
15203 "(sata-phy)", target, lun);
15204 ndi_rtn = NDI_FAILURE;
15205 goto phys_create_done;
15206 }
15207
15208 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15209 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15210 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15211 "create property for SAS target %d lun %d"
15212 " (SAS_PROP)", target, lun);
15213 ndi_rtn = NDI_FAILURE;
15214 goto phys_create_done;
15215 }
15216 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15217 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15218 mptsas_log(mpt, CE_WARN, "mptsas unable "
15219 "to create guid property for target %d "
15220 "lun %d", target, lun);
15221 ndi_rtn = NDI_FAILURE;
15222 goto phys_create_done;
15223 }
15224
15225 /*
15226 * The following code is to set properties for SM-HBA support,
15227 * it doesn't apply to RAID volumes
15228 */
15229 if (ptgt->m_addr.mta_phymask == 0)
15230 goto phys_raid_lun;
15231
15232 mutex_enter(&mpt->m_mutex);
15233
15234 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15235 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15236 (uint32_t)ptgt->m_devhdl;
15237 rval = mptsas_get_sas_device_page0(mpt, page_address,
15238 &dev_hdl, &dev_sas_wwn, &dev_info,
15239 &physport, &phy_id, &pdev_hdl,
15240 &bay_num, &enclosure);
15241 if (rval != DDI_SUCCESS) {
15242 mutex_exit(&mpt->m_mutex);
15243 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15244 "parent device for handle %d.", page_address);
15245 ndi_rtn = NDI_FAILURE;
15246 goto phys_create_done;
15247 }
15248
15249 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15250 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15251 rval = mptsas_get_sas_device_page0(mpt, page_address,
15252 &dev_hdl, &pdev_sas_wwn, &pdev_info,
15253 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15254 if (rval != DDI_SUCCESS) {
15255 mutex_exit(&mpt->m_mutex);
15256 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15257 "device for handle %d.", page_address);
15258 ndi_rtn = NDI_FAILURE;
15259 goto phys_create_done;
15260 }
15261
15262 mutex_exit(&mpt->m_mutex);
15263
15264 /*
15265 * If this device direct attached to the controller
15266 * set the attached-port to the base wwid
15267 */
15268 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15269 != DEVINFO_DIRECT_ATTACHED) {
15270 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15271 pdev_sas_wwn);
15272 } else {
15273 /*
15274 * Update the iport's attached-port to guid
15275 */
15276 if (sas_wwn == 0) {
15277 (void) sprintf(wwn_str, "p%x", phy);
15278 } else {
15279 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15280 }
15281 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15282 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15283 DDI_PROP_SUCCESS) {
15284 mptsas_log(mpt, CE_WARN,
15285 "mptsas unable to create "
15286 "property for iport target-port"
15287 " %s (sas_wwn)",
15288 wwn_str);
15289 ndi_rtn = NDI_FAILURE;
15290 goto phys_create_done;
15291 }
15292
15293 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15294 mpt->un.m_base_wwid);
15295 }
15296
15297 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15298 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15299 DDI_PROP_SUCCESS) {
15300 mptsas_log(mpt, CE_WARN,
15301 "mptsas unable to create "
15302 "property for iport attached-port %s (sas_wwn)",
15303 attached_wwn_str);
15304 ndi_rtn = NDI_FAILURE;
15305 goto phys_create_done;
15306 }
15307
15308 if (IS_SATA_DEVICE(dev_info)) {
15309 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15310 *lun_dip, MPTSAS_VARIANT, "sata") !=
15311 DDI_PROP_SUCCESS) {
15312 mptsas_log(mpt, CE_WARN,
15313 "mptsas unable to create "
15314 "property for device variant ");
15315 ndi_rtn = NDI_FAILURE;
15316 goto phys_create_done;
15317 }
15318 }
15319
15320 if (IS_ATAPI_DEVICE(dev_info)) {
15321 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15322 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15323 DDI_PROP_SUCCESS) {
15324 mptsas_log(mpt, CE_WARN,
15325 "mptsas unable to create "
15326 "property for device variant ");
15327 ndi_rtn = NDI_FAILURE;
15328 goto phys_create_done;
15329 }
15330 }
15331
15332 phys_raid_lun:
15333 /*
15334 * if this is a SAS controller, and the target is a SATA
15335 * drive, set the 'pm-capable' property for sd and if on
15336 * an OPL platform, also check if this is an ATAPI
15337 * device.
15338 */
15339 instance = ddi_get_instance(mpt->m_dip);
15340 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15341 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15342 NDBG2(("mptsas%d: creating pm-capable property, "
15343 "target %d", instance, target));
15344
15345 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15346 *lun_dip, "pm-capable", 1)) !=
15347 DDI_PROP_SUCCESS) {
15348 mptsas_log(mpt, CE_WARN, "mptsas "
15349 "failed to create pm-capable "
15350 "property, target %d", target);
15351 ndi_rtn = NDI_FAILURE;
15352 goto phys_create_done;
15353 }
15354
15355 }
15356
15357 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15358 /*
15359 * add 'obp-path' properties for devinfo
15360 */
15361 bzero(wwn_str, sizeof (wwn_str));
15362 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15363 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15364 if (guid) {
15365 (void) snprintf(component, MAXPATHLEN,
15366 "disk@w%s,%x", wwn_str, lun);
15367 } else {
15368 (void) snprintf(component, MAXPATHLEN,
15369 "disk@p%x,%x", phy, lun);
15370 }
15371 if (ddi_pathname_obp_set(*lun_dip, component)
15372 != DDI_SUCCESS) {
15373 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15374 "unable to set obp-path for SAS "
15375 "object %s", component);
15376 ndi_rtn = NDI_FAILURE;
15377 goto phys_create_done;
15378 }
15379 }
15380 /*
15381 * Create the phy-num property for non-raid disk
15382 */
15383 if (ptgt->m_addr.mta_phymask != 0) {
15384 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15385 *lun_dip, "phy-num", ptgt->m_phynum) !=
15386 DDI_PROP_SUCCESS) {
15387 mptsas_log(mpt, CE_WARN, "mptsas driver "
15388 "failed to create phy-num property for "
15389 "target %d", target);
15390 ndi_rtn = NDI_FAILURE;
15391 goto phys_create_done;
15392 }
15393 }
15394 phys_create_done:
15395 /*
15396 * If props were setup ok, online the lun
15397 */
15398 if (ndi_rtn == NDI_SUCCESS) {
15399 /*
15400 * Try to online the new node
15401 */
15402 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15403 }
15404 if (ndi_rtn == NDI_SUCCESS) {
15405 mutex_enter(&mpt->m_mutex);
15406 ptgt->m_led_status = 0;
15407 (void) mptsas_flush_led_status(mpt, ptgt);
15408 mutex_exit(&mpt->m_mutex);
15409 }
15410
15411 /*
15412 * If success set rtn flag, else unwire alloc'd lun
15413 */
15414 if (ndi_rtn != NDI_SUCCESS) {
15415 NDBG12(("mptsas driver unable to online "
15416 "target %d lun %d", target, lun));
15417 ndi_prop_remove_all(*lun_dip);
15418 (void) ndi_devi_free(*lun_dip);
15419 *lun_dip = NULL;
15420 }
15421 }
15422
15423 scsi_hba_nodename_compatible_free(nodename, compatible);
15424
15425 if (wwn_str != NULL) {
15426 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15427 }
15428 if (component != NULL) {
15429 kmem_free(component, MAXPATHLEN);
15430 }
15431
15432
15433 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15434 }
15435
15436 static int
15437 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15438 {
15439 mptsas_t *mpt = DIP2MPT(pdip);
15440 struct smp_device smp_sd;
15441
15442 /* XXX An HBA driver should not be allocating an smp_device. */
15443 bzero(&smp_sd, sizeof (struct smp_device));
15444 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15445 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15446
15447 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15448 return (NDI_FAILURE);
15449 return (NDI_SUCCESS);
15450 }
15451
15452 static int
15453 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15454 {
15455 mptsas_t *mpt = DIP2MPT(pdip);
15456 mptsas_smp_t *psmp = NULL;
15457 int rval;
15458 int phymask;
15459
15460 /*
15461 * Get the physical port associated to the iport
15462 * PHYMASK TODO
15463 */
15464 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15465 "phymask", 0);
15466 /*
15467 * Find the smp node in hash table with specified sas address and
15468 * physical port
15469 */
15470 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15471 if (psmp == NULL) {
15472 return (DDI_FAILURE);
15473 }
15474
15475 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15476
15477 return (rval);
15478 }
15479
15480 static int
15481 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
15482 dev_info_t **smp_dip)
15483 {
15484 char wwn_str[MPTSAS_WWN_STRLEN];
15485 char attached_wwn_str[MPTSAS_WWN_STRLEN];
15486 int ndi_rtn = NDI_FAILURE;
15487 int rval = 0;
15488 mptsas_smp_t dev_info;
15489 uint32_t page_address;
15490 mptsas_t *mpt = DIP2MPT(pdip);
15491 uint16_t dev_hdl;
15492 uint64_t sas_wwn;
15493 uint64_t smp_sas_wwn;
15494 uint8_t physport;
15495 uint8_t phy_id;
15496 uint16_t pdev_hdl;
15497 uint8_t numphys = 0;
15498 uint16_t i = 0;
15499 char phymask[MPTSAS_MAX_PHYS];
15500 char *iport = NULL;
15501 mptsas_phymask_t phy_mask = 0;
15502 uint16_t attached_devhdl;
15503 uint16_t bay_num, enclosure;
15504
15505 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15506
15507 /*
15508 * Probe smp device, prevent the node of removed device from being
15509 * configured succesfully
15510 */
15511 if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
15512 return (DDI_FAILURE);
15513 }
15514
15515 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
15516 return (DDI_SUCCESS);
15517 }
15518
15519 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
15520
15521 /*
15522 * if lun alloc success, set props
15523 */
15524 if (ndi_rtn == NDI_SUCCESS) {
15525 /*
15526 * Set the flavor of the child to be SMP flavored
15527 */
15528 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
15529
15530 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15531 *smp_dip, SMP_WWN, wwn_str) !=
15532 DDI_PROP_SUCCESS) {
15533 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15534 "property for smp device %s (sas_wwn)",
15535 wwn_str);
15536 ndi_rtn = NDI_FAILURE;
15537 goto smp_create_done;
15538 }
15539 (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
15540 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15541 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
15542 DDI_PROP_SUCCESS) {
15543 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15544 "property for iport target-port %s (sas_wwn)",
15545 wwn_str);
15546 ndi_rtn = NDI_FAILURE;
15547 goto smp_create_done;
15548 }
15549
15550 mutex_enter(&mpt->m_mutex);
15551
15552 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
15553 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
15554 rval = mptsas_get_sas_expander_page0(mpt, page_address,
15555 &dev_info);
15556 if (rval != DDI_SUCCESS) {
15557 mutex_exit(&mpt->m_mutex);
15558 mptsas_log(mpt, CE_WARN,
15559 "mptsas unable to get expander "
15560 "parent device info for %x", page_address);
15561 ndi_rtn = NDI_FAILURE;
15562 goto smp_create_done;
15563 }
15564
15565 smp_node->m_pdevhdl = dev_info.m_pdevhdl;
15566 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15567 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15568 (uint32_t)dev_info.m_pdevhdl;
15569 rval = mptsas_get_sas_device_page0(mpt, page_address,
15570 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
15571 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15572 if (rval != DDI_SUCCESS) {
15573 mutex_exit(&mpt->m_mutex);
15574 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15575 "device info for %x", page_address);
15576 ndi_rtn = NDI_FAILURE;
15577 goto smp_create_done;
15578 }
15579
15580 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15581 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15582 (uint32_t)dev_info.m_devhdl;
15583 rval = mptsas_get_sas_device_page0(mpt, page_address,
15584 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
15585 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15586 if (rval != DDI_SUCCESS) {
15587 mutex_exit(&mpt->m_mutex);
15588 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15589 "device info for %x", page_address);
15590 ndi_rtn = NDI_FAILURE;
15591 goto smp_create_done;
15592 }
15593 mutex_exit(&mpt->m_mutex);
15594
15595 /*
15596 * If this smp direct attached to the controller
15597 * set the attached-port to the base wwid
15598 */
15599 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15600 != DEVINFO_DIRECT_ATTACHED) {
15601 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
15602 sas_wwn);
15603 } else {
15604 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
15605 mpt->un.m_base_wwid);
15606 }
15607
15608 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15609 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
15610 DDI_PROP_SUCCESS) {
15611 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15612 "property for smp attached-port %s (sas_wwn)",
15613 attached_wwn_str);
15614 ndi_rtn = NDI_FAILURE;
15615 goto smp_create_done;
15616 }
15617
15618 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15619 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
15620 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15621 "create property for SMP %s (SMP_PROP) ",
15622 wwn_str);
15623 ndi_rtn = NDI_FAILURE;
15624 goto smp_create_done;
15625 }
15626
15627 /*
15628 * check the smp to see whether it direct
15629 * attached to the controller
15630 */
15631 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15632 != DEVINFO_DIRECT_ATTACHED) {
15633 goto smp_create_done;
15634 }
15635 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
15636 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
15637 if (numphys > 0) {
15638 goto smp_create_done;
15639 }
15640 /*
15641 * this iport is an old iport, we need to
15642 * reconfig the props for it.
15643 */
15644 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
15645 MPTSAS_VIRTUAL_PORT, 0) !=
15646 DDI_PROP_SUCCESS) {
15647 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
15648 MPTSAS_VIRTUAL_PORT);
15649 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
15650 "prop update failed");
15651 goto smp_create_done;
15652 }
15653
15654 mutex_enter(&mpt->m_mutex);
15655 numphys = 0;
15656 iport = ddi_get_name_addr(pdip);
15657 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15658 bzero(phymask, sizeof (phymask));
15659 (void) sprintf(phymask,
15660 "%x", mpt->m_phy_info[i].phy_mask);
15661 if (strcmp(phymask, iport) == 0) {
15662 phy_mask = mpt->m_phy_info[i].phy_mask;
15663 break;
15664 }
15665 }
15666
15667 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15668 if ((phy_mask >> i) & 0x01) {
15669 numphys++;
15670 }
15671 }
15672 /*
15673 * Update PHY info for smhba
15674 */
15675 if (mptsas_smhba_phy_init(mpt)) {
15676 mutex_exit(&mpt->m_mutex);
15677 mptsas_log(mpt, CE_WARN, "mptsas phy update "
15678 "failed");
15679 goto smp_create_done;
15680 }
15681 mutex_exit(&mpt->m_mutex);
15682
15683 mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
15684 &attached_devhdl);
15685
15686 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
15687 MPTSAS_NUM_PHYS, numphys) !=
15688 DDI_PROP_SUCCESS) {
15689 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
15690 MPTSAS_NUM_PHYS);
15691 mptsas_log(mpt, CE_WARN, "mptsas update "
15692 "num phys props failed");
15693 goto smp_create_done;
15694 }
15695 /*
15696 * Add parent's props for SMHBA support
15697 */
15698 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
15699 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15700 DDI_PROP_SUCCESS) {
15701 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
15702 SCSI_ADDR_PROP_ATTACHED_PORT);
15703 mptsas_log(mpt, CE_WARN, "mptsas update iport"
15704 "attached-port failed");
15705 goto smp_create_done;
15706 }
15707
15708 smp_create_done:
15709 /*
15710 * If props were setup ok, online the lun
15711 */
15712 if (ndi_rtn == NDI_SUCCESS) {
15713 /*
15714 * Try to online the new node
15715 */
15716 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
15717 }
15718
15719 /*
15720 * If success set rtn flag, else unwire alloc'd lun
15721 */
15722 if (ndi_rtn != NDI_SUCCESS) {
15723 NDBG12(("mptsas unable to online "
15724 "SMP target %s", wwn_str));
15725 ndi_prop_remove_all(*smp_dip);
15726 (void) ndi_devi_free(*smp_dip);
15727 }
15728 }
15729
15730 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15731 }
15732
/*
 * smp transport routine
 *
 * SMP framework entry point: translate the generic smp_pkt into an MPI2
 * SMP_PASSTHROUGH request, run it synchronously through
 * mptsas_do_passthru(), and map IOC/SAS status codes onto errno values
 * in smp_pkt_reason.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint8_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	/* Recover the driver softstate from the SMP transport handle. */
	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	/* Destination expander's SAS address. */
	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	/* 0xff: presumably "no specific port" — confirm against MPI2 spec */
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): req/rsp sizes are passed minus 4 bytes —
	 * presumably excluding the SMP frame CRC; confirm with MPI2 spec.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	/* SASAddress goes on the wire little-endian. */
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	/* Direction flags tell the passthru code which buffers to move. */
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map IOC status onto an errno for the SMP framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	/* IOC happy; now check the SAS-level status of the SMP exchange. */
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15817
15818 /*
15819 * If we didn't get a match, we need to get sas page0 for each device, and
15820 * untill we get a match. If failed, return NULL
15821 */
15822 static mptsas_target_t *
15823 mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
15824 {
15825 int i, j = 0;
15826 int rval = 0;
15827 uint16_t cur_handle;
15828 uint32_t page_address;
15829 mptsas_target_t *ptgt = NULL;
15830
15831 /*
15832 * PHY named device must be direct attached and attaches to
15833 * narrow port, if the iport is not parent of the device which
15834 * we are looking for.
15835 */
15836 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15837 if ((1 << i) & phymask)
15838 j++;
15839 }
15840
15841 if (j > 1)
15842 return (NULL);
15843
15844 /*
15845 * Must be a narrow port and single device attached to the narrow port
15846 * So the physical port num of device which is equal to the iport's
15847 * port num is the device what we are looking for.
15848 */
15849
15850 if (mpt->m_phy_info[phy].phy_mask != phymask)
15851 return (NULL);
15852
15853 mutex_enter(&mpt->m_mutex);
15854
15855 ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
15856 &phy);
15857 if (ptgt != NULL) {
15858 mutex_exit(&mpt->m_mutex);
15859 return (ptgt);
15860 }
15861
15862 if (mpt->m_done_traverse_dev) {
15863 mutex_exit(&mpt->m_mutex);
15864 return (NULL);
15865 }
15866
15867 /* If didn't get a match, come here */
15868 cur_handle = mpt->m_dev_handle;
15869 for (; ; ) {
15870 ptgt = NULL;
15871 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15872 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15873 rval = mptsas_get_target_device_info(mpt, page_address,
15874 &cur_handle, &ptgt);
15875 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15876 (rval == DEV_INFO_FAIL_ALLOC)) {
15877 break;
15878 }
15879 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15880 (rval == DEV_INFO_PHYS_DISK)) {
15881 continue;
15882 }
15883 mpt->m_dev_handle = cur_handle;
15884
15885 if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
15886 break;
15887 }
15888 }
15889
15890 mutex_exit(&mpt->m_mutex);
15891 return (ptgt);
15892 }
15893
15894 /*
15895 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
15896 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
15897 * If we didn't get a match, we need to get sas page0 for each device, and
15898 * untill we get a match
15899 * If failed, return NULL
15900 */
15901 static mptsas_target_t *
15902 mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
15903 {
15904 int rval = 0;
15905 uint16_t cur_handle;
15906 uint32_t page_address;
15907 mptsas_target_t *tmp_tgt = NULL;
15908 mptsas_target_addr_t addr;
15909
15910 addr.mta_wwn = wwid;
15911 addr.mta_phymask = phymask;
15912 mutex_enter(&mpt->m_mutex);
15913 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
15914 if (tmp_tgt != NULL) {
15915 mutex_exit(&mpt->m_mutex);
15916 return (tmp_tgt);
15917 }
15918
15919 if (phymask == 0) {
15920 /*
15921 * It's IR volume
15922 */
15923 rval = mptsas_get_raid_info(mpt);
15924 if (rval) {
15925 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
15926 }
15927 mutex_exit(&mpt->m_mutex);
15928 return (tmp_tgt);
15929 }
15930
15931 if (mpt->m_done_traverse_dev) {
15932 mutex_exit(&mpt->m_mutex);
15933 return (NULL);
15934 }
15935
15936 /* If didn't get a match, come here */
15937 cur_handle = mpt->m_dev_handle;
15938 for (;;) {
15939 tmp_tgt = NULL;
15940 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15941 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
15942 rval = mptsas_get_target_device_info(mpt, page_address,
15943 &cur_handle, &tmp_tgt);
15944 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15945 (rval == DEV_INFO_FAIL_ALLOC)) {
15946 tmp_tgt = NULL;
15947 break;
15948 }
15949 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15950 (rval == DEV_INFO_PHYS_DISK)) {
15951 continue;
15952 }
15953 mpt->m_dev_handle = cur_handle;
15954 if ((tmp_tgt->m_addr.mta_wwn) &&
15955 (tmp_tgt->m_addr.mta_wwn == wwid) &&
15956 (tmp_tgt->m_addr.mta_phymask == phymask)) {
15957 break;
15958 }
15959 }
15960
15961 mutex_exit(&mpt->m_mutex);
15962 return (tmp_tgt);
15963 }
15964
/*
 * Look up an SMP (expander) target by WWID and phymask, first in the
 * SMP target hash and then, if the initial expander traversal has not
 * completed, by walking SAS expander page 0.  Returns NULL on failure.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the expander may already be in the SMP hash. */
	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* All expanders already traversed: a hash miss is definitive. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/*
	 * If didn't get a match, come here: walk expander page 0 from
	 * the last handle, adding each expander found to the hash, until
	 * the firmware runs out of expanders or we find the wwid.
	 */
	cur_handle = mpt->m_smp_devhdl;
	for (;;) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			/* No more expanders; psmp is NULL from above. */
			break;
		}
		/* Advance our resume point to the handle just fetched. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(mpt, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
		    (psmp->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
16011
16012 mptsas_target_t *
16013 mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
16014 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16015 {
16016 mptsas_target_t *tmp_tgt = NULL;
16017 mptsas_target_addr_t addr;
16018
16019 addr.mta_wwn = wwid;
16020 addr.mta_phymask = phymask;
16021 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16022 if (tmp_tgt != NULL) {
16023 NDBG20(("Hash item already exist"));
16024 tmp_tgt->m_deviceinfo = devinfo;
16025 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16026 return (tmp_tgt);
16027 }
16028 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16029 if (tmp_tgt == NULL) {
16030 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16031 return (NULL);
16032 }
16033 tmp_tgt->m_devhdl = devhdl;
16034 tmp_tgt->m_addr.mta_wwn = wwid;
16035 tmp_tgt->m_deviceinfo = devinfo;
16036 tmp_tgt->m_addr.mta_phymask = phymask;
16037 tmp_tgt->m_phynum = phynum;
16038 /* Initialized the tgt structure */
16039 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16040 tmp_tgt->m_qfull_retry_interval =
16041 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16042 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16043 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16044
16045 refhash_insert(mpt->m_targets, tmp_tgt);
16046
16047 return (tmp_tgt);
16048 }
16049
16050 static void
16051 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16052 {
16053 dst->m_devhdl = src->m_devhdl;
16054 dst->m_deviceinfo = src->m_deviceinfo;
16055 dst->m_pdevhdl = src->m_pdevhdl;
16056 dst->m_pdevinfo = src->m_pdevinfo;
16057 }
16058
16059 static mptsas_smp_t *
16060 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16061 {
16062 mptsas_target_addr_t addr;
16063 mptsas_smp_t *ret_data;
16064
16065 addr.mta_wwn = data->m_addr.mta_wwn;
16066 addr.mta_phymask = data->m_addr.mta_phymask;
16067 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16068 /*
16069 * If there's already a matching SMP target, update its fields
16070 * in place. Since the address is not changing, it's safe to do
16071 * this. We cannot just bcopy() here because the structure we've
16072 * been given has invalid hash links.
16073 */
16074 if (ret_data != NULL) {
16075 mptsas_smp_target_copy(data, ret_data);
16076 return (ret_data);
16077 }
16078
16079 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16080 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16081 refhash_insert(mpt->m_smp_targets, ret_data);
16082 return (ret_data);
16083 }
16084
16085 /*
16086 * Functions for SGPIO LED support
16087 */
/*
 * Resolve a dev_t to its devinfo node and fetch the node's "phymask"
 * property into *phymask (0 if the property is absent).  Returns NULL
 * if no devinfo node corresponds to dev.
 *
 * NOTE(review): the hold taken by e_ddi_hold_devi_by_dev() is released
 * before the dip is returned, so the caller receives an unheld pointer —
 * presumably safe because callers use it while the device cannot detach;
 * confirm against the callers.
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	dev_info_t *dip;
	int prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	ddi_release_devi(dip);
	return (dip);
}
16102 static mptsas_target_t *
16103 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16104 {
16105 uint8_t phynum;
16106 uint64_t wwn;
16107 int lun;
16108 mptsas_target_t *ptgt = NULL;
16109
16110 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16111 return (NULL);
16112 }
16113 if (addr[0] == 'w') {
16114 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16115 } else {
16116 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16117 }
16118 return (ptgt);
16119 }
16120
16121 static int
16122 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16123 {
16124 uint32_t slotstatus = 0;
16125
16126 /* Build an MPI2 Slot Status based on our view of the world */
16127 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16128 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16129 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16130 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16131 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16132 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16133
16134 /* Write it to the controller */
16135 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16136 slotstatus, ptgt->m_slot_num));
16137 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16138 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16139 }
16140
16141 /*
16142 * send sep request, use enclosure/slot addressing
16143 */
/*
 * Send a SCSI Enclosure Processor request for ptgt using enclosure/slot
 * addressing.  For a write action (MPI2_SEP_REQ_ACTION_WRITE_STATUS)
 * *status supplies the slot status to set; for other actions the slot
 * status read back from the controller is stored into *status.
 * Returns 0 on success or an errno value mapped from the IOC status.
 * Caller must hold mpt->m_mutex.
 */
static int
mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t req;
	Mpi2SepReply_t rep;
	int ret;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * We only support SEP control of directly-attached targets, in which
	 * case the "SEP" we're talking to is a virtual one contained within
	 * the HBA itself. This is necessary because DA targets typically have
	 * no other mechanism for LED control. Targets for which a separate
	 * enclosure service processor exists should be controlled via ses(7d)
	 * or sgen(7d). Furthermore, since such requests can time out, they
	 * should be made in user context rather than in response to
	 * asynchronous fabric changes.
	 *
	 * In addition, we do not support this operation for RAID volumes,
	 * since there is no slot associated with them.
	 */
	if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
	    ptgt->m_addr.mta_phymask == 0) {
		return (ENOTTY);
	}

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	/* Build the SEP request; multi-byte fields are little-endian. */
	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	/* Issue the request through the passthrough path (60s timeout). */
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (ret);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
		    LE_32(rep.IOCLogInfo));
		/* Map the MPI2 IOC status codes onto errno values. */
		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_INVALID_FUNCTION:
		case MPI2_IOCSTATUS_INVALID_VPID:
		case MPI2_IOCSTATUS_INVALID_FIELD:
		case MPI2_IOCSTATUS_INVALID_STATE:
		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
			return (EINVAL);
		case MPI2_IOCSTATUS_BUSY:
			return (EBUSY);
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
			return (EAGAIN);
		case MPI2_IOCSTATUS_INVALID_SGL:
		case MPI2_IOCSTATUS_INTERNAL_ERROR:
		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		default:
			return (EIO);
		}
	}
	/* Read-type actions return the current slot status to the caller. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (0);
}
16224
/*
 * Allocate and bind a consistent DMA buffer of alloc_size bytes.
 *
 * On success returns TRUE with *dma_hdp, *acc_hdp and *dma_memp set;
 * the first cookie is stored in *cookiep when the caller supplies one
 * (a NULL cookiep discards it into a local).  On failure returns FALSE
 * with every handle already torn down — the caller frees nothing.
 *
 * NOTE(review): ncookie is not checked against 1; presumably every
 * caller passes a dma_attr with sgllen 1 so the buffer binds to a
 * single cookie — confirm against the callers.
 */
int
mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
    ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
    uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
{
	ddi_dma_cookie_t new_cookie;
	size_t alloc_len;
	uint_t ncookie;

	if (cookiep == NULL)
		cookiep = &new_cookie;

	/* Step 1: DMA handle (DDI_DMA_SLEEP, so this blocks, not fails). */
	if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
	    NULL, dma_hdp) != DDI_SUCCESS) {
		return (FALSE);
	}

	/* Step 2: consistent memory; unwind the handle on failure. */
	if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
	    acc_hdp) != DDI_SUCCESS) {
		ddi_dma_free_handle(dma_hdp);
		return (FALSE);
	}

	/* Step 3: bind; unwind both memory and handle on failure. */
	if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
	    (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
	    cookiep, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(acc_hdp);
		ddi_dma_free_handle(dma_hdp);
		return (FALSE);
	}

	return (TRUE);
}
16259
16260 void
16261 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16262 {
16263 if (*dma_hdp == NULL)
16264 return;
16265
16266 (void) ddi_dma_unbind_handle(*dma_hdp);
16267 (void) ddi_dma_mem_free(acc_hdp);
16268 ddi_dma_free_handle(dma_hdp);
16269 }