Print this page
NEX-1889 upstream
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
|
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 + * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
27 28 */
28 29
29 30 /*
30 31 * Copyright (c) 2000 to 2010, LSI Corporation.
31 32 * All rights reserved.
32 33 *
33 34 * Redistribution and use in source and binary forms of all code within
34 35 * this file that is exclusively owned by LSI, with or without
35 36 * modification, is permitted provided that, in addition to the CDDL 1.0
36 37 * License requirements, the following conditions are met:
37 38 *
38 39 * Neither the name of the author nor the names of its contributors may be
39 40 * used to endorse or promote products derived from this software without
40 41 * specific prior written permission.
41 42 *
42 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
45 46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
46 47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
47 48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
48 49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
49 50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
50 51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
51 52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
52 53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
53 54 * DAMAGE.
54 55 */
55 56
56 57 /*
57 58 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
58 59 *
59 60 */
60 61
61 62 #if defined(lint) || defined(DEBUG)
62 63 #define MPTSAS_DEBUG
63 64 #endif
64 65
65 66 /*
66 67 * standard header files.
67 68 */
68 69 #include <sys/note.h>
69 70 #include <sys/scsi/scsi.h>
70 71 #include <sys/pci.h>
71 72 #include <sys/file.h>
72 73 #include <sys/policy.h>
73 74 #include <sys/model.h>
74 75 #include <sys/sysevent.h>
75 76 #include <sys/sysevent/eventdefs.h>
76 77 #include <sys/sysevent/dr.h>
77 78 #include <sys/sata/sata_defs.h>
78 79 #include <sys/scsi/generic/sas.h>
79 80 #include <sys/scsi/impl/scsi_sas.h>
80 81
81 82 #pragma pack(1)
82 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
83 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
84 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
85 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
86 87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
87 88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
88 89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
89 90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
90 91 #pragma pack()
91 92
92 93 /*
93 94 * private header files.
94 95 *
95 96 */
96 97 #include <sys/scsi/impl/scsi_reset_notify.h>
97 98 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
98 99 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
99 100 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
100 101 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
101 102 #include <sys/raidioctl.h>
102 103
103 104 #include <sys/fs/dv_node.h> /* devfs_clean */
104 105
105 106 /*
106 107 * FMA header files
107 108 */
108 109 #include <sys/ddifm.h>
109 110 #include <sys/fm/protocol.h>
110 111 #include <sys/fm/util.h>
111 112 #include <sys/fm/io/ddi.h>
112 113
113 114 /*
114 115 * autoconfiguration data and routines.
115 116 */
116 117 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
117 118 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
118 119 static int mptsas_power(dev_info_t *dip, int component, int level);
119 120
120 121 /*
121 122 * cb_ops function
122 123 */
123 124 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
124 125 cred_t *credp, int *rval);
125 126 #ifdef __sparc
126 127 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
127 128 #else /* __sparc */
128 129 static int mptsas_quiesce(dev_info_t *devi);
129 130 #endif /* __sparc */
130 131
131 132 /*
132 133  * Resource initialization for hardware
133 134 */
134 135 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
135 136 static void mptsas_disable_bus_master(mptsas_t *mpt);
136 137 static void mptsas_hba_fini(mptsas_t *mpt);
137 138 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
138 139 static int mptsas_hba_setup(mptsas_t *mpt);
139 140 static void mptsas_hba_teardown(mptsas_t *mpt);
140 141 static int mptsas_config_space_init(mptsas_t *mpt);
141 142 static void mptsas_config_space_fini(mptsas_t *mpt);
142 143 static void mptsas_iport_register(mptsas_t *mpt);
143 144 static int mptsas_smp_setup(mptsas_t *mpt);
144 145 static void mptsas_smp_teardown(mptsas_t *mpt);
145 146 static int mptsas_cache_create(mptsas_t *mpt);
146 147 static void mptsas_cache_destroy(mptsas_t *mpt);
147 148 static int mptsas_alloc_request_frames(mptsas_t *mpt);
148 149 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
149 150 static int mptsas_alloc_free_queue(mptsas_t *mpt);
150 151 static int mptsas_alloc_post_queue(mptsas_t *mpt);
151 152 static void mptsas_alloc_reply_args(mptsas_t *mpt);
152 153 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
153 154 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
154 155 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
155 156
156 157 /*
157 158 * SCSA function prototypes
158 159 */
159 160 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
160 161 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
161 162 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
162 163 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
163 164 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
164 165 int tgtonly);
165 166 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
166 167 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
167 168 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
168 169 int tgtlen, int flags, int (*callback)(), caddr_t arg);
169 170 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
170 171 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
171 172 struct scsi_pkt *pkt);
172 173 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
173 174 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
174 175 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
175 176 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
176 177 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
177 178 void (*callback)(caddr_t), caddr_t arg);
178 179 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
179 180 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
180 181 static int mptsas_scsi_quiesce(dev_info_t *dip);
181 182 static int mptsas_scsi_unquiesce(dev_info_t *dip);
182 183 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
183 184 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
184 185
185 186 /*
186 187 * SMP functions
187 188 */
188 189 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
189 190
190 191 /*
191 192 * internal function prototypes.
192 193 */
193 194 static void mptsas_list_add(mptsas_t *mpt);
194 195 static void mptsas_list_del(mptsas_t *mpt);
195 196
196 197 static int mptsas_quiesce_bus(mptsas_t *mpt);
197 198 static int mptsas_unquiesce_bus(mptsas_t *mpt);
198 199
199 200 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
200 201 static void mptsas_free_handshake_msg(mptsas_t *mpt);
201 202
202 203 static void mptsas_ncmds_checkdrain(void *arg);
203 204
204 205 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
205 206 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
206 207 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
207 208 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
208 209
209 210 static int mptsas_do_detach(dev_info_t *dev);
210 211 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
211 212 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
212 213 struct scsi_pkt *pkt);
213 214 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
214 215
215 216 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
216 217 static void mptsas_handle_event(void *args);
217 218 static int mptsas_handle_event_sync(void *args);
218 219 static void mptsas_handle_dr(void *args);
219 220 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
220 221 dev_info_t *pdip);
221 222
222 223 static void mptsas_restart_cmd(void *);
223 224
224 225 static void mptsas_flush_hba(mptsas_t *mpt);
225 226 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
226 227 uint8_t tasktype);
227 228 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
228 229 uchar_t reason, uint_t stat);
229 230
230 231 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
231 232 static void mptsas_process_intr(mptsas_t *mpt,
232 233 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
233 234 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
234 235 pMpi2ReplyDescriptorsUnion_t reply_desc);
235 236 static void mptsas_handle_address_reply(mptsas_t *mpt,
236 237 pMpi2ReplyDescriptorsUnion_t reply_desc);
237 238 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
|
↓ open down ↓ |
201 lines elided |
↑ open up ↑ |
238 239 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
239 240 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
240 241
241 242 static void mptsas_watch(void *arg);
242 243 static void mptsas_watchsubr(mptsas_t *mpt);
243 244 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
244 245
245 246 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
246 247 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
247 248 uint8_t *data, uint32_t request_size, uint32_t reply_size,
248 - uint32_t data_size, uint32_t direction, uint8_t *dataout,
249 + uint32_t data_size, uint8_t direction, uint8_t *dataout,
249 250 uint32_t dataout_size, short timeout, int mode);
250 251 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
251 252
252 253 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
253 254 uint32_t unique_id);
254 255 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
255 256 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
256 257 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
257 258 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
258 259 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
259 260 uint32_t diag_type);
260 261 static int mptsas_diag_register(mptsas_t *mpt,
261 262 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
262 263 static int mptsas_diag_unregister(mptsas_t *mpt,
263 264 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
264 265 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
265 266 uint32_t *return_code);
266 267 static int mptsas_diag_read_buffer(mptsas_t *mpt,
267 268 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
268 269 uint32_t *return_code, int ioctl_mode);
269 270 static int mptsas_diag_release(mptsas_t *mpt,
270 271 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
271 272 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
272 273 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
273 274 int ioctl_mode);
274 275 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
275 276 int mode);
276 277
277 278 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
278 279 int cmdlen, int tgtlen, int statuslen, int kf);
279 280 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
280 281
281 282 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
282 283 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
283 284
284 285 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
285 286 int kmflags);
286 287 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
287 288
288 289 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
289 290 mptsas_cmd_t *cmd);
290 291 static void mptsas_check_task_mgt(mptsas_t *mpt,
291 292 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
292 293 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
293 294 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
294 295 int *resid);
295 296
296 297 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
297 298 static void mptsas_free_active_slots(mptsas_t *mpt);
298 299 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
299 300
300 301 static void mptsas_restart_hba(mptsas_t *mpt);
301 302 static void mptsas_restart_waitq(mptsas_t *mpt);
302 303
303 304 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
304 305 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
305 306 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
306 307
307 308 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
308 309 static void mptsas_doneq_empty(mptsas_t *mpt);
309 310 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
310 311
311 312 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
312 313 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
313 314 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
314 315 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
315 316
316 317
317 318 static void mptsas_start_watch_reset_delay();
318 319 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
319 320 static void mptsas_watch_reset_delay(void *arg);
320 321 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
321 322
322 323 /*
323 324 * helper functions
324 325 */
325 326 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
326 327
327 328 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
328 329 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
329 330 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
330 331 int lun);
331 332 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
332 333 int lun);
333 334 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
334 335 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
335 336
336 337 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
337 338 int *lun);
338 339 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
339 340
340 341 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
341 342 mptsas_phymask_t phymask, uint8_t phy);
342 343 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
343 344 mptsas_phymask_t phymask, uint64_t wwid);
344 345 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
345 346 mptsas_phymask_t phymask, uint64_t wwid);
346 347
347 348 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
348 349 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
349 350
350 351 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
351 352 uint16_t *handle, mptsas_target_t **pptgt);
352 353 static void mptsas_update_phymask(mptsas_t *mpt);
353 354
354 355 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
355 356 uint32_t *status, uint8_t cmd);
356 357 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
357 358 mptsas_phymask_t *phymask);
358 359 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
359 360 mptsas_phymask_t phymask);
360 361 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
361 362
362 363
363 364 /*
364 365 * Enumeration / DR functions
365 366 */
366 367 static void mptsas_config_all(dev_info_t *pdip);
367 368 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
368 369 dev_info_t **lundip);
369 370 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
370 371 dev_info_t **lundip);
371 372
372 373 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
373 374 static int mptsas_offline_target(dev_info_t *pdip, char *name);
374 375
375 376 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
376 377 dev_info_t **dip);
377 378
378 379 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
379 380 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
380 381 dev_info_t **dip, mptsas_target_t *ptgt);
381 382
382 383 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
383 384 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
384 385
385 386 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
386 387 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
387 388 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
388 389 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
389 390 int lun);
390 391
391 392 static void mptsas_offline_missed_luns(dev_info_t *pdip,
392 393 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
393 394 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
394 395 mdi_pathinfo_t *rpip, uint_t flags);
395 396
396 397 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
397 398 dev_info_t **smp_dip);
398 399 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
399 400 uint_t flags);
400 401
401 402 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
402 403 int mode, int *rval);
403 404 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
404 405 int mode, int *rval);
405 406 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
406 407 int mode, int *rval);
407 408 static void mptsas_record_event(void *args);
408 409 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
409 410 int mode);
410 411
411 412 mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
412 413 uint32_t, mptsas_phymask_t, uint8_t);
413 414 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
414 415 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
415 416 dev_info_t **smp_dip);
416 417
417 418 /*
418 419 * Power management functions
419 420 */
420 421 static int mptsas_get_pci_cap(mptsas_t *mpt);
421 422 static int mptsas_init_pm(mptsas_t *mpt);
422 423
423 424 /*
424 425 * MPT MSI tunable:
425 426 *
426 427 * By default MSI is enabled on all supported platforms.
427 428 */
428 429 boolean_t mptsas_enable_msi = B_TRUE;
429 430 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
430 431
431 432 static int mptsas_register_intrs(mptsas_t *);
432 433 static void mptsas_unregister_intrs(mptsas_t *);
433 434 static int mptsas_add_intrs(mptsas_t *, int);
434 435 static void mptsas_rem_intrs(mptsas_t *);
435 436
436 437 /*
437 438 * FMA Prototypes
438 439 */
439 440 static void mptsas_fm_init(mptsas_t *mpt);
440 441 static void mptsas_fm_fini(mptsas_t *mpt);
441 442 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
442 443
443 444 extern pri_t minclsyspri, maxclsyspri;
444 445
445 446 /*
446 447 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
447 448 * under this device that the paths to a physical device are created when
448 449 * MPxIO is used.
449 450 */
450 451 extern dev_info_t *scsi_vhci_dip;
451 452
452 453 /*
453 454 * Tunable timeout value for Inquiry VPD page 0x83
454 455 * By default the value is 30 seconds.
455 456 */
456 457 int mptsas_inq83_retry_timeout = 30;
457 458
458 459 /*
459 460 * This is used to allocate memory for message frame storage, not for
460 461 * data I/O DMA. All message frames must be stored in the first 4G of
461 462 * physical memory.
462 463 */
463 464 ddi_dma_attr_t mptsas_dma_attrs = {
464 465 DMA_ATTR_V0, /* attribute layout version */
465 466 0x0ull, /* address low - should be 0 (longlong) */
466 467 0xffffffffull, /* address high - 32-bit max range */
467 468 0x00ffffffull, /* count max - max DMA object size */
468 469 4, /* allocation alignment requirements */
469 470 0x78, /* burstsizes - binary encoded values */
470 471 1, /* minxfer - gran. of DMA engine */
471 472 0x00ffffffull, /* maxxfer - gran. of DMA engine */
472 473 0xffffffffull, /* max segment size (DMA boundary) */
473 474 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
474 475 512, /* granularity - device transfer size */
475 476 0 /* flags, set to 0 */
476 477 };
477 478
478 479 /*
479 480 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
480 481 * physical addresses are supported.)
481 482 */
482 483 ddi_dma_attr_t mptsas_dma_attrs64 = {
483 484 DMA_ATTR_V0, /* attribute layout version */
|
↓ open down ↓ |
225 lines elided |
↑ open up ↑ |
484 485 0x0ull, /* address low - should be 0 (longlong) */
485 486 0xffffffffffffffffull, /* address high - 64-bit max */
486 487 0x00ffffffull, /* count max - max DMA object size */
487 488 4, /* allocation alignment requirements */
488 489 0x78, /* burstsizes - binary encoded values */
489 490 1, /* minxfer - gran. of DMA engine */
490 491 0x00ffffffull, /* maxxfer - gran. of DMA engine */
491 492 0xffffffffull, /* max segment size (DMA boundary) */
492 493 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
493 494 512, /* granularity - device transfer size */
494 - DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */
495 + 0 /* flags, set to 0 */
495 496 };
496 497
497 498 ddi_device_acc_attr_t mptsas_dev_attr = {
498 499 DDI_DEVICE_ATTR_V1,
499 500 DDI_STRUCTURE_LE_ACC,
500 501 DDI_STRICTORDER_ACC,
501 502 DDI_DEFAULT_ACC
502 503 };
503 504
504 505 static struct cb_ops mptsas_cb_ops = {
505 506 scsi_hba_open, /* open */
506 507 scsi_hba_close, /* close */
507 508 nodev, /* strategy */
508 509 nodev, /* print */
509 510 nodev, /* dump */
510 511 nodev, /* read */
511 512 nodev, /* write */
512 513 mptsas_ioctl, /* ioctl */
513 514 nodev, /* devmap */
514 515 nodev, /* mmap */
515 516 nodev, /* segmap */
516 517 nochpoll, /* chpoll */
517 518 ddi_prop_op, /* cb_prop_op */
518 519 NULL, /* streamtab */
519 520 D_MP, /* cb_flag */
520 521 CB_REV, /* rev */
521 522 nodev, /* aread */
522 523 nodev /* awrite */
523 524 };
524 525
525 526 static struct dev_ops mptsas_ops = {
526 527 DEVO_REV, /* devo_rev, */
527 528 0, /* refcnt */
528 529 ddi_no_info, /* info */
529 530 nulldev, /* identify */
530 531 nulldev, /* probe */
531 532 mptsas_attach, /* attach */
532 533 mptsas_detach, /* detach */
533 534 #ifdef __sparc
534 535 mptsas_reset,
535 536 #else
536 537 nodev, /* reset */
537 538 #endif /* __sparc */
538 539 &mptsas_cb_ops, /* driver operations */
539 540 NULL, /* bus operations */
540 541 mptsas_power, /* power management */
541 542 #ifdef __sparc
542 543 ddi_quiesce_not_needed
543 544 #else
544 545 mptsas_quiesce /* quiesce */
545 546 #endif /* __sparc */
546 547 };
547 548
548 549
549 550 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
550 551
551 552 static struct modldrv modldrv = {
552 553 &mod_driverops, /* Type of module. This one is a driver */
553 554 MPTSAS_MOD_STRING, /* Name of the module. */
554 555 &mptsas_ops, /* driver ops */
555 556 };
556 557
557 558 static struct modlinkage modlinkage = {
558 559 MODREV_1, &modldrv, NULL
559 560 };
560 561 #define TARGET_PROP "target"
561 562 #define LUN_PROP "lun"
|
↓ open down ↓ |
57 lines elided |
↑ open up ↑ |
562 563 #define LUN64_PROP "lun64"
563 564 #define SAS_PROP "sas-mpt"
564 565 #define MDI_GUID "wwn"
565 566 #define NDI_GUID "guid"
566 567 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
567 568
568 569 /*
569 570 * Local static data
570 571 */
571 572 #if defined(MPTSAS_DEBUG)
572 -uint32_t mptsas_debug_flags = 0;
573 +uint32_t mptsas_debug_flags = 0x0;
573 574 #endif /* defined(MPTSAS_DEBUG) */
574 575 uint32_t mptsas_debug_resets = 0;
575 576
576 577 static kmutex_t mptsas_global_mutex;
577 578 static void *mptsas_state; /* soft state ptr */
578 579 static krwlock_t mptsas_global_rwlock;
579 580
580 581 static kmutex_t mptsas_log_mutex;
581 582 static char mptsas_log_buf[256];
582 583 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
583 584
584 585 static mptsas_t *mptsas_head, *mptsas_tail;
585 586 static clock_t mptsas_scsi_watchdog_tick;
586 587 static clock_t mptsas_tick;
587 588 static timeout_id_t mptsas_reset_watch;
588 589 static timeout_id_t mptsas_timeout_id;
589 590 static int mptsas_timeouts_enabled = 0;
591 +
590 592 /*
593 + * The only software restriction on switching msg buffers to 64 bit seems to
594 + * be the Auto Request Sense interface. The high 32 bits for all such
595 + * requests appear to be required to sit in the same 4G segment.
596 + * See initialization of SenseBufferAddressHigh in mptsas_init.c, and
597 + * the use of SenseBufferLowAddress in requests. Note that there is
598 + * currently a dependency on scsi_alloc_consistent_buf() adhering to
599 + * this requirement.
600 + * There is also a question about improved performance over PCI/PCIX
601 + * if transfers are within the first 4Gb.
602 + */
603 +static int mptsas_use_64bit_msgaddr = 0;
604 +
605 +/*
591 606 * warlock directives
592 607 */
593 608 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
594 609 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
595 610 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
596 611 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
597 612 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
598 613 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
599 614
600 615 /*
601 616 * SM - HBA statics
602 617 */
603 618 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
604 619
605 620 #ifdef MPTSAS_DEBUG
606 621 void debug_enter(char *);
607 622 #endif
608 623
/*
 * Loadable-module _init(9E) entry point.
 *
 * Notes:
 *	- scsi_hba_init(9F) initializes SCSI HBA modules
 *	- must call scsi_hba_fini(9F) if modload() fails
 *
 * Initializes the driver soft-state area, registers with the SCSI HBA
 * framework, creates the global locks, and installs the module.  On any
 * failure, everything already set up is torn down before returning.
 */
int
_init(void)
{
	int status;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	NDBG0(("_init"));

	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
	    MPTSAS_INITIAL_SOFT_SPACE);
	if (status != 0) {
		return (status);
	}

	if ((status = scsi_hba_init(&modlinkage)) != 0) {
		/* Undo the soft-state setup before failing. */
		ddi_soft_state_fini(&mptsas_state);
		return (status);
	}

	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);

	if ((status = mod_install(&modlinkage)) != 0) {
		/* Tear down in reverse order of initialization. */
		mutex_destroy(&mptsas_log_mutex);
		rw_destroy(&mptsas_global_rwlock);
		mutex_destroy(&mptsas_global_mutex);
		ddi_soft_state_fini(&mptsas_state);
		scsi_hba_fini(&modlinkage);
	}

	return (status);
}
648 663
/*
 * Loadable-module _fini(9E) entry point.
 *
 * Notes:
 *	- scsi_hba_fini(9F) uninitializes SCSI HBA modules
 *
 * Attempts to remove the module; only if mod_remove(9F) succeeds are the
 * soft-state area, the SCSI HBA registration, and the global locks
 * released.  If mod_remove() fails, the driver remains fully usable.
 */
int
_fini(void)
{
	int status;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	NDBG0(("_fini"));

	if ((status = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&mptsas_state);
		scsi_hba_fini(&modlinkage);
		mutex_destroy(&mptsas_global_mutex);
		rw_destroy(&mptsas_global_rwlock);
		mutex_destroy(&mptsas_log_mutex);
	}
	return (status);
}
671 686
672 687 /*
673 688 * The loadable-module _info(9E) entry point
674 689 */
675 690 int
676 691 _info(struct modinfo *modinfop)
677 692 {
678 693 /* CONSTCOND */
679 694 ASSERT(NO_COMPETING_THREADS);
680 695 NDBG0(("mptsas _info"));
681 696
682 697 return (mod_info(&modlinkage, modinfop));
683 698 }
684 699
685 700 static int
686 701 mptsas_target_eval_devhdl(const void *op, void *arg)
687 702 {
688 703 uint16_t dh = *(uint16_t *)arg;
689 704 const mptsas_target_t *tp = op;
690 705
691 706 return ((int)tp->m_devhdl - (int)dh);
692 707 }
693 708
694 709 static int
695 710 mptsas_target_eval_slot(const void *op, void *arg)
696 711 {
697 712 mptsas_led_control_t *lcp = arg;
698 713 const mptsas_target_t *tp = op;
699 714
700 715 if (tp->m_enclosure != lcp->Enclosure)
701 716 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
702 717
703 718 return ((int)tp->m_slot_num - (int)lcp->Slot);
704 719 }
705 720
706 721 static int
707 722 mptsas_target_eval_nowwn(const void *op, void *arg)
708 723 {
709 724 uint8_t phy = *(uint8_t *)arg;
710 725 const mptsas_target_t *tp = op;
711 726
712 727 if (tp->m_addr.mta_wwn != 0)
713 728 return (-1);
714 729
715 730 return ((int)tp->m_phynum - (int)phy);
716 731 }
717 732
718 733 static int
719 734 mptsas_smp_eval_devhdl(const void *op, void *arg)
720 735 {
721 736 uint16_t dh = *(uint16_t *)arg;
722 737 const mptsas_smp_t *sp = op;
723 738
724 739 return ((int)sp->m_devhdl - (int)dh);
725 740 }
726 741
727 742 static uint64_t
728 743 mptsas_target_addr_hash(const void *tp)
729 744 {
730 745 const mptsas_target_addr_t *tap = tp;
731 746
732 747 return ((tap->mta_wwn & 0xffffffffffffULL) |
733 748 ((uint64_t)tap->mta_phymask << 48));
734 749 }
735 750
736 751 static int
737 752 mptsas_target_addr_cmp(const void *a, const void *b)
738 753 {
739 754 const mptsas_target_addr_t *aap = a;
740 755 const mptsas_target_addr_t *bap = b;
741 756
742 757 if (aap->mta_wwn < bap->mta_wwn)
743 758 return (-1);
744 759 if (aap->mta_wwn > bap->mta_wwn)
745 760 return (1);
746 761 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
747 762 }
748 763
749 764 static void
750 765 mptsas_target_free(void *op)
751 766 {
752 767 kmem_free(op, sizeof (mptsas_target_t));
753 768 }
754 769
755 770 static void
756 771 mptsas_smp_free(void *op)
757 772 {
758 773 kmem_free(op, sizeof (mptsas_smp_t));
759 774 }
760 775
/*
 * Tear down both the SAS target and SMP target hash tables.  Every
 * element is explicitly removed before the table itself is destroyed,
 * and the cached table pointers are cleared so any stale use after
 * teardown is easy to catch.
 *
 * NOTE(review): refhash_next() is called on an element after
 * refhash_remove() has been applied to it; this assumes the refhash
 * implementation defers freeing removed entries while they are still
 * referenced by the iteration — confirm against the refhash code.
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
780 795
/*
 * attach(9E) handling for the scsi-iport child nodes of the HBA node.
 *
 * For DDI_ATTACH this locates the parent HBA's soft state, matches the
 * iport's unit address against the per-phy phymask table, publishes the
 * iport's properties (initiator-port WWN, num-phys, phymask,
 * dynamic-port, virtual-port, attached-port WWN), creates per-phy
 * kstats, and registers the iport with MDI so MPxIO can be used.
 * DDI_RESUME is a no-op for iport nodes.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
static int
mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	dev_info_t	*pdip;
	mptsas_t	*mpt;
	scsi_hba_tran_t	*hba_tran;
	char		*iport = NULL;
	char		phymask[MPTSAS_MAX_PHYS];
	mptsas_phymask_t	phy_mask = 0;
	int		dynamic_port = 0;
	uint32_t	page_address;
	char		initiator_wwnstr[MPTSAS_WWN_STRLEN];
	int		rval = DDI_FAILURE;
	int		i = 0;
	uint8_t		numphys = 0;
	uint8_t		phy_id;
	uint8_t		phy_port = 0;
	uint16_t	attached_devhdl = 0;
	uint32_t	dev_info;
	uint64_t	attached_sas_wwn;
	uint16_t	dev_hdl;
	uint16_t	pdev_hdl;
	uint16_t	bay_num, enclosure;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/*
		 * If this is a scsi-iport node, there is nothing to do
		 * on resume; the HBA node handles it.
		 */
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	pdip = ddi_get_parent(dip);

	/* The parent HBA node holds the per-controller soft state. */
	if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
	    NULL) {
		cmn_err(CE_WARN, "Failed attach iport because fail to "
		    "get tran vector for the HBA node");
		return (DDI_FAILURE);
	}

	mpt = TRAN2MPT(hba_tran);
	ASSERT(mpt != NULL);
	if (mpt == NULL)
		return (DDI_FAILURE);

	if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
	    NULL) {
		mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
		    "get tran vector for the iport node");
		return (DDI_FAILURE);
	}

	/*
	 * Point the iport's tran vector at the parent's soft state.
	 */
	hba_tran->tran_hba_private = mpt;

	ddi_report_dev(dip);

	/*
	 * Determine which port this iport node represents from its unit
	 * address.  A "v0" address denotes the virtual (RAID) port; it
	 * just gets the virtual-port property and no phy setup.
	 */
	iport = ddi_get_name_addr(dip);
	if (iport && strncmp(iport, "v0", 2) == 0) {
		if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
		    MPTSAS_VIRTUAL_PORT, 1) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	/*
	 * Match the iport's unit address (hex phymask string) against
	 * the controller's per-phy phymask table.
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		bzero(phymask, sizeof (phymask));
		(void) sprintf(phymask,
		    "%x", mpt->m_phy_info[i].phy_mask);
		if (strcmp(phymask, iport) == 0) {
			break;
		}
	}

	if (i == MPTSAS_MAX_PHYS) {
		mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
		    "seems not exist", iport);
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}

	phy_mask = mpt->m_phy_info[i].phy_mask;

	/* Auto-configured ports are reported as "dynamic". */
	if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
		dynamic_port = 1;
	else
		dynamic_port = 0;

	/*
	 * Update PHY info for smhba
	 */
	if (mptsas_smhba_phy_init(mpt)) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mptsas phy update "
		    "failed");
		return (DDI_FAILURE);
	}

	mutex_exit(&mpt->m_mutex);

	/* Count the phys that belong to this port. */
	numphys = 0;
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((phy_mask >> i) & 0x01) {
			numphys++;
		}
	}

	bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
	(void) sprintf(initiator_wwnstr, "w%016"PRIx64,
	    mpt->un.m_base_wwid);

	/*
	 * Publish the iport's identity properties.  On any failure the
	 * partially-set property is removed before bailing out.
	 */
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE,
		    dip, SCSI_ADDR_PROP_INITIATOR_PORT);
		mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
		    "prop update failed");
		return (DDI_FAILURE);
	}
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    MPTSAS_NUM_PHYS, numphys) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
		return (DDI_FAILURE);
	}

	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "phymask", phy_mask) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
		mptsas_log(mpt, CE_WARN, "mptsas phy mask "
		    "prop update failed");
		return (DDI_FAILURE);
	}

	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "dynamic-port", dynamic_port) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
		mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
		    "prop update failed");
		return (DDI_FAILURE);
	}
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    MPTSAS_VIRTUAL_PORT, 0) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
		    MPTSAS_VIRTUAL_PORT);
		mptsas_log(mpt, CE_WARN, "mptsas virtual port "
		    "prop update failed");
		return (DDI_FAILURE);
	}
	mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
	    &attached_devhdl);

	/*
	 * Look up SAS device page 0 for the attached device to learn
	 * the WWN on the far side of this port.
	 */
	mutex_enter(&mpt->m_mutex);
	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
	    &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
	    &pdev_hdl, &bay_num, &enclosure);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN,
		    "Failed to get device page0 for handle:%d",
		    attached_devhdl);
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}

	/* Record this port's phymask string as the smhba path. */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		bzero(phymask, sizeof (phymask));
		(void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
		if (strcmp(phymask, iport) == 0) {
			(void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
			    "%x",
			    mpt->m_phy_info[i].phy_mask);
		}
	}
	mutex_exit(&mpt->m_mutex);

	bzero(attached_wwnstr, sizeof (attached_wwnstr));
	(void) sprintf(attached_wwnstr, "w%016"PRIx64,
	    attached_sas_wwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE,
		    dip, SCSI_ADDR_PROP_ATTACHED_PORT);
		return (DDI_FAILURE);
	}

	/* Create kstats for each phy on this iport */

	mptsas_create_phy_stats(mpt, iport, dip);

	/*
	 * Register this SAS HBA iport with MDI (MPxIO/vhci); success
	 * enables multipathing for children of this iport.
	 */
	if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
	    dip, 0) == MDI_SUCCESS) {
		mpt->m_mpxio_enable = TRUE;
	}
	return (DDI_SUCCESS);
}
1009 1024
1010 1025 /*
1011 1026 * Notes:
1012 1027 * Set up all device state and allocate data structures,
1013 1028 * mutexes, condition variables, etc. for device operation.
1014 1029 * Add interrupts needed.
1015 1030 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1016 1031 */
static int
mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	mptsas_t		*mpt = NULL;
	int			instance, i, j;
	int			doneq_thread_num;
	/* Progress flags: which resources the fail: path must unwind. */
	char			intr_added = 0;
	char			map_setup = 0;
	char			config_setup = 0;
	char			hba_attach_setup = 0;
	char			smp_attach_setup = 0;
	char			mutex_init_done = 0;
	char			event_taskq_create = 0;
	char			dr_taskq_create = 0;
	char			doneq_thread_create = 0;
	scsi_hba_tran_t		*hba_tran;
	uint_t			mem_bar = MEM_SPACE;
	int			rval = DDI_FAILURE;

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	/* iport child nodes have their own, much smaller attach path. */
	if (scsi_hba_iport_unit_address(dip)) {
		return (mptsas_iport_attach(dip, cmd));
	}

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		mpt = TRAN2MPT(hba_tran);

		if (!mpt) {
			return (DDI_FAILURE);
		}

		/*
		 * Reset hardware and softc to "no outstanding commands"
		 * Note that a check condition can result on first command
		 * to a target.
		 */
		mutex_enter(&mpt->m_mutex);

		/*
		 * raise power.
		 */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			mutex_exit(&mpt->m_mutex);
			(void) pm_busy_component(dip, 0);
			rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
			if (rval == DDI_SUCCESS) {
				mutex_enter(&mpt->m_mutex);
			} else {
				/*
				 * The pm_raise_power() call above failed,
				 * and that can only occur if we were unable
				 * to reset the hardware.  This is probably
				 * due to unhealthy hardware, and because
				 * important filesystems (such as the root
				 * filesystem) could be on the attached disks,
				 * it would not be a good idea to continue,
				 * as we won't be entirely certain we are
				 * writing correct data.  So we panic() here
				 * to not only prevent possible data corruption,
				 * but to give developers or end users a hope
				 * of identifying and correcting any problems.
				 */
				fm_panic("mptsas could not reset hardware "
				    "during resume");
			}
		}

		mpt->m_suspended = 0;

		/*
		 * Reinitialize ioc
		 */
		mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
			mutex_exit(&mpt->m_mutex);
			if (mpt->m_options & MPTSAS_OPT_PM) {
				(void) pm_idle_component(dip, 0);
			}
			fm_panic("mptsas init chip fail during resume");
		}
		/*
		 * mptsas_update_driver_data needs interrupts so enable them
		 * first.
		 */
		MPTSAS_ENABLE_INTR(mpt);
		mptsas_update_driver_data(mpt);

		/* start requests, if possible */
		mptsas_restart_hba(mpt);

		mutex_exit(&mpt->m_mutex);

		/*
		 * Restart watch thread
		 */
		mutex_enter(&mptsas_global_mutex);
		if (mptsas_timeout_id == 0) {
			mptsas_timeout_id = timeout(mptsas_watch, NULL,
			    mptsas_tick);
			mptsas_timeouts_enabled = 1;
		}
		mutex_exit(&mptsas_global_mutex);

		/* report idle status to pm framework */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			(void) pm_idle_component(dip, 0);
		}

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);

	}

	instance = ddi_get_instance(dip);

	/*
	 * Allocate softc information.
	 */
	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
		mptsas_log(NULL, CE_WARN,
		    "mptsas%d: cannot allocate soft state", instance);
		goto fail;
	}

	mpt = ddi_get_soft_state(mptsas_state, instance);

	if (mpt == NULL) {
		mptsas_log(NULL, CE_WARN,
		    "mptsas%d: cannot get soft state", instance);
		goto fail;
	}

	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
	scsi_size_clean(dip);

	mpt->m_dip = dip;
	mpt->m_instance = instance;

	/* Make a per-instance copy of the structures */
	mpt->m_io_dma_attr = mptsas_dma_attrs64;
	/*
	 * Message frames use 64-bit DMA addressing only when the
	 * mptsas_use_64bit_msgaddr tunable is set; otherwise the
	 * 32-bit attributes are used (presumably a global tunable
	 * declared elsewhere in this file — confirm).
	 */
	if (mptsas_use_64bit_msgaddr) {
		mpt->m_msg_dma_attr = mptsas_dma_attrs64;
	} else {
		mpt->m_msg_dma_attr = mptsas_dma_attrs;
	}
	mpt->m_reg_acc_attr = mptsas_dev_attr;
	mpt->m_dev_acc_attr = mptsas_dev_attr;

	/*
	 * Initialize FMA
	 */
	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	mptsas_fm_init(mpt);

	if (mptsas_alloc_handshake_msg(mpt,
	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
		goto fail;
	}

	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
		goto fail;
	}
	config_setup++;

	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
	    0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "map setup failed");
		goto fail;
	}
	map_setup++;

	/*
	 * A taskq is created for dealing with the event handler
	 */
	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
		goto fail;
	}
	event_taskq_create++;

	/*
	 * A taskq is created for dealing with dr events
	 */
	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
	    "mptsas_dr_taskq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
		    "failed");
		goto fail;
	}
	dr_taskq_create++;

	/* Completion-thread tunables, overridable via driver properties. */
	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_thread_threshold_prop", 10);
	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_length_threshold_prop", 8);
	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_thread_n_prop", 8);

	if (mpt->m_doneq_thread_n) {
		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);

		mutex_enter(&mpt->m_doneq_mutex);
		mpt->m_doneq_thread_id =
		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
		    * mpt->m_doneq_thread_n, KM_SLEEP);

		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
			    CV_DRIVER, NULL);
			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
			    MUTEX_DRIVER, NULL);
			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
			mpt->m_doneq_thread_id[j].flag |=
			    MPTSAS_DONEQ_THREAD_ACTIVE;
			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
			mpt->m_doneq_thread_id[j].arg.t = j;
			mpt->m_doneq_thread_id[j].threadp =
			    thread_create(NULL, 0, mptsas_doneq_thread,
			    &mpt->m_doneq_thread_id[j].arg,
			    0, &p0, TS_RUN, minclsyspri);
			mpt->m_doneq_thread_id[j].donetail =
			    &mpt->m_doneq_thread_id[j].doneq;
			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
		}
		mutex_exit(&mpt->m_doneq_mutex);
		doneq_thread_create++;
	}

	/*
	 * Disable hardware interrupt since we're not ready to
	 * handle it yet.
	 *
	 * NOTE(review): interrupts are registered (but kept disabled)
	 * before the mutexes below are initialized, presumably so that
	 * mpt->m_intr_pri is valid for DDI_INTR_PRI() — confirm that
	 * mptsas_register_intrs() sets m_intr_pri.
	 */
	MPTSAS_DISABLE_INTR(mpt);
	if (mptsas_register_intrs(mpt) == FALSE)
		goto fail;
	intr_added++;

	/* Initialize mutex used in interrupt handler */
	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(mpt->m_intr_pri));
	mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(mpt->m_intr_pri));
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
		    NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
	}

	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
	mutex_init_done++;

	mutex_enter(&mpt->m_mutex);
	/*
	 * Initialize power management component
	 */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (mptsas_init_pm(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
			    "failed");
			goto fail;
		}
	}

	/*
	 * Initialize chip using Message Unit Reset, if allowed
	 */
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
		goto fail;
	}

	/*
	 * Fill in the phy_info structure and get the base WWID
	 */
	if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_manufacture_page5 failed!");
		goto fail;
	}

	if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_sas_io_unit_page_hndshk failed!");
		goto fail;
	}

	if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_manufacture_page0 failed!");
		goto fail;
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * Register the iport for multiple port HBA
	 */
	mptsas_iport_register(mpt);

	/*
	 * initialize SCSI HBA transport structure
	 */
	if (mptsas_hba_setup(mpt) == FALSE)
		goto fail;
	hba_attach_setup++;

	if (mptsas_smp_setup(mpt) == FALSE)
		goto fail;
	smp_attach_setup++;

	if (mptsas_cache_create(mpt) == FALSE)
		goto fail;

	mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
	if (mpt->m_scsi_reset_delay == 0) {
		mptsas_log(mpt, CE_NOTE,
		    "scsi_reset_delay of 0 is not recommended,"
		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
	}

	/*
	 * Initialize the wait and done FIFO queue
	 */
	mpt->m_donetail = &mpt->m_doneq;
	mpt->m_waitqtail = &mpt->m_waitq;
	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
	mpt->m_tx_draining = 0;

	/*
	 * ioc cmd queue initialize
	 */
	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
	mpt->m_dev_handle = 0xFFFF;

	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * enable event notification
	 */
	mutex_enter(&mpt->m_mutex);
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mutex_exit(&mpt->m_mutex);
		goto fail;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * Initialize PHY info for smhba
	 */
	if (mptsas_smhba_setup(mpt)) {
		mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
		    "failed");
		goto fail;
	}

	/* Check all dma handles allocated in attach */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
	    != DDI_SUCCESS)) {
		goto fail;
	}

	/* Check all acc handles allocated in attach */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle)
	    != DDI_SUCCESS)) {
		goto fail;
	}

	/*
	 * After this point, we are not going to fail the attach.
	 */
	/*
	 * used for mptsas_watch
	 */
	mptsas_list_add(mpt);

	/* Start the global watchdog if this is the first instance. */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_timeouts_enabled == 0) {
		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);

		mptsas_tick = mptsas_scsi_watchdog_tick *
		    drv_usectohz((clock_t)1000000);

		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
		mptsas_timeouts_enabled = 1;
	}
	mutex_exit(&mptsas_global_mutex);

	/* Print message of HBA present */
	ddi_report_dev(dip);

	/* report idle status to pm framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_idle_component(dip, 0);
	}

	return (DDI_SUCCESS);

fail:
	mptsas_log(mpt, CE_WARN, "attach failed");
	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	if (mpt) {
		/* Stop the global watchdog if no instance remains. */
		mutex_enter(&mptsas_global_mutex);

		if (mptsas_timeout_id && (mptsas_head == NULL)) {
			timeout_id_t tid = mptsas_timeout_id;
			mptsas_timeouts_enabled = 0;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		mutex_exit(&mptsas_global_mutex);
		/* deallocate in reverse order */
		mptsas_cache_destroy(mpt);

		if (smp_attach_setup) {
			mptsas_smp_teardown(mpt);
		}
		if (hba_attach_setup) {
			mptsas_hba_teardown(mpt);
		}

		if (mpt->m_targets)
			refhash_destroy(mpt->m_targets);
		if (mpt->m_smp_targets)
			refhash_destroy(mpt->m_smp_targets);

		if (mpt->m_active) {
			mptsas_free_active_slots(mpt);
		}
		if (intr_added) {
			mptsas_unregister_intrs(mpt);
		}

		if (doneq_thread_create) {
			/* Signal each completion thread and wait for exit. */
			mutex_enter(&mpt->m_doneq_mutex);
			doneq_thread_num = mpt->m_doneq_thread_n;
			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
				mpt->m_doneq_thread_id[j].flag &=
				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
				cv_signal(&mpt->m_doneq_thread_id[j].cv);
				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
			}
			while (mpt->m_doneq_thread_n) {
				cv_wait(&mpt->m_doneq_thread_cv,
				    &mpt->m_doneq_mutex);
			}
			for (j = 0; j < doneq_thread_num; j++) {
				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
			}
			kmem_free(mpt->m_doneq_thread_id,
			    sizeof (mptsas_doneq_thread_list_t)
			    * doneq_thread_num);
			mutex_exit(&mpt->m_doneq_mutex);
			cv_destroy(&mpt->m_doneq_thread_cv);
			mutex_destroy(&mpt->m_doneq_mutex);
		}
		if (event_taskq_create) {
			ddi_taskq_destroy(mpt->m_event_taskq);
		}
		if (dr_taskq_create) {
			ddi_taskq_destroy(mpt->m_dr_taskq);
		}
		if (mutex_init_done) {
			mutex_destroy(&mpt->m_tx_waitq_mutex);
			mutex_destroy(&mpt->m_passthru_mutex);
			mutex_destroy(&mpt->m_mutex);
			for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
				mutex_destroy(
				    &mpt->m_phy_info[i].smhba_info.phy_mutex);
			}
			cv_destroy(&mpt->m_cv);
			cv_destroy(&mpt->m_passthru_cv);
			cv_destroy(&mpt->m_fw_cv);
			cv_destroy(&mpt->m_config_cv);
			cv_destroy(&mpt->m_fw_diag_cv);
		}

		if (map_setup) {
			mptsas_cfg_fini(mpt);
		}
		if (config_setup) {
			mptsas_config_space_fini(mpt);
		}
		mptsas_free_handshake_msg(mpt);
		mptsas_hba_fini(mpt);

		mptsas_fm_fini(mpt);
		ddi_soft_state_free(mptsas_state, instance);
		ddi_prop_remove_all(dip);
	}
	return (DDI_FAILURE);
}
1562 1581
/*
 * DDI_SUSPEND handling for the HBA node: cancel this instance's
 * timeouts, stop the global watch threads when every instance is
 * suspended, quiesce the chip (disable interrupts, sync IR via a RAID
 * action shutdown), and drain the event/dr taskqs.  Always returns
 * DDI_SUCCESS; iport nodes and already-suspended instances are no-ops.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Nested suspend: just bump the count and return. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt.  The mutex is dropped
	 * around untimeout() since the callback may need m_mutex.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power (PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1669 1688
1670 1689 #ifdef __sparc
/*
 * SPARC-only reset entry point: quiet the HBA before the system resets
 * by disabling hardware interrupts and syncing IR via a RAID action
 * system shutdown.  Always returns DDI_SUCCESS.
 */
/*ARGSUSED*/
static int
mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/*
	 * Send RAID action system shutdown to sync IR.  Disable HBA
	 * interrupts in hardware first.
	 */
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1699 1718 #else /* __sparc */
1700 1719 /*
1701 1720 * quiesce(9E) entry point.
1702 1721 *
1703 1722 * This function is called when the system is single-threaded at high
1704 1723 * PIL with preemption disabled. Therefore, this function must not be
1705 1724 * blocked.
1706 1725 *
1707 1726 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1708 1727 * DDI_FAILURE indicates an error condition and should almost never happen.
1709 1728 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1735 1754 #endif /* __sparc */
1736 1755
1737 1756 /*
1738 1757 * detach(9E). Remove all device allocations and system resources;
1739 1758 * disable device interrupts.
1740 1759 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1741 1760 */
1742 1761 static int
1743 1762 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1744 1763 {
1745 1764 /* CONSTCOND */
1746 1765 ASSERT(NO_COMPETING_THREADS);
1747 1766 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1748 1767
1749 1768 switch (cmd) {
1750 1769 case DDI_DETACH:
1751 1770 return (mptsas_do_detach(devi));
1752 1771
1753 1772 case DDI_SUSPEND:
1754 1773 return (mptsas_suspend(devi));
1755 1774
1756 1775 default:
1757 1776 return (DDI_FAILURE);
1758 1777 }
1759 1778 /* NOTREACHED */
1760 1779 }
1761 1780
/*
 * Second-level DDI_DETACH handler: tears down what mptsas_attach() built.
 *
 * For an iport node: free all MPxIO pathinfo children (failing the detach
 * if any cannot be freed), unregister the pHCI and remove properties.
 *
 * For the HBA node: raise power to D0, quiesce the IOC (RAID shutdown
 * action plus message unit reset), then release interrupts, taskqs,
 * doneq threads, timeouts, stats, hashes, caches, DMA resources and
 * locks, roughly in reverse order of attach.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* Free every client path; abort detach on failure. */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		/* Ask each doneq worker thread to exit ... */
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		/* ... and wait until they have all gone away. */
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		/* Drop m_mutex around untimeout() to avoid deadlock. */
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1977 1996
1978 1997 static void
1979 1998 mptsas_list_add(mptsas_t *mpt)
1980 1999 {
1981 2000 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1982 2001
1983 2002 if (mptsas_head == NULL) {
1984 2003 mptsas_head = mpt;
1985 2004 } else {
1986 2005 mptsas_tail->m_next = mpt;
1987 2006 }
1988 2007 mptsas_tail = mpt;
1989 2008 rw_exit(&mptsas_global_rwlock);
1990 2009 }
1991 2010
/*
 * Unlink an mpt instance from the global softc list.  Panics if the
 * instance is not on the list.
 */
static void
mptsas_list_del(mptsas_t *mpt)
{
	mptsas_t *m;
	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	if (mptsas_head == mpt) {
		/* m becomes the new head (mpt's successor). */
		m = mptsas_head = mpt->m_next;
	} else {
		/* Walk to mpt's predecessor and unlink mpt. */
		for (m = mptsas_head; m != NULL; m = m->m_next) {
			if (m->m_next == mpt) {
				m->m_next = mpt->m_next;
				break;
			}
		}
		if (m == NULL) {
			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
		}
	}

	/*
	 * If mpt was the tail, m is either the new tail (its
	 * predecessor) or NULL when the list is now empty.
	 */
	if (mptsas_tail == mpt) {
		mptsas_tail = m;
	}
	rw_exit(&mptsas_global_rwlock);
}
2019 2038
2020 2039 static int
2021 2040 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2022 2041 {
2023 2042 ddi_dma_attr_t task_dma_attrs;
2024 2043
2025 2044 mpt->m_hshk_dma_size = 0;
2026 2045 task_dma_attrs = mpt->m_msg_dma_attr;
2027 2046 task_dma_attrs.dma_attr_sgllen = 1;
2028 2047 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2029 2048
2030 2049 /* allocate Task Management ddi_dma resources */
2031 2050 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2032 2051 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2033 2052 alloc_size, NULL) == FALSE) {
2034 2053 return (DDI_FAILURE);
2035 2054 }
2036 2055 mpt->m_hshk_dma_size = alloc_size;
2037 2056
2038 2057 return (DDI_SUCCESS);
2039 2058 }
2040 2059
2041 2060 static void
2042 2061 mptsas_free_handshake_msg(mptsas_t *mpt)
2043 2062 {
2044 2063 if (mpt->m_hshk_dma_size == 0)
2045 2064 return;
2046 2065 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2047 2066 mpt->m_hshk_dma_size = 0;
2048 2067 }
2049 2068
/*
 * Allocate the SCSA transport structure, fill in the HBA entry points
 * and attach it to the framework.  Returns TRUE on success, FALSE if
 * scsi_hba_attach_setup() fails (the tran is freed in that case).
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	/* Target init/teardown entry points. */
	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	/* Command submission and control entry points. */
	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	/* Event callbacks are not supported by this driver. */
	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports. We need tran was cloned.
	 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
	 * inherited to iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2111 2130
2112 2131 static void
2113 2132 mptsas_hba_teardown(mptsas_t *mpt)
2114 2133 {
2115 2134 (void) scsi_hba_detach(mpt->m_dip);
2116 2135 if (mpt->m_tran != NULL) {
2117 2136 scsi_hba_tran_free(mpt->m_tran);
2118 2137 mpt->m_tran = NULL;
2119 2138 }
2120 2139 }
2121 2140
/*
 * Register one SCSA iport for every distinct port found among the HBA's
 * attached phys; each iport is named by the hex mask of the phys that
 * make up the port.  A fixed virtual iport "v0" is always registered
 * for RAID volumes.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip phys with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Phy already covered by a previously-registered port. */
		if ((mask & (1 << i)) != 0)
			continue;

		/* Gather all attached phys that belong to this port. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the port's phymask on each member phy. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* iport name is the port's phymask rendered in hex. */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/* Drop m_mutex across the framework call-out. */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2176 2195
/*
 * Attach the SMP (SAS management protocol) transport for this instance
 * and create the refhash used to track SMP targets.  Returns TRUE on
 * success, FALSE if smp_hba_attach_setup() fails.
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* NOTE(review): 0xFFFF appears to be a "no handle" sentinel (the */
	/* teardown path resets it to 0) -- confirm against the firmware spec. */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2202 2221
2203 2222 static void
2204 2223 mptsas_smp_teardown(mptsas_t *mpt)
2205 2224 {
2206 2225 (void) smp_hba_detach(mpt->m_dip);
2207 2226 if (mpt->m_smptran != NULL) {
2208 2227 smp_hba_tran_free(mpt->m_smptran);
2209 2228 mpt->m_smptran = NULL;
2210 2229 }
2211 2230 mpt->m_smp_devhdl = 0;
2212 2231 }
2213 2232
/*
 * Create the per-instance kmem caches: one for command packets and one
 * for extra SGL frames.  Returns TRUE on success, FALSE on failure.
 */
static int
mptsas_cache_create(mptsas_t *mpt)
{
	int instance = mpt->m_instance;
	char buf[64];

	/*
	 * create kmem cache for packets
	 */
	(void) sprintf(buf, "mptsas%d_cache", instance);
	mpt->m_kmem_cache = kmem_cache_create(buf,
	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 16,
	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
	    NULL, (void *)mpt, NULL, 0);

	if (mpt->m_kmem_cache == NULL) {
		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
		return (FALSE);
	}

	/*
	 * create kmem cache for extra SGL frames if SGL cannot
	 * be accommodated into main request frame.
	 */
	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
	mpt->m_cache_frames = kmem_cache_create(buf,
	    sizeof (mptsas_cache_frames_t), 16,
	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
	    NULL, (void *)mpt, NULL, 0);

	if (mpt->m_cache_frames == NULL) {
		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
		return (FALSE);
	}

	return (TRUE);
}
2251 2270
2252 2271 static void
2253 2272 mptsas_cache_destroy(mptsas_t *mpt)
2254 2273 {
2255 2274 /* deallocate in reverse order */
2256 2275 if (mpt->m_cache_frames) {
2257 2276 kmem_cache_destroy(mpt->m_cache_frames);
2258 2277 mpt->m_cache_frames = NULL;
2259 2278 }
2260 2279 if (mpt->m_kmem_cache) {
2261 2280 kmem_cache_destroy(mpt->m_kmem_cache);
2262 2281 mpt->m_kmem_cache = NULL;
2263 2282 }
2264 2283 }
2265 2284
/*
 * power(9E) entry point.
 *
 * Raises the chip to D0 (waiting up to 30 seconds for the IOC to leave
 * the RESET state, hard-resetting it if it is still not operational) or
 * drops it to D3.  Refuses to lower power while the device is busy.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			/* 3000 polls x 10ms delay = 30 seconds. */
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2337 2356
/*
 * Initialize configuration space and figure out which
 * chip and revision of the chip the mpt driver is using.
 * Returns TRUE on success, FALSE if config space cannot be mapped.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Already mapped; nothing further to do. */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team. This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2396 2415
2397 2416 static void
2398 2417 mptsas_config_space_fini(mptsas_t *mpt)
2399 2418 {
2400 2419 if (mpt->m_config_handle != NULL) {
2401 2420 mptsas_disable_bus_master(mpt);
2402 2421 pci_config_teardown(&mpt->m_config_handle);
2403 2422 mpt->m_config_handle = NULL;
2404 2423 }
2405 2424 }
2406 2425
2407 2426 static void
2408 2427 mptsas_setup_cmd_reg(mptsas_t *mpt)
2409 2428 {
2410 2429 ushort_t cmdreg;
2411 2430
2412 2431 /*
2413 2432 * Set the command register to the needed values.
2414 2433 */
2415 2434 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2416 2435 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2417 2436 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2418 2437 cmdreg &= ~PCI_COMM_IO;
2419 2438 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2420 2439 }
2421 2440
2422 2441 static void
2423 2442 mptsas_disable_bus_master(mptsas_t *mpt)
2424 2443 {
2425 2444 ushort_t cmdreg;
2426 2445
2427 2446 /*
2428 2447 * Clear the master enable bit in the PCI command register.
2429 2448 * This prevents any bus mastering activity like DMA.
2430 2449 */
2431 2450 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2432 2451 cmdreg &= ~PCI_COMM_ME;
2433 2452 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2434 2453 }
2435 2454
2436 2455 int
2437 2456 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2438 2457 {
2439 2458 ddi_dma_attr_t attrs;
2440 2459
2441 2460 attrs = mpt->m_io_dma_attr;
2442 2461 attrs.dma_attr_sgllen = 1;
2443 2462
2444 2463 ASSERT(dma_statep != NULL);
2445 2464
2446 2465 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2447 2466 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2448 2467 &dma_statep->cookie) == FALSE) {
2449 2468 return (DDI_FAILURE);
2450 2469 }
2451 2470
2452 2471 return (DDI_SUCCESS);
2453 2472 }
2454 2473
/*
 * Release the DMA and access handles allocated by mptsas_dma_alloc()
 * and reset the recorded size.
 */
void
mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
{
	ASSERT(dma_statep != NULL);
	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
	dma_statep->size = 0;
}
2462 2481
/*
 * Allocate a temporary single-cookie DMA buffer of `size' bytes, run
 * `callback' with the mapped memory and its access handle, then free
 * the buffer.  Returns the callback's result, or DDI_FAILURE if the
 * allocation fails or an FM check on the handles fails afterwards.
 * Caller must hold mpt->m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t		attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t			memp;
	ddi_acc_handle_t	accessp;
	int			rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/* Check both handles for faults before trusting the result. */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2495 2514
2496 2515 static int
2497 2516 mptsas_alloc_request_frames(mptsas_t *mpt)
2498 2517 {
2499 2518 ddi_dma_attr_t frame_dma_attrs;
2500 2519 caddr_t memp;
2501 2520 ddi_dma_cookie_t cookie;
2502 2521 size_t mem_size;
2503 2522
2504 2523 /*
2505 2524 * re-alloc when it has already alloced
2506 2525 */
2507 2526 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2508 2527 &mpt->m_acc_req_frame_hdl);
2509 2528
2510 2529 /*
2511 2530 * The size of the request frame pool is:
2512 2531 * Number of Request Frames * Request Frame Size
2513 2532 */
2514 2533 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2515 2534
2516 2535 /*
2517 2536 * set the DMA attributes. System Request Message Frames must be
2518 2537 * aligned on a 16-byte boundry.
2519 2538 */
2520 2539 frame_dma_attrs = mpt->m_msg_dma_attr;
2521 2540 frame_dma_attrs.dma_attr_align = 16;
2522 2541 frame_dma_attrs.dma_attr_sgllen = 1;
2523 2542
2524 2543 /*
2525 2544 * allocate the request frame pool.
2526 2545 */
2527 2546 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2528 2547 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2529 2548 mem_size, &cookie) == FALSE) {
2530 2549 return (DDI_FAILURE);
2531 2550 }
2532 2551
2533 2552 /*
2534 2553 * Store the request frame memory address. This chip uses this
2535 2554 * address to dma to and from the driver's frame. The second
2536 2555 * address is the address mpt uses to fill in the frame.
2537 2556 */
2538 2557 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2539 2558 mpt->m_req_frame = memp;
2540 2559
2541 2560 /*
2542 2561 * Clear the request frame pool.
2543 2562 */
2544 2563 bzero(mpt->m_req_frame, mem_size);
2545 2564
2546 2565 return (DDI_SUCCESS);
2547 2566 }
2548 2567
2549 2568 static int
2550 2569 mptsas_alloc_reply_frames(mptsas_t *mpt)
2551 2570 {
2552 2571 ddi_dma_attr_t frame_dma_attrs;
2553 2572 caddr_t memp;
2554 2573 ddi_dma_cookie_t cookie;
2555 2574 size_t mem_size;
2556 2575
2557 2576 /*
2558 2577 * re-alloc when it has already alloced
2559 2578 */
2560 2579 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2561 2580 &mpt->m_acc_reply_frame_hdl);
2562 2581
2563 2582 /*
2564 2583 * The size of the reply frame pool is:
2565 2584 * Number of Reply Frames * Reply Frame Size
2566 2585 */
2567 2586 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2568 2587
2569 2588 /*
2570 2589 * set the DMA attributes. System Reply Message Frames must be
2571 2590 * aligned on a 4-byte boundry. This is the default.
2572 2591 */
2573 2592 frame_dma_attrs = mpt->m_msg_dma_attr;
2574 2593 frame_dma_attrs.dma_attr_sgllen = 1;
2575 2594
2576 2595 /*
2577 2596 * allocate the reply frame pool
2578 2597 */
2579 2598 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2580 2599 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2581 2600 mem_size, &cookie) == FALSE) {
2582 2601 return (DDI_FAILURE);
2583 2602 }
2584 2603
2585 2604 /*
2586 2605 * Store the reply frame memory address. This chip uses this
2587 2606 * address to dma to and from the driver's frame. The second
2588 2607 * address is the address mpt uses to process the frame.
2589 2608 */
2590 2609 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2591 2610 mpt->m_reply_frame = memp;
2592 2611
2593 2612 /*
2594 2613 * Clear the reply frame pool.
2595 2614 */
2596 2615 bzero(mpt->m_reply_frame, mem_size);
2597 2616
2598 2617 return (DDI_SUCCESS);
2599 2618 }
2600 2619
2601 2620 static int
2602 2621 mptsas_alloc_free_queue(mptsas_t *mpt)
2603 2622 {
2604 2623 ddi_dma_attr_t frame_dma_attrs;
2605 2624 caddr_t memp;
2606 2625 ddi_dma_cookie_t cookie;
2607 2626 size_t mem_size;
2608 2627
2609 2628 /*
2610 2629 * re-alloc when it has already alloced
2611 2630 */
2612 2631 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2613 2632 &mpt->m_acc_free_queue_hdl);
2614 2633
2615 2634 /*
2616 2635 * The reply free queue size is:
2617 2636 * Reply Free Queue Depth * 4
2618 2637 * The "4" is the size of one 32 bit address (low part of 64-bit
2619 2638 * address)
2620 2639 */
2621 2640 mem_size = mpt->m_free_queue_depth * 4;
2622 2641
2623 2642 /*
2624 2643 * set the DMA attributes The Reply Free Queue must be aligned on a
2625 2644 * 16-byte boundry.
2626 2645 */
2627 2646 frame_dma_attrs = mpt->m_msg_dma_attr;
2628 2647 frame_dma_attrs.dma_attr_align = 16;
2629 2648 frame_dma_attrs.dma_attr_sgllen = 1;
2630 2649
2631 2650 /*
2632 2651 * allocate the reply free queue
2633 2652 */
2634 2653 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2635 2654 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2636 2655 mem_size, &cookie) == FALSE) {
2637 2656 return (DDI_FAILURE);
2638 2657 }
2639 2658
2640 2659 /*
2641 2660 * Store the reply free queue memory address. This chip uses this
2642 2661 * address to read from the reply free queue. The second address
2643 2662 * is the address mpt uses to manage the queue.
2644 2663 */
2645 2664 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2646 2665 mpt->m_free_queue = memp;
2647 2666
2648 2667 /*
2649 2668 * Clear the reply free queue memory.
2650 2669 */
2651 2670 bzero(mpt->m_free_queue, mem_size);
2652 2671
2653 2672 return (DDI_SUCCESS);
2654 2673 }
2655 2674
2656 2675 static int
2657 2676 mptsas_alloc_post_queue(mptsas_t *mpt)
2658 2677 {
2659 2678 ddi_dma_attr_t frame_dma_attrs;
2660 2679 caddr_t memp;
2661 2680 ddi_dma_cookie_t cookie;
2662 2681 size_t mem_size;
2663 2682
2664 2683 /*
2665 2684 * re-alloc when it has already alloced
2666 2685 */
2667 2686 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2668 2687 &mpt->m_acc_post_queue_hdl);
2669 2688
2670 2689 /*
2671 2690 * The reply descriptor post queue size is:
2672 2691 * Reply Descriptor Post Queue Depth * 8
2673 2692 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2674 2693 */
2675 2694 mem_size = mpt->m_post_queue_depth * 8;
2676 2695
2677 2696 /*
2678 2697 * set the DMA attributes. The Reply Descriptor Post Queue must be
2679 2698 * aligned on a 16-byte boundry.
2680 2699 */
2681 2700 frame_dma_attrs = mpt->m_msg_dma_attr;
2682 2701 frame_dma_attrs.dma_attr_align = 16;
2683 2702 frame_dma_attrs.dma_attr_sgllen = 1;
2684 2703
2685 2704 /*
2686 2705 * allocate the reply post queue
2687 2706 */
2688 2707 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2689 2708 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2690 2709 mem_size, &cookie) == FALSE) {
2691 2710 return (DDI_FAILURE);
2692 2711 }
2693 2712
2694 2713 /*
2695 2714 * Store the reply descriptor post queue memory address. This chip
2696 2715 * uses this address to write to the reply descriptor post queue. The
2697 2716 * second address is the address mpt uses to manage the queue.
2698 2717 */
2699 2718 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2700 2719 mpt->m_post_queue = memp;
2701 2720
2702 2721 /*
2703 2722 * Clear the reply post queue memory.
2704 2723 */
2705 2724 bzero(mpt->m_post_queue, mem_size);
2706 2725
2707 2726 return (DDI_SUCCESS);
2708 2727 }
2709 2728
2710 2729 static void
2711 2730 mptsas_alloc_reply_args(mptsas_t *mpt)
2712 2731 {
2713 2732 if (mpt->m_replyh_args == NULL) {
2714 2733 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2715 2734 mpt->m_max_replies, KM_SLEEP);
2716 2735 }
2717 2736 }
2718 2737
2719 2738 static int
2720 2739 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2721 2740 {
2722 2741 mptsas_cache_frames_t *frames = NULL;
2723 2742 if (cmd->cmd_extra_frames == NULL) {
2724 2743 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2725 2744 if (frames == NULL) {
2726 2745 return (DDI_FAILURE);
2727 2746 }
2728 2747 cmd->cmd_extra_frames = frames;
2729 2748 }
2730 2749 return (DDI_SUCCESS);
2731 2750 }
2732 2751
2733 2752 static void
2734 2753 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2735 2754 {
2736 2755 if (cmd->cmd_extra_frames) {
2737 2756 kmem_cache_free(mpt->m_cache_frames,
2738 2757 (void *)cmd->cmd_extra_frames);
2739 2758 cmd->cmd_extra_frames = NULL;
2740 2759 }
2741 2760 }
2742 2761
/*
 * Release the device register mapping (m_datap) established during
 * attach/config setup.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2749 2768
/*
 * Tear down HBA memory resources: release the DMA memory allocated for
 * the request frames, reply frames, reply free queue and reply
 * descriptor post queue, then free the reply handler argument array if
 * it was allocated.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/* Sized by m_max_replies when allocated; see the alloc path. */
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2775 2794
2776 2795 static int
2777 2796 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2778 2797 {
2779 2798 int lun = 0;
2780 2799 char *sas_wwn = NULL;
2781 2800 int phynum = -1;
2782 2801 int reallen = 0;
2783 2802
2784 2803 /* Get the target num */
2785 2804 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2786 2805 LUN_PROP, 0);
2787 2806
2788 2807 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2789 2808 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2790 2809 /*
2791 2810 * Stick in the address of form "pPHY,LUN"
2792 2811 */
2793 2812 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2794 2813 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2795 2814 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2796 2815 == DDI_PROP_SUCCESS) {
2797 2816 /*
2798 2817 * Stick in the address of the form "wWWN,LUN"
2799 2818 */
2800 2819 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2801 2820 ddi_prop_free(sas_wwn);
2802 2821 } else {
2803 2822 return (DDI_FAILURE);
2804 2823 }
2805 2824
2806 2825 ASSERT(reallen < len);
2807 2826 if (reallen >= len) {
2808 2827 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2809 2828 "length too small, it needs to be %d bytes", reallen + 1);
2810 2829 }
2811 2830 return (DDI_SUCCESS);
2812 2831 }
2813 2832
/*
 * tran_tgt_init(9E) - target device instance initialization
 *
 * Resolves the child's (target-port WWN, phymask) address, looks the
 * target up in the driver's target hash, attaches per-target private
 * data to the tran handle, and for SATA/ATAPI devices overrides the
 * SCSA "inquiry-*" properties with vid/pid/fw parsed from ATA
 * IDENTIFY data (VPD page 89h).
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t		*mpt;
	int			lun = sd->sd_address.a_lun;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	mptsas_target_t		*ptgt = NULL;
	char			*psas_wwn = NULL;
	mptsas_phymask_t	phymask = 0;
	uint64_t		sas_wwn = 0;
	mptsas_target_addr_t	addr;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent nodes are merged into the matching persistent
	 * node and this instance is rejected.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/*
		 * Multipath (scsi_vhci) client: fetch LUN and target
		 * port WWN from the pathinfo node properties.
		 */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				/* Unparsable WWN string; treat as absent. */
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/*
		 * Plain child node: fetch LUN and target port WWN from
		 * the devinfo node properties.
		 */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}

	ASSERT((sas_wwn != 0) || (phymask != 0));
	addr.mta_wwn = sas_wwn;
	addr.mta_phymask = phymask;
	/* Look the target up by (WWN, phymask) under the instance lock. */
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_lookup(mpt->m_targets, &addr);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		/* Attach per-target private data (LUN + target pointer). */
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* mpxio clients are done; the inquiry fixups below are for
	 * physical paths only. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/* Inquiry failure is logged but not fatal here. */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* The IDENTIFY data begins at byte 60 of the page. */
		sid = (void *)(&inq89[60]);

		/* ATA IDENTIFY strings are stored byte-swapped. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into into vid/pid at the first blank/tab
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
3007 3026 /*
3008 3027 * tran_tgt_free(9E) - target device instance deallocation
3009 3028 */
3010 3029 static void
3011 3030 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3012 3031 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3013 3032 {
3014 3033 #ifndef __lock_lint
3015 3034 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3016 3035 #endif
3017 3036
3018 3037 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3019 3038
3020 3039 if (tgt_private != NULL) {
3021 3040 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3022 3041 hba_tran->tran_tgt_private = NULL;
3023 3042 }
3024 3043 }
3025 3044
3026 3045 /*
3027 3046 * scsi_pkt handling
3028 3047 *
3029 3048 * Visible to the external world via the transport structure.
3030 3049 */
3031 3050
3032 3051 /*
3033 3052 * Notes:
3034 3053 * - transport the command to the addressed SCSI target/lun device
3035 3054 * - normal operation is to schedule the command to be transported,
3036 3055 * and return TRAN_ACCEPT if this is successful.
3037 3056 * - if NO_INTR, tran_start must poll device for command completion
3038 3057 */
/*
 * tran_start entry point: prepare the packet, then submit it under the
 * instance mutex, using the tx_waitq scheme (described below) when the
 * done-queue helper threads are running.
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmd. A extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queud and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		/* Helper threads running: the tx_waitq scheme is active. */
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			/* Instance lock acquired without contention. */
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled cmd: never queued, block for the lock. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/* A drainer is active: just append. */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3181 3200
/*
 * Accept all the queued cmds (if any) before accepting the current one.
 * Called with m_mutex held; briefly takes m_tx_waitq_mutex to drain.
 */
static int
mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	/*
	 * The call to mptsas_accept_tx_waitq() must always be performed
	 * because that is where mpt->m_tx_draining is cleared.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	mptsas_accept_tx_waitq(mpt);
	mutex_exit(&mpt->m_tx_waitq_mutex);
	/*
	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
	 * in this case, m_mutex is acquired.
	 */
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * The command should be allowed to retry by returning
			 * TRAN_BUSY to stall the I/O's which come from
			 * scsi_vhci since the device/path is in unstable state
			 * now.
			 */
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the command by
			 * return TRAN_FATAL_ERROR.
			 */
			return (TRAN_FATAL_ERROR);
		}
	}
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3224 3243
/*
 * Accept one prepared command for execution: reset a drained target's
 * throttle, reject commands for invalid device handles (BUSY during an
 * HBA reset so they can be retried, FATAL otherwise), then either start
 * the command immediately or place it on the wait queue.  Polled
 * (FLAG_NOINTR) commands are driven to completion here.  Called with
 * m_mutex held.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Prepare the packet now if tran_start didn't already do so. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* tx_waitq commands complete via the done queue. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command. In theory, command from scsi_vhci
	 * client is impossible send down command with invalid
	 * devhdl since devhdl is set after path offline, target
	 * driver is not suppose to select a offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free slot: fall back to the wait queue. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3328 3347
/*
 * Reserve a request slot for the command using the rotor allocator.
 * On success records the slot in cmd->cmd_slot, bumps the outstanding
 * command counts and sets the command's expiration state, returning
 * TRUE; returns FALSE when every normal slot is occupied.  Called with
 * m_mutex held.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots = mpt->m_active;
	uint_t		slot, start_rotor;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor.  If no slot is
	 * available, we'll return FALSE to indicate that.  This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1.  The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Wrapped all the way around: every slot is busy. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands,
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3390 3409
3391 3410 /*
3392 3411 * prepare the pkt:
3393 3412 * the pkt may have been resubmitted or just reused so
3394 3413 * initialize some fields and do some checks.
3395 3414 */
3396 3415 static int
3397 3416 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3398 3417 {
3399 3418 struct scsi_pkt *pkt = CMD2PKT(cmd);
3400 3419
3401 3420 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3402 3421
3403 3422 /*
3404 3423 * Reinitialize some fields that need it; the packet may
3405 3424 * have been resubmitted
3406 3425 */
3407 3426 pkt->pkt_reason = CMD_CMPLT;
3408 3427 pkt->pkt_state = 0;
3409 3428 pkt->pkt_statistics = 0;
3410 3429 pkt->pkt_resid = 0;
3411 3430 cmd->cmd_age = 0;
3412 3431 cmd->cmd_pkt_flags = pkt->pkt_flags;
3413 3432
3414 3433 /*
3415 3434 * zero status byte.
3416 3435 */
3417 3436 *(pkt->pkt_scbp) = 0;
3418 3437
3419 3438 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3420 3439 pkt->pkt_resid = cmd->cmd_dmacount;
3421 3440
3422 3441 /*
3423 3442 * consistent packets need to be sync'ed first
3424 3443 * (only for data going out)
3425 3444 */
3426 3445 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3427 3446 (cmd->cmd_flags & CFLAG_DMASEND)) {
3428 3447 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3429 3448 DDI_DMA_SYNC_FORDEV);
3430 3449 }
3431 3450 }
3432 3451
3433 3452 cmd->cmd_flags =
3434 3453 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3435 3454 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3436 3455
3437 3456 return (TRAN_ACCEPT);
3438 3457 }
3439 3458
3440 3459 /*
3441 3460 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3442 3461 *
3443 3462 * One of three possibilities:
3444 3463 * - allocate scsi_pkt
3445 3464 * - allocate scsi_pkt and DMA resources
3446 3465 * - allocate DMA resources to an already-allocated pkt
3447 3466 */
3448 3467 static struct scsi_pkt *
3449 3468 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3450 3469 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3451 3470 int (*callback)(), caddr_t arg)
3452 3471 {
3453 3472 mptsas_cmd_t *cmd, *new_cmd;
3454 3473 mptsas_t *mpt = ADDR2MPT(ap);
3455 3474 int failure = 1;
3456 3475 uint_t oldcookiec;
3457 3476 mptsas_target_t *ptgt = NULL;
3458 3477 int rval;
3459 3478 mptsas_tgt_private_t *tgt_private;
3460 3479 int kf;
3461 3480
3462 3481 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3463 3482
3464 3483 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3465 3484 tran_tgt_private;
3466 3485 ASSERT(tgt_private != NULL);
3467 3486 if (tgt_private == NULL) {
3468 3487 return (NULL);
3469 3488 }
3470 3489 ptgt = tgt_private->t_private;
3471 3490 ASSERT(ptgt != NULL);
3472 3491 if (ptgt == NULL)
3473 3492 return (NULL);
3474 3493 ap->a_target = ptgt->m_devhdl;
3475 3494 ap->a_lun = tgt_private->t_lun;
3476 3495
3477 3496 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3478 3497 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3479 3498 statuslen *= 100; tgtlen *= 4;
3480 3499 #endif
3481 3500 NDBG3(("mptsas_scsi_init_pkt:\n"
3482 3501 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3483 3502 ap->a_target, (void *)pkt, (void *)bp,
3484 3503 cmdlen, statuslen, tgtlen, flags));
3485 3504
3486 3505 /*
3487 3506 * Allocate the new packet.
3488 3507 */
3489 3508 if (pkt == NULL) {
3490 3509 ddi_dma_handle_t save_dma_handle;
3491 3510 ddi_dma_handle_t save_arq_dma_handle;
3492 3511 struct buf *save_arq_bp;
3493 3512 ddi_dma_cookie_t save_arqcookie;
3494 3513
3495 3514 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3496 3515
3497 3516 if (cmd) {
3498 3517 save_dma_handle = cmd->cmd_dmahandle;
3499 3518 save_arq_dma_handle = cmd->cmd_arqhandle;
3500 3519 save_arq_bp = cmd->cmd_arq_buf;
3501 3520 save_arqcookie = cmd->cmd_arqcookie;
3502 3521 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3503 3522 cmd->cmd_dmahandle = save_dma_handle;
3504 3523 cmd->cmd_arqhandle = save_arq_dma_handle;
3505 3524 cmd->cmd_arq_buf = save_arq_bp;
3506 3525 cmd->cmd_arqcookie = save_arqcookie;
3507 3526
3508 3527 pkt = (void *)((uchar_t *)cmd +
3509 3528 sizeof (struct mptsas_cmd));
3510 3529 pkt->pkt_ha_private = (opaque_t)cmd;
3511 3530 pkt->pkt_address = *ap;
3512 3531 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3513 3532 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3514 3533 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3515 3534 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3516 3535 cmd->cmd_cdblen = (uchar_t)cmdlen;
3517 3536 cmd->cmd_scblen = statuslen;
3518 3537 cmd->cmd_rqslen = SENSE_LENGTH;
3519 3538 cmd->cmd_tgt_addr = ptgt;
3520 3539 failure = 0;
3521 3540 }
3522 3541
3523 3542 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3524 3543 (tgtlen > PKT_PRIV_LEN) ||
3525 3544 (statuslen > EXTCMDS_STATUS_SIZE)) {
3526 3545 if (failure == 0) {
3527 3546 /*
3528 3547 * if extern alloc fails, all will be
3529 3548 * deallocated, including cmd
3530 3549 */
3531 3550 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3532 3551 cmdlen, tgtlen, statuslen, kf);
3533 3552 }
3534 3553 if (failure) {
3535 3554 /*
3536 3555 * if extern allocation fails, it will
3537 3556 * deallocate the new pkt as well
3538 3557 */
3539 3558 return (NULL);
3540 3559 }
3541 3560 }
3542 3561 new_cmd = cmd;
3543 3562
3544 3563 } else {
3545 3564 cmd = PKT2CMD(pkt);
3546 3565 new_cmd = NULL;
3547 3566 }
3548 3567
3549 3568
3550 3569 /* grab cmd->cmd_cookiec here as oldcookiec */
3551 3570
3552 3571 oldcookiec = cmd->cmd_cookiec;
3553 3572
3554 3573 /*
3555 3574 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3556 3575 * greater than 0 and we'll need to grab the next dma window
3557 3576 */
3558 3577 /*
3559 3578 * SLM-not doing extra command frame right now; may add later
3560 3579 */
3561 3580
3562 3581 if (cmd->cmd_nwin > 0) {
3563 3582
3564 3583 /*
3565 3584 * Make sure we havn't gone past the the total number
3566 3585 * of windows
3567 3586 */
3568 3587 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3569 3588 return (NULL);
3570 3589 }
3571 3590 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3572 3591 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3573 3592 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3574 3593 return (NULL);
3575 3594 }
3576 3595 goto get_dma_cookies;
3577 3596 }
3578 3597
3579 3598
3580 3599 if (flags & PKT_XARQ) {
3581 3600 cmd->cmd_flags |= CFLAG_XARQ;
3582 3601 }
3583 3602
3584 3603 /*
3585 3604 * DMA resource allocation. This version assumes your
3586 3605 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3587 3606 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3588 3607 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3589 3608 */
3590 3609 if (bp && (bp->b_bcount != 0) &&
3591 3610 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3592 3611
3593 3612 int cnt, dma_flags;
3594 3613 mptti_t *dmap; /* ptr to the S/G list */
3595 3614
3596 3615 /*
3597 3616 * Set up DMA memory and position to the next DMA segment.
3598 3617 */
3599 3618 ASSERT(cmd->cmd_dmahandle != NULL);
3600 3619
3601 3620 if (bp->b_flags & B_READ) {
3602 3621 dma_flags = DDI_DMA_READ;
3603 3622 cmd->cmd_flags &= ~CFLAG_DMASEND;
3604 3623 } else {
3605 3624 dma_flags = DDI_DMA_WRITE;
3606 3625 cmd->cmd_flags |= CFLAG_DMASEND;
3607 3626 }
3608 3627 if (flags & PKT_CONSISTENT) {
3609 3628 cmd->cmd_flags |= CFLAG_CMDIOPB;
3610 3629 dma_flags |= DDI_DMA_CONSISTENT;
3611 3630 }
3612 3631
3613 3632 if (flags & PKT_DMA_PARTIAL) {
3614 3633 dma_flags |= DDI_DMA_PARTIAL;
3615 3634 }
3616 3635
3617 3636 /*
3618 3637 * workaround for byte hole issue on psycho and
3619 3638 * schizo pre 2.1
3620 3639 */
3621 3640 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3622 3641 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3623 3642 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3624 3643 dma_flags |= DDI_DMA_CONSISTENT;
3625 3644 }
3626 3645
3627 3646 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3628 3647 dma_flags, callback, arg,
3629 3648 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3630 3649 if (rval == DDI_DMA_PARTIAL_MAP) {
3631 3650 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3632 3651 &cmd->cmd_nwin);
3633 3652 cmd->cmd_winindex = 0;
3634 3653 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3635 3654 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3636 3655 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3637 3656 &cmd->cmd_cookiec);
3638 3657 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3639 3658 switch (rval) {
3640 3659 case DDI_DMA_NORESOURCES:
3641 3660 bioerror(bp, 0);
3642 3661 break;
3643 3662 case DDI_DMA_BADATTR:
3644 3663 case DDI_DMA_NOMAPPING:
3645 3664 bioerror(bp, EFAULT);
3646 3665 break;
3647 3666 case DDI_DMA_TOOBIG:
3648 3667 default:
3649 3668 bioerror(bp, EINVAL);
3650 3669 break;
3651 3670 }
3652 3671 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3653 3672 if (new_cmd) {
3654 3673 mptsas_scsi_destroy_pkt(ap, pkt);
3655 3674 }
3656 3675 return ((struct scsi_pkt *)NULL);
3657 3676 }
3658 3677
3659 3678 get_dma_cookies:
3660 3679 cmd->cmd_flags |= CFLAG_DMAVALID;
3661 3680 ASSERT(cmd->cmd_cookiec > 0);
3662 3681
3663 3682 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3664 3683 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3665 3684 cmd->cmd_cookiec);
3666 3685 bioerror(bp, EINVAL);
3667 3686 if (new_cmd) {
3668 3687 mptsas_scsi_destroy_pkt(ap, pkt);
3669 3688 }
3670 3689 return ((struct scsi_pkt *)NULL);
3671 3690 }
3672 3691
3673 3692 /*
3674 3693 * Allocate extra SGL buffer if needed.
3675 3694 */
3676 3695 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3677 3696 (cmd->cmd_extra_frames == NULL)) {
3678 3697 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3679 3698 DDI_FAILURE) {
3680 3699 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3681 3700 "failed");
3682 3701 bioerror(bp, ENOMEM);
3683 3702 if (new_cmd) {
3684 3703 mptsas_scsi_destroy_pkt(ap, pkt);
3685 3704 }
3686 3705 return ((struct scsi_pkt *)NULL);
3687 3706 }
3688 3707 }
3689 3708
3690 3709 /*
3691 3710 * Always use scatter-gather transfer
3692 3711 * Use the loop below to store physical addresses of
3693 3712 * DMA segments, from the DMA cookies, into your HBA's
3694 3713 * scatter-gather list.
3695 3714 * We need to ensure we have enough kmem alloc'd
3696 3715 * for the sg entries since we are no longer using an
3697 3716 * array inside mptsas_cmd_t.
3698 3717 *
3699 3718 * We check cmd->cmd_cookiec against oldcookiec so
3700 3719 * the scatter-gather list is correctly allocated
3701 3720 */
3702 3721
3703 3722 if (oldcookiec != cmd->cmd_cookiec) {
3704 3723 if (cmd->cmd_sg != (mptti_t *)NULL) {
3705 3724 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3706 3725 oldcookiec);
3707 3726 cmd->cmd_sg = NULL;
3708 3727 }
3709 3728 }
3710 3729
3711 3730 if (cmd->cmd_sg == (mptti_t *)NULL) {
3712 3731 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3713 3732 cmd->cmd_cookiec), kf);
3714 3733
3715 3734 if (cmd->cmd_sg == (mptti_t *)NULL) {
3716 3735 mptsas_log(mpt, CE_WARN,
3717 3736 "unable to kmem_alloc enough memory "
3718 3737 "for scatter/gather list");
3719 3738 /*
3720 3739 * if we have an ENOMEM condition we need to behave
3721 3740 * the same way as the rest of this routine
3722 3741 */
3723 3742
3724 3743 bioerror(bp, ENOMEM);
3725 3744 if (new_cmd) {
3726 3745 mptsas_scsi_destroy_pkt(ap, pkt);
3727 3746 }
3728 3747 return ((struct scsi_pkt *)NULL);
3729 3748 }
3730 3749 }
3731 3750
3732 3751 dmap = cmd->cmd_sg;
3733 3752
3734 3753 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3735 3754
3736 3755 /*
3737 3756 * store the first segment into the S/G list
3738 3757 */
3739 3758 dmap->count = cmd->cmd_cookie.dmac_size;
3740 3759 dmap->addr.address64.Low = (uint32_t)
3741 3760 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3742 3761 dmap->addr.address64.High = (uint32_t)
3743 3762 (cmd->cmd_cookie.dmac_laddress >> 32);
3744 3763
3745 3764 /*
3746 3765 * dmacount counts the size of the dma for this window
3747 3766 * (if partial dma is being used). totaldmacount
3748 3767 * keeps track of the total amount of dma we have
3749 3768 * transferred for all the windows (needed to calculate
3750 3769 * the resid value below).
3751 3770 */
3752 3771 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3753 3772 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3754 3773
3755 3774 /*
3756 3775 * We already stored the first DMA scatter gather segment,
3757 3776 * start at 1 if we need to store more.
3758 3777 */
3759 3778 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3760 3779 /*
3761 3780 * Get next DMA cookie
3762 3781 */
3763 3782 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3764 3783 &cmd->cmd_cookie);
3765 3784 dmap++;
3766 3785
3767 3786 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3768 3787 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3769 3788
3770 3789 /*
3771 3790 * store the segment parms into the S/G list
3772 3791 */
3773 3792 dmap->count = cmd->cmd_cookie.dmac_size;
3774 3793 dmap->addr.address64.Low = (uint32_t)
3775 3794 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3776 3795 dmap->addr.address64.High = (uint32_t)
3777 3796 (cmd->cmd_cookie.dmac_laddress >> 32);
3778 3797 }
3779 3798
3780 3799 /*
 3781 3800 		 * If this was partially allocated we set the resid to
 3782 3801 		 * the amount of data NOT transferred in this window
3783 3802 * If there is only one window, the resid will be 0
3784 3803 */
3785 3804 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3786 3805 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3787 3806 }
3788 3807 return (pkt);
3789 3808 }
3790 3809
3791 3810 /*
3792 3811 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3793 3812 *
3794 3813 * Notes:
3795 3814 * - also frees DMA resources if allocated
 3796 3815  * - also frees DMA resources if allocated
3797 3816 */
3798 3817 static void
3799 3818 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3800 3819 {
3801 3820 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3802 3821 mptsas_t *mpt = ADDR2MPT(ap);
3803 3822
3804 3823 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3805 3824 ap->a_target, (void *)pkt));
3806 3825
3807 3826 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3808 3827 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3809 3828 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3810 3829 }
3811 3830
3812 3831 if (cmd->cmd_sg) {
3813 3832 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3814 3833 cmd->cmd_sg = NULL;
3815 3834 }
3816 3835
3817 3836 mptsas_free_extra_sgl_frame(mpt, cmd);
3818 3837
3819 3838 if ((cmd->cmd_flags &
3820 3839 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3821 3840 CFLAG_SCBEXTERN)) == 0) {
3822 3841 cmd->cmd_flags = CFLAG_FREE;
3823 3842 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3824 3843 } else {
3825 3844 mptsas_pkt_destroy_extern(mpt, cmd);
3826 3845 }
3827 3846 }
3828 3847
3829 3848 /*
3830 3849 * kmem cache constructor and destructor:
3831 3850 * When constructing, we bzero the cmd and allocate the dma handle
3832 3851 * When destructing, just free the dma handle
3833 3852 */
3834 3853 static int
3835 3854 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3836 3855 {
3837 3856 mptsas_cmd_t *cmd = buf;
3838 3857 mptsas_t *mpt = cdrarg;
3839 3858 struct scsi_address ap;
3840 3859 uint_t cookiec;
3841 3860 ddi_dma_attr_t arq_dma_attr;
3842 3861 int (*callback)(caddr_t);
3843 3862
3844 3863 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3845 3864
3846 3865 NDBG4(("mptsas_kmem_cache_constructor"));
3847 3866
3848 3867 ap.a_hba_tran = mpt->m_tran;
3849 3868 ap.a_target = 0;
3850 3869 ap.a_lun = 0;
3851 3870
3852 3871 /*
3853 3872 * allocate a dma handle
3854 3873 */
3855 3874 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3856 3875 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3857 3876 cmd->cmd_dmahandle = NULL;
3858 3877 return (-1);
3859 3878 }
3860 3879
3861 3880 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3862 3881 SENSE_LENGTH, B_READ, callback, NULL);
3863 3882 if (cmd->cmd_arq_buf == NULL) {
3864 3883 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3865 3884 cmd->cmd_dmahandle = NULL;
3866 3885 return (-1);
3867 3886 }
3868 3887
3869 3888 /*
 3870 3889 	 * allocate an arq handle
3871 3890 */
3872 3891 arq_dma_attr = mpt->m_msg_dma_attr;
3873 3892 arq_dma_attr.dma_attr_sgllen = 1;
3874 3893 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3875 3894 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3876 3895 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3877 3896 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3878 3897 cmd->cmd_dmahandle = NULL;
3879 3898 cmd->cmd_arqhandle = NULL;
3880 3899 return (-1);
3881 3900 }
3882 3901
3883 3902 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3884 3903 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3885 3904 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3886 3905 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3887 3906 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3888 3907 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3889 3908 cmd->cmd_dmahandle = NULL;
3890 3909 cmd->cmd_arqhandle = NULL;
3891 3910 cmd->cmd_arq_buf = NULL;
3892 3911 return (-1);
3893 3912 }
3894 3913
3895 3914 return (0);
3896 3915 }
3897 3916
3898 3917 static void
3899 3918 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3900 3919 {
3901 3920 #ifndef __lock_lint
3902 3921 _NOTE(ARGUNUSED(cdrarg))
3903 3922 #endif
3904 3923 mptsas_cmd_t *cmd = buf;
3905 3924
3906 3925 NDBG4(("mptsas_kmem_cache_destructor"));
3907 3926
3908 3927 if (cmd->cmd_arqhandle) {
3909 3928 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3910 3929 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3911 3930 cmd->cmd_arqhandle = NULL;
3912 3931 }
3913 3932 if (cmd->cmd_arq_buf) {
3914 3933 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3915 3934 cmd->cmd_arq_buf = NULL;
3916 3935 }
3917 3936 if (cmd->cmd_dmahandle) {
3918 3937 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3919 3938 cmd->cmd_dmahandle = NULL;
3920 3939 }
3921 3940 }
3922 3941
3923 3942 static int
3924 3943 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
3925 3944 {
3926 3945 mptsas_cache_frames_t *p = buf;
3927 3946 mptsas_t *mpt = cdrarg;
3928 3947 ddi_dma_attr_t frame_dma_attr;
3929 3948 size_t mem_size, alloc_len;
3930 3949 ddi_dma_cookie_t cookie;
3931 3950 uint_t ncookie;
3932 3951 int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
3933 3952 ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3934 3953
3935 3954 frame_dma_attr = mpt->m_msg_dma_attr;
3936 3955 frame_dma_attr.dma_attr_align = 0x10;
3937 3956 frame_dma_attr.dma_attr_sgllen = 1;
3938 3957
3939 3958 if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
3940 3959 &p->m_dma_hdl) != DDI_SUCCESS) {
3941 3960 mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
3942 3961 " extra SGL.");
3943 3962 return (DDI_FAILURE);
3944 3963 }
3945 3964
3946 3965 mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
3947 3966
3948 3967 if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
3949 3968 DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
3950 3969 &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
3951 3970 ddi_dma_free_handle(&p->m_dma_hdl);
3952 3971 p->m_dma_hdl = NULL;
3953 3972 mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
3954 3973 " extra SGL.");
3955 3974 return (DDI_FAILURE);
3956 3975 }
3957 3976
3958 3977 if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
3959 3978 alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3960 3979 &cookie, &ncookie) != DDI_DMA_MAPPED) {
3961 3980 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3962 3981 ddi_dma_free_handle(&p->m_dma_hdl);
3963 3982 p->m_dma_hdl = NULL;
|
↓ open down ↓ |
1713 lines elided |
↑ open up ↑ |
3964 3983 mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
3965 3984 " extra SGL");
3966 3985 return (DDI_FAILURE);
3967 3986 }
3968 3987
3969 3988 /*
3970 3989 * Store the SGL memory address. This chip uses this
3971 3990 * address to dma to and from the driver. The second
3972 3991 * address is the address mpt uses to fill in the SGL.
3973 3992 */
3974 - p->m_phys_addr = cookie.dmac_address;
3993 + p->m_phys_addr = cookie.dmac_laddress;
3975 3994
3976 3995 return (DDI_SUCCESS);
3977 3996 }
3978 3997
3979 3998 static void
3980 3999 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3981 4000 {
3982 4001 #ifndef __lock_lint
3983 4002 _NOTE(ARGUNUSED(cdrarg))
3984 4003 #endif
3985 4004 mptsas_cache_frames_t *p = buf;
3986 4005 if (p->m_dma_hdl != NULL) {
3987 4006 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3988 4007 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3989 4008 ddi_dma_free_handle(&p->m_dma_hdl);
3990 4009 p->m_phys_addr = NULL;
3991 4010 p->m_frames_addr = NULL;
3992 4011 p->m_dma_hdl = NULL;
3993 4012 p->m_acc_hdl = NULL;
3994 4013 }
3995 4014
3996 4015 }
3997 4016
3998 4017 /*
3999 4018 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4000 4019 * for non-standard length cdb, pkt_private, status areas
4001 4020 * if allocation fails, then deallocate all external space and the pkt
4002 4021 */
4003 4022 /* ARGSUSED */
4004 4023 static int
4005 4024 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4006 4025 int cmdlen, int tgtlen, int statuslen, int kf)
4007 4026 {
4008 4027 caddr_t cdbp, scbp, tgt;
4009 4028 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
4010 4029 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
4011 4030 struct scsi_address ap;
4012 4031 size_t senselength;
4013 4032 ddi_dma_attr_t ext_arq_dma_attr;
4014 4033 uint_t cookiec;
4015 4034
4016 4035 NDBG3(("mptsas_pkt_alloc_extern: "
4017 4036 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4018 4037 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4019 4038
4020 4039 tgt = cdbp = scbp = NULL;
4021 4040 cmd->cmd_scblen = statuslen;
4022 4041 cmd->cmd_privlen = (uchar_t)tgtlen;
4023 4042
4024 4043 if (cmdlen > sizeof (cmd->cmd_cdb)) {
4025 4044 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4026 4045 goto fail;
4027 4046 }
4028 4047 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4029 4048 cmd->cmd_flags |= CFLAG_CDBEXTERN;
4030 4049 }
4031 4050 if (tgtlen > PKT_PRIV_LEN) {
4032 4051 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4033 4052 goto fail;
4034 4053 }
4035 4054 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4036 4055 cmd->cmd_pkt->pkt_private = tgt;
4037 4056 }
4038 4057 if (statuslen > EXTCMDS_STATUS_SIZE) {
4039 4058 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4040 4059 goto fail;
4041 4060 }
4042 4061 cmd->cmd_flags |= CFLAG_SCBEXTERN;
4043 4062 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4044 4063
4045 4064 /* allocate sense data buf for DMA */
4046 4065
4047 4066 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
4048 4067 struct scsi_arq_status, sts_sensedata);
4049 4068 cmd->cmd_rqslen = (uchar_t)senselength;
4050 4069
4051 4070 ap.a_hba_tran = mpt->m_tran;
4052 4071 ap.a_target = 0;
4053 4072 ap.a_lun = 0;
4054 4073
4055 4074 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
4056 4075 (struct buf *)NULL, senselength, B_READ,
4057 4076 callback, NULL);
4058 4077
4059 4078 if (cmd->cmd_ext_arq_buf == NULL) {
4060 4079 goto fail;
4061 4080 }
4062 4081 /*
4063 4082 * allocate a extern arq handle and bind the buf
4064 4083 */
4065 4084 ext_arq_dma_attr = mpt->m_msg_dma_attr;
4066 4085 ext_arq_dma_attr.dma_attr_sgllen = 1;
4067 4086 if ((ddi_dma_alloc_handle(mpt->m_dip,
4068 4087 &ext_arq_dma_attr, callback,
4069 4088 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
4070 4089 goto fail;
4071 4090 }
4072 4091
4073 4092 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
4074 4093 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
4075 4094 callback, NULL, &cmd->cmd_ext_arqcookie,
4076 4095 &cookiec)
4077 4096 != DDI_SUCCESS) {
4078 4097 goto fail;
4079 4098 }
4080 4099 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4081 4100 }
4082 4101 return (0);
4083 4102 fail:
4084 4103 mptsas_pkt_destroy_extern(mpt, cmd);
4085 4104 return (1);
4086 4105 }
4087 4106
4088 4107 /*
4089 4108 * deallocate external pkt space and deallocate the pkt
4090 4109 */
4091 4110 static void
4092 4111 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4093 4112 {
4094 4113 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4095 4114
4096 4115 if (cmd->cmd_flags & CFLAG_FREE) {
4097 4116 mptsas_log(mpt, CE_PANIC,
4098 4117 "mptsas_pkt_destroy_extern: freeing free packet");
4099 4118 _NOTE(NOT_REACHED)
4100 4119 /* NOTREACHED */
4101 4120 }
4102 4121 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4103 4122 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4104 4123 }
4105 4124 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4106 4125 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4107 4126 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4108 4127 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4109 4128 }
4110 4129 if (cmd->cmd_ext_arqhandle) {
4111 4130 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4112 4131 cmd->cmd_ext_arqhandle = NULL;
4113 4132 }
4114 4133 if (cmd->cmd_ext_arq_buf)
4115 4134 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4116 4135 }
4117 4136 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4118 4137 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4119 4138 }
4120 4139 cmd->cmd_flags = CFLAG_FREE;
4121 4140 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4122 4141 }
4123 4142
4124 4143 /*
4125 4144 * tran_sync_pkt(9E) - explicit DMA synchronization
4126 4145 */
4127 4146 /*ARGSUSED*/
4128 4147 static void
4129 4148 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4130 4149 {
4131 4150 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4132 4151
4133 4152 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4134 4153 ap->a_target, (void *)pkt));
4135 4154
4136 4155 if (cmd->cmd_dmahandle) {
4137 4156 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4138 4157 (cmd->cmd_flags & CFLAG_DMASEND) ?
4139 4158 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4140 4159 }
4141 4160 }
4142 4161
4143 4162 /*
4144 4163 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4145 4164 */
4146 4165 /*ARGSUSED*/
4147 4166 static void
4148 4167 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4149 4168 {
4150 4169 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4151 4170 mptsas_t *mpt = ADDR2MPT(ap);
4152 4171
4153 4172 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4154 4173 ap->a_target, (void *)pkt));
4155 4174
4156 4175 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4157 4176 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4158 4177 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4159 4178 }
4160 4179
4161 4180 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4162 4181 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4163 4182 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4164 4183 }
4165 4184
4166 4185 mptsas_free_extra_sgl_frame(mpt, cmd);
4167 4186 }
4168 4187
4169 4188 static void
4170 4189 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
|
↓ open down ↓ |
186 lines elided |
↑ open up ↑ |
4171 4190 {
4172 4191 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4173 4192 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4174 4193 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4175 4194 DDI_DMA_SYNC_FORCPU);
4176 4195 }
4177 4196 (*pkt->pkt_comp)(pkt);
4178 4197 }
4179 4198
4180 4199 static void
4181 -mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4182 - pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4200 +mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4201 + ddi_acc_handle_t acc_hdl, uint_t cookiec,
4202 + uint32_t end_flags)
4183 4203 {
4184 - uint_t cookiec;
4204 + pMpi2SGESimple64_t sge;
4185 4205 mptti_t *dmap;
4186 4206 uint32_t flags;
4207 +
4208 + dmap = cmd->cmd_sg;
4209 +
4210 + sge = (pMpi2SGESimple64_t)(&frame->SGL);
4211 + while (cookiec--) {
4212 + ddi_put32(acc_hdl, &sge->Address.Low,
4213 + dmap->addr.address64.Low);
4214 + ddi_put32(acc_hdl, &sge->Address.High,
4215 + dmap->addr.address64.High);
4216 + ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4217 + flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4218 + flags |= ((uint32_t)
4219 + (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4220 + MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4221 + MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4222 + MPI2_SGE_FLAGS_SHIFT);
4223 +
4224 + /*
4225 + * If this is the last cookie, we set the flags
4226 + * to indicate so
4227 + */
4228 + if (cookiec == 0) {
4229 + flags |= end_flags;
4230 + }
4231 + if (cmd->cmd_flags & CFLAG_DMASEND) {
4232 + flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4233 + MPI2_SGE_FLAGS_SHIFT);
4234 + } else {
4235 + flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4236 + MPI2_SGE_FLAGS_SHIFT);
4237 + }
4238 + ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4239 + dmap++;
4240 + sge++;
4241 + }
4242 +}
4243 +
4244 +static void
4245 +mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4246 + pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4247 +{
4187 4248 pMpi2SGESimple64_t sge;
4188 4249 pMpi2SGEChain64_t sgechain;
4189 - ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4250 + uint64_t nframe_phys_addr;
4251 + uint_t cookiec;
4252 + mptti_t *dmap;
4253 + uint32_t flags;
4254 + int i, j, k, l, frames, sgemax;
4255 + int temp, maxframe_sges;
4256 + uint8_t chainflags;
4257 + uint16_t chainlength;
4258 + mptsas_cache_frames_t *p;
4190 4259
4260 + cookiec = cmd->cmd_cookiec;
4261 +
4191 4262 /*
4192 - * Save the number of entries in the DMA
4193 - * Scatter/Gather list
4263 + * Hereby we start to deal with multiple frames.
4264 + * The process is as follows:
4265 + * 1. Determine how many frames are needed for SGL element
4266 + * storage; Note that all frames are stored in contiguous
4267 + * memory space and in 64-bit DMA mode each element is
4268 + * 3 double-words (12 bytes) long.
4269 + * 2. Fill up the main frame. We need to do this separately
4270 + * since it contains the SCSI IO request header and needs
4271 + * dedicated processing. Note that the last 4 double-words
4272 + * of the SCSI IO header is for SGL element storage
4273 + * (MPI2_SGE_IO_UNION).
4274 + * 3. Fill the chain element in the main frame, so the DMA
4275 + * engine can use the following frames.
4276 + * 4. Enter a loop to fill the remaining frames. Note that the
4277 + * last frame contains no chain element. The remaining
4278 + * frames go into the mpt SGL buffer allocated on the fly,
4279 + * not immediately following the main message frame, as in
4280 + * Gen1.
4281 + * Some restrictions:
4282 + * 1. For 64-bit DMA, the simple element and chain element
4283 + * are both of 3 double-words (12 bytes) in size, even
4284 + * though all frames are stored in the first 4G of mem
4285 + * range and the higher 32-bits of the address are always 0.
4286 + * 2. On some controllers (like the 1064/1068), a frame can
4287 + * hold SGL elements with the last 1 or 2 double-words
4288 + * (4 or 8 bytes) un-used. On these controllers, we should
4289 + * recognize that there's not enough room for another SGL
4290 + * element and move the sge pointer to the next frame.
4194 4291 */
4195 - cookiec = cmd->cmd_cookiec;
4196 4292
4197 - NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4293 + /*
4294 + * Sgemax is the number of SGE's that will fit
4295 + * each extra frame and frames is total
4296 + * number of frames we'll need. 1 sge entry per
 4297 +	 * frame is reserved for the chain element, thus the -1 below.
4298 + */
4299 + sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64)) - 1);
4300 + maxframe_sges = MPTSAS_MAX_FRAME_SGES64(mpt);
4301 + temp = (cookiec - (maxframe_sges - 1)) / sgemax;
4198 4302
4199 4303 /*
4200 - * Set read/write bit in control.
4304 + * A little check to see if we need to round up the number
4305 + * of frames we need
4201 4306 */
4202 - if (cmd->cmd_flags & CFLAG_DMASEND) {
4203 - *control |= MPI2_SCSIIO_CONTROL_WRITE;
4307 + if ((cookiec - (maxframe_sges - 1)) - (temp * sgemax) > 1) {
4308 + frames = (temp + 1);
4204 4309 } else {
4205 - *control |= MPI2_SCSIIO_CONTROL_READ;
4310 + frames = temp;
4206 4311 }
4312 + dmap = cmd->cmd_sg;
4313 + sge = (pMpi2SGESimple64_t)(&frame->SGL);
4207 4314
4208 - ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4315 + /*
4316 + * First fill in the main frame
4317 + */
4318 + j = maxframe_sges - 1;
4319 + mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
4320 + ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4321 + MPI2_SGE_FLAGS_SHIFT));
4322 + dmap += j;
4323 + sge += j;
4324 + j++;
4209 4325
4210 4326 /*
4211 - * We have 2 cases here. First where we can fit all the
4212 - * SG elements into the main frame, and the case
4213 - * where we can't.
4214 - * If we have more cookies than we can attach to a frame
4215 - * we will need to use a chain element to point
4216 - * a location of memory where the rest of the S/G
4217 - * elements reside.
4327 + * Fill in the chain element in the main frame.
4328 + * About calculation on ChainOffset:
4329 + * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4330 + * in the end reserved for SGL element storage
4331 + * (MPI2_SGE_IO_UNION); we should count it in our
4332 + * calculation. See its definition in the header file.
4333 + * 2. Constant j is the counter of the current SGL element
4334 + * that will be processed, and (j - 1) is the number of
4335 + * SGL elements that have been processed (stored in the
4336 + * main frame).
4337 + * 3. ChainOffset value should be in units of double-words (4
4338 + * bytes) so the last value should be divided by 4.
4218 4339 */
4219 - if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4220 - dmap = cmd->cmd_sg;
4221 - sge = (pMpi2SGESimple64_t)(&frame->SGL);
4222 - while (cookiec--) {
4223 - ddi_put32(acc_hdl,
4224 - &sge->Address.Low, dmap->addr.address64.Low);
4225 - ddi_put32(acc_hdl,
4226 - &sge->Address.High, dmap->addr.address64.High);
4227 - ddi_put32(acc_hdl, &sge->FlagsLength,
4228 - dmap->count);
4229 - flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4230 - flags |= ((uint32_t)
4231 - (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4232 - MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4233 - MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4234 - MPI2_SGE_FLAGS_SHIFT);
4340 + ddi_put8(acc_hdl, &frame->ChainOffset,
4341 + (sizeof (MPI2_SCSI_IO_REQUEST) -
4342 + sizeof (MPI2_SGE_IO_UNION) +
4343 + (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4344 + sgechain = (pMpi2SGEChain64_t)sge;
4345 + chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4346 + MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4347 + MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4348 + ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4235 4349
4350 + /*
4351 + * The size of the next frame is the accurate size of space
4352 + * (in bytes) used to store the SGL elements. j is the counter
4353 + * of SGL elements. (j - 1) is the number of SGL elements that
4354 + * have been processed (stored in frames).
4355 + */
4356 + if (frames >= 2) {
4357 + chainlength = mpt->m_req_frame_size /
4358 + sizeof (MPI2_SGE_SIMPLE64) *
4359 + sizeof (MPI2_SGE_SIMPLE64);
4360 + } else {
4361 + chainlength = ((cookiec - (j - 1)) *
4362 + sizeof (MPI2_SGE_SIMPLE64));
4363 + }
4364 +
4365 + p = cmd->cmd_extra_frames;
4366 +
4367 + ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4368 + ddi_put32(acc_hdl, &sgechain->Address.Low,
4369 + (p->m_phys_addr&0xffffffffull));
4370 + ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr>>32);
4371 +
4372 + /*
4373 + * If there are more than 2 frames left we have to
4374 + * fill in the next chain offset to the location of
4375 + * the chain element in the next frame.
4376 + * sgemax is the number of simple elements in an extra
4377 + * frame. Note that the value NextChainOffset should be
4378 + * in double-words (4 bytes).
4379 + */
4380 + if (frames >= 2) {
4381 + ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4382 + (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4383 + } else {
4384 + ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4385 + }
4386 +
4387 + /*
4388 + * Jump to next frame;
4389 + * Starting here, chain buffers go into the per command SGL.
4390 + * This buffer is allocated when chain buffers are needed.
4391 + */
4392 + sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4393 + i = cookiec;
4394 +
4395 + /*
4396 + * Start filling in frames with SGE's. If we
4397 + * reach the end of frame and still have SGE's
4398 + * to fill we need to add a chain element and
4399 + * use another frame. j will be our counter
4400 + * for what cookie we are at and i will be
4401 + * the total cookiec. k is the current frame
4402 + */
4403 + for (k = 1; k <= frames; k++) {
4404 + for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4405 +
4236 4406 /*
4237 - * If this is the last cookie, we set the flags
4238 - * to indicate so
4407 + * If we have reached the end of frame
4408 + * and we have more SGE's to fill in
4409 + * we have to fill the final entry
4410 + * with a chain element and then
4411 + * continue to the next frame
4239 4412 */
4240 - if (cookiec == 0) {
4241 - flags |=
4242 - ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4243 - | MPI2_SGE_FLAGS_END_OF_BUFFER
4244 - | MPI2_SGE_FLAGS_END_OF_LIST) <<
4245 - MPI2_SGE_FLAGS_SHIFT);
4246 - }
4247 - if (cmd->cmd_flags & CFLAG_DMASEND) {
4248 - flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4249 - MPI2_SGE_FLAGS_SHIFT);
4250 - } else {
4251 - flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4252 - MPI2_SGE_FLAGS_SHIFT);
4253 - }
4254 - ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4255 - dmap++;
4256 - sge++;
4257 - }
4258 - } else {
4259 - /*
4260 - * Hereby we start to deal with multiple frames.
4261 - * The process is as follows:
4262 - * 1. Determine how many frames are needed for SGL element
4263 - * storage; Note that all frames are stored in contiguous
4264 - * memory space and in 64-bit DMA mode each element is
4265 - * 3 double-words (12 bytes) long.
4266 - * 2. Fill up the main frame. We need to do this separately
4267 - * since it contains the SCSI IO request header and needs
4268 - * dedicated processing. Note that the last 4 double-words
4269 - * of the SCSI IO header is for SGL element storage
4270 - * (MPI2_SGE_IO_UNION).
4271 - * 3. Fill the chain element in the main frame, so the DMA
4272 - * engine can use the following frames.
4273 - * 4. Enter a loop to fill the remaining frames. Note that the
4274 - * last frame contains no chain element. The remaining
4275 - * frames go into the mpt SGL buffer allocated on the fly,
4276 - * not immediately following the main message frame, as in
4277 - * Gen1.
4278 - * Some restrictions:
4279 - * 1. For 64-bit DMA, the simple element and chain element
4280 - * are both of 3 double-words (12 bytes) in size, even
4281 - * though all frames are stored in the first 4G of mem
4282 - * range and the higher 32-bits of the address are always 0.
4283 - * 2. On some controllers (like the 1064/1068), a frame can
4284 - * hold SGL elements with the last 1 or 2 double-words
4285 - * (4 or 8 bytes) un-used. On these controllers, we should
4286 - * recognize that there's not enough room for another SGL
4287 - * element and move the sge pointer to the next frame.
4288 - */
4289 - int i, j, k, l, frames, sgemax;
4290 - int temp;
4291 - uint8_t chainflags;
4292 - uint16_t chainlength;
4293 - mptsas_cache_frames_t *p;
4413 + if ((l == (sgemax + 1)) && (k != frames)) {
4414 + sgechain = (pMpi2SGEChain64_t)sge;
4415 + j--;
4416 + chainflags = (
4417 + MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4418 + MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4419 + MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4420 + ddi_put8(p->m_acc_hdl,
4421 + &sgechain->Flags, chainflags);
4422 + /*
4423 + * k is the frame counter and (k + 1)
4424 + * is the number of the next frame.
4425 + * Note that frames are in contiguous
4426 + * memory space.
4427 + */
4428 + nframe_phys_addr = p->m_phys_addr +
4429 + (mpt->m_req_frame_size * k);
4430 + ddi_put32(p->m_acc_hdl,
4431 + &sgechain->Address.Low,
4432 + nframe_phys_addr&0xffffffffull);
4433 + ddi_put32(p->m_acc_hdl,
4434 + &sgechain->Address.High,
4435 + nframe_phys_addr>>32);
4294 4436
4295 - /*
4296 - * Sgemax is the number of SGE's that will fit
4297 - * each extra frame and frames is total
4298 - * number of frames we'll need. 1 sge entry per
4299 - * frame is reseverd for the chain element thus the -1 below.
4300 - */
4301 - sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4302 - - 1);
4303 - temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4437 + /*
4438 + * If there are more than 2 frames left
 4439 +			 * we have to set the next chain offset to
4440 + * the location of the chain element
4441 + * in the next frame and fill in the
4442 + * length of the next chain
4443 + */
4444 + if ((frames - k) >= 2) {
4445 + ddi_put8(p->m_acc_hdl,
4446 + &sgechain->NextChainOffset,
4447 + (sgemax *
4448 + sizeof (MPI2_SGE_SIMPLE64))
4449 + >> 2);
4450 + ddi_put16(p->m_acc_hdl,
4451 + &sgechain->Length,
4452 + mpt->m_req_frame_size /
4453 + sizeof (MPI2_SGE_SIMPLE64) *
4454 + sizeof (MPI2_SGE_SIMPLE64));
4455 + } else {
4456 + /*
4457 + * This is the last frame. Set
4458 + * the NextChainOffset to 0 and
4459 + * Length is the total size of
4460 + * all remaining simple elements
4461 + */
4462 + ddi_put8(p->m_acc_hdl,
4463 + &sgechain->NextChainOffset,
4464 + 0);
4465 + ddi_put16(p->m_acc_hdl,
4466 + &sgechain->Length,
4467 + (cookiec - j) *
4468 + sizeof (MPI2_SGE_SIMPLE64));
4469 + }
4304 4470
4305 - /*
4306 - * A little check to see if we need to round up the number
4307 - * of frames we need
4308 - */
4309 - if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4310 - sgemax) > 1) {
4311 - frames = (temp + 1);
4312 - } else {
4313 - frames = temp;
4314 - }
4315 - dmap = cmd->cmd_sg;
4316 - sge = (pMpi2SGESimple64_t)(&frame->SGL);
4471 + /* Jump to the next frame */
4472 + sge = (pMpi2SGESimple64_t)
4473 + ((char *)p->m_frames_addr +
4474 + (int)mpt->m_req_frame_size * k);
4317 4475
4318 - /*
4319 - * First fill in the main frame
4320 - */
4321 - for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4322 - ddi_put32(acc_hdl, &sge->Address.Low,
4476 + continue;
4477 + }
4478 +
4479 + ddi_put32(p->m_acc_hdl,
4480 + &sge->Address.Low,
4323 4481 dmap->addr.address64.Low);
4324 - ddi_put32(acc_hdl, &sge->Address.High,
4482 + ddi_put32(p->m_acc_hdl,
4483 + &sge->Address.High,
4325 4484 dmap->addr.address64.High);
4326 - ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4327 - flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4328 - flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4485 + ddi_put32(p->m_acc_hdl,
4486 + &sge->FlagsLength, dmap->count);
4487 + flags = ddi_get32(p->m_acc_hdl,
4488 + &sge->FlagsLength);
4489 + flags |= ((uint32_t)(
4490 + MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4329 4491 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4330 4492 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4331 4493 MPI2_SGE_FLAGS_SHIFT);
4332 4494
4333 4495 /*
4334 - * If this is the last SGE of this frame
4335 - * we set the end of list flag
4496 + * If we are at the end of the frame and
4497 + * there is another frame to fill in
4498 + * we set the last simple element as last
4499 + * element
4336 4500 */
4337 - if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4501 + if ((l == sgemax) && (k != frames)) {
4338 4502 flags |= ((uint32_t)
4339 4503 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4340 4504 MPI2_SGE_FLAGS_SHIFT);
4341 4505 }
4506 +
4507 + /*
4508 + * If this is the final cookie we
4509 + * indicate it by setting the flags
4510 + */
4511 + if (j == i) {
4512 + flags |= ((uint32_t)
4513 + (MPI2_SGE_FLAGS_LAST_ELEMENT |
4514 + MPI2_SGE_FLAGS_END_OF_BUFFER |
4515 + MPI2_SGE_FLAGS_END_OF_LIST) <<
4516 + MPI2_SGE_FLAGS_SHIFT);
4517 + }
4342 4518 if (cmd->cmd_flags & CFLAG_DMASEND) {
4343 4519 flags |=
4344 4520 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4345 4521 MPI2_SGE_FLAGS_SHIFT);
4346 4522 } else {
4347 4523 flags |=
4348 4524 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4349 4525 MPI2_SGE_FLAGS_SHIFT);
4350 4526 }
4351 - ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4527 + ddi_put32(p->m_acc_hdl,
4528 + &sge->FlagsLength, flags);
4352 4529 dmap++;
4353 4530 sge++;
4354 4531 }
4532 + }
4355 4533
4356 - /*
4357 - * Fill in the chain element in the main frame.
4358 - * About calculation on ChainOffset:
4359 - * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4360 - * in the end reserved for SGL element storage
4361 - * (MPI2_SGE_IO_UNION); we should count it in our
4362 - * calculation. See its definition in the header file.
4363 - * 2. Constant j is the counter of the current SGL element
4364 - * that will be processed, and (j - 1) is the number of
4365 - * SGL elements that have been processed (stored in the
4366 - * main frame).
4367 - * 3. ChainOffset value should be in units of double-words (4
4368 - * bytes) so the last value should be divided by 4.
4369 - */
4370 - ddi_put8(acc_hdl, &frame->ChainOffset,
4371 - (sizeof (MPI2_SCSI_IO_REQUEST) -
4372 - sizeof (MPI2_SGE_IO_UNION) +
4373 - (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4374 - sgechain = (pMpi2SGEChain64_t)sge;
4375 - chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4376 - MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4377 - MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4378 - ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4534 + /*
4535 + * Sync DMA with the chain buffers that were just created
4536 + */
4537 + (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4538 +}
4379 4539
4380 - /*
4381 - * The size of the next frame is the accurate size of space
4382 - * (in bytes) used to store the SGL elements. j is the counter
4383 - * of SGL elements. (j - 1) is the number of SGL elements that
4384 - * have been processed (stored in frames).
4385 - */
4386 - if (frames >= 2) {
4387 - chainlength = mpt->m_req_frame_size /
4388 - sizeof (MPI2_SGE_SIMPLE64) *
4389 - sizeof (MPI2_SGE_SIMPLE64);
4390 - } else {
4391 - chainlength = ((cookiec - (j - 1)) *
4392 - sizeof (MPI2_SGE_SIMPLE64));
4393 - }
4540 +static void
4541 +mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4542 + ddi_acc_handle_t acc_hdl, uint_t cookiec,
4543 + uint8_t end_flag)
4544 +{
4545 + pMpi2IeeeSgeSimple64_t ieeesge;
4546 + mptti_t *dmap;
4547 + uint8_t flags;
4394 4548
4395 - p = cmd->cmd_extra_frames;
4549 + dmap = cmd->cmd_sg;
4396 4550
4397 - ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4398 - ddi_put32(acc_hdl, &sgechain->Address.Low,
4399 - p->m_phys_addr);
4400 - /* SGL is allocated in the first 4G mem range */
4401 - ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4551 + NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4552 + cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4402 4553
4554 + ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4555 + while (cookiec--) {
4556 + ddi_put32(acc_hdl, &ieeesge->Address.Low,
4557 + dmap->addr.address64.Low);
4558 + ddi_put32(acc_hdl, &ieeesge->Address.High,
4559 + dmap->addr.address64.High);
4560 + ddi_put32(acc_hdl, &ieeesge->Length, dmap->count);
4561 + NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4562 + flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4563 + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4564 +
4403 4565 /*
4404 - * If there are more than 2 frames left we have to
4405 - * fill in the next chain offset to the location of
4406 - * the chain element in the next frame.
4407 - * sgemax is the number of simple elements in an extra
4408 - * frame. Note that the value NextChainOffset should be
4409 - * in double-words (4 bytes).
4566 + * If this is the last cookie, we set the flags
4567 + * to indicate so
4410 4568 */
4411 - if (frames >= 2) {
4412 - ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4413 - (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4414 - } else {
4415 - ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4569 + if (cookiec == 0) {
4570 + flags |= end_flag;
4416 4571 }
4417 4572
4418 4573 /*
4419 - * Jump to next frame;
4420 - * Starting here, chain buffers go into the per command SGL.
4421 - * This buffer is allocated when chain buffers are needed.
4574 + * XXX: Hmmm, what about the direction based on
4575 + * cmd->cmd_flags & CFLAG_DMASEND?
4422 4576 */
4423 - sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4424 - i = cookiec;
4577 + ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4578 + dmap++;
4579 + ieeesge++;
4580 + }
4581 +}
4425 4582
4426 - /*
4427 - * Start filling in frames with SGE's. If we
4428 - * reach the end of frame and still have SGE's
4429 - * to fill we need to add a chain element and
4430 - * use another frame. j will be our counter
4431 - * for what cookie we are at and i will be
4432 - * the total cookiec. k is the current frame
4433 - */
4434 - for (k = 1; k <= frames; k++) {
4435 - for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4583 +static void
4584 +mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4585 + pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4586 +{
4587 + pMpi2IeeeSgeSimple64_t ieeesge;
4588 + pMpi25IeeeSgeChain64_t ieeesgechain;
4589 + uint64_t nframe_phys_addr;
4590 + uint_t cookiec;
4591 + mptti_t *dmap;
4592 + uint8_t flags;
4593 + int i, j, k, l, frames, sgemax;
4594 + int temp, maxframe_sges;
4595 + uint8_t chainflags;
4596 + uint32_t chainlength;
4597 + mptsas_cache_frames_t *p;
4436 4598
4437 - /*
4438 - * If we have reached the end of frame
4439 - * and we have more SGE's to fill in
4440 - * we have to fill the final entry
4441 - * with a chain element and then
4442 - * continue to the next frame
4443 - */
4444 - if ((l == (sgemax + 1)) && (k != frames)) {
4445 - sgechain = (pMpi2SGEChain64_t)sge;
4446 - j--;
4447 - chainflags = (
4448 - MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4449 - MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4450 - MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4451 - ddi_put8(p->m_acc_hdl,
4452 - &sgechain->Flags, chainflags);
4453 - /*
4454 - * k is the frame counter and (k + 1)
4455 - * is the number of the next frame.
4456 - * Note that frames are in contiguous
4457 - * memory space.
4458 - */
4459 - ddi_put32(p->m_acc_hdl,
4460 - &sgechain->Address.Low,
4461 - (p->m_phys_addr +
4462 - (mpt->m_req_frame_size * k)));
4463 - ddi_put32(p->m_acc_hdl,
4464 - &sgechain->Address.High, 0);
4599 + cookiec = cmd->cmd_cookiec;
4465 4600
4466 - /*
4467 - * If there are more than 2 frames left
4468 - * we have to next chain offset to
4469 - * the location of the chain element
4470 - * in the next frame and fill in the
4471 - * length of the next chain
4472 - */
4473 - if ((frames - k) >= 2) {
4474 - ddi_put8(p->m_acc_hdl,
4475 - &sgechain->NextChainOffset,
4476 - (sgemax *
4477 - sizeof (MPI2_SGE_SIMPLE64))
4478 - >> 2);
4479 - ddi_put16(p->m_acc_hdl,
4480 - &sgechain->Length,
4481 - mpt->m_req_frame_size /
4482 - sizeof (MPI2_SGE_SIMPLE64) *
4483 - sizeof (MPI2_SGE_SIMPLE64));
4484 - } else {
4485 - /*
4486 - * This is the last frame. Set
4487 - * the NextChainOffset to 0 and
4488 - * Length is the total size of
4489 - * all remaining simple elements
4490 - */
4491 - ddi_put8(p->m_acc_hdl,
4492 - &sgechain->NextChainOffset,
4493 - 0);
4494 - ddi_put16(p->m_acc_hdl,
4495 - &sgechain->Length,
4496 - (cookiec - j) *
4497 - sizeof (MPI2_SGE_SIMPLE64));
4498 - }
4601 + NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));
4499 4602
4500 - /* Jump to the next frame */
4501 - sge = (pMpi2SGESimple64_t)
4502 - ((char *)p->m_frames_addr +
4503 - (int)mpt->m_req_frame_size * k);
4603 + /*
4604 + * Hereby we start to deal with multiple frames.
4605 + * The process is as follows:
4606 + * 1. Determine how many frames are needed for SGL element
4607 + * storage; Note that all frames are stored in contiguous
4608 + * memory space and in 64-bit DMA mode each element is
4609 + * 4 double-words (16 bytes) long.
4610 + * 2. Fill up the main frame. We need to do this separately
4611 + * since it contains the SCSI IO request header and needs
4612 + * dedicated processing. Note that the last 4 double-words
4613 + * of the SCSI IO header is for SGL element storage
4614 + * (MPI2_SGE_IO_UNION).
4615 + * 3. Fill the chain element in the main frame, so the DMA
4616 + * engine can use the following frames.
4617 + * 4. Enter a loop to fill the remaining frames. Note that the
4618 + * last frame contains no chain element. The remaining
4619 + * frames go into the mpt SGL buffer allocated on the fly,
4620 + * not immediately following the main message frame, as in
4621 + * Gen1.
4622 + * Some restrictions:
4623 + * 1. For 64-bit DMA, the simple element and chain element
4624 + * are both of 4 double-words (16 bytes) in size, even
4625 + * though all frames are stored in the first 4G of mem
4626 + * range and the higher 32-bits of the address are always 0.
4627 + * 2. On some controllers (like the 1064/1068), a frame can
4628 + * hold SGL elements with the last 1 or 2 double-words
4629 + * (4 or 8 bytes) un-used. On these controllers, we should
4630 + * recognize that there's not enough room for another SGL
4631 + * element and move the sge pointer to the next frame.
4632 + */
4504 4633
4505 - continue;
4506 - }
4634 + /*
4635 + * Sgemax is the number of SGE's that will fit
4636 + * each extra frame and frames is total
4637 + * number of frames we'll need. 1 sge entry per
 4638 +	 * frame is reserved for the chain element thus the -1 below.
4639 + */
4640 + sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
4641 + - 1);
4642 + maxframe_sges = MPTSAS_MAX_FRAME_SGES64(mpt);
4643 + temp = (cookiec - (maxframe_sges - 1)) / sgemax;
4507 4644
4508 - ddi_put32(p->m_acc_hdl,
4509 - &sge->Address.Low,
4510 - dmap->addr.address64.Low);
4511 - ddi_put32(p->m_acc_hdl,
4512 - &sge->Address.High,
4513 - dmap->addr.address64.High);
4514 - ddi_put32(p->m_acc_hdl,
4515 - &sge->FlagsLength, dmap->count);
4516 - flags = ddi_get32(p->m_acc_hdl,
4517 - &sge->FlagsLength);
4518 - flags |= ((uint32_t)(
4519 - MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4520 - MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4521 - MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4522 - MPI2_SGE_FLAGS_SHIFT);
4645 + /*
4646 + * A little check to see if we need to round up the number
4647 + * of frames we need
4648 + */
4649 + if ((cookiec - (maxframe_sges - 1)) - (temp * sgemax) > 1) {
4650 + frames = (temp + 1);
4651 + } else {
4652 + frames = temp;
4653 + }
4654 + NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
4655 + dmap = cmd->cmd_sg;
4656 + ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4523 4657
4658 + /*
4659 + * First fill in the main frame
4660 + */
4661 + j = maxframe_sges - 1;
4662 + mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
4663 + dmap += j;
4664 + ieeesge += j;
4665 + j++;
4666 +
4667 + /*
4668 + * Fill in the chain element in the main frame.
4669 + * About calculation on ChainOffset:
4670 + * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4671 + * in the end reserved for SGL element storage
4672 + * (MPI2_SGE_IO_UNION); we should count it in our
4673 + * calculation. See its definition in the header file.
4674 + * 2. Constant j is the counter of the current SGL element
4675 + * that will be processed, and (j - 1) is the number of
4676 + * SGL elements that have been processed (stored in the
4677 + * main frame).
4678 + * 3. ChainOffset value should be in units of quad-words (16
4679 + * bytes) so the last value should be divided by 16.
4680 + */
4681 + ddi_put8(acc_hdl, &frame->ChainOffset,
4682 + (sizeof (MPI2_SCSI_IO_REQUEST) -
4683 + sizeof (MPI2_SGE_IO_UNION) +
4684 + (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
4685 + ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
4686 + chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
4687 + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4688 + ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);
4689 +
4690 + /*
4691 + * The size of the next frame is the accurate size of space
4692 + * (in bytes) used to store the SGL elements. j is the counter
4693 + * of SGL elements. (j - 1) is the number of SGL elements that
4694 + * have been processed (stored in frames).
4695 + */
4696 + if (frames >= 2) {
4697 + chainlength = mpt->m_req_frame_size /
4698 + sizeof (MPI2_IEEE_SGE_SIMPLE64) *
4699 + sizeof (MPI2_IEEE_SGE_SIMPLE64);
4700 + } else {
4701 + chainlength = ((cookiec - (j - 1)) *
4702 + sizeof (MPI2_IEEE_SGE_SIMPLE64));
4703 + }
4704 +
4705 + p = cmd->cmd_extra_frames;
4706 +
4707 + ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
4708 + ddi_put32(acc_hdl, &ieeesgechain->Address.Low,
4709 + p->m_phys_addr&0xffffffffull);
4710 + ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr>>32);
4711 +
4712 + /*
4713 + * If there are more than 2 frames left we have to
4714 + * fill in the next chain offset to the location of
4715 + * the chain element in the next frame.
4716 + * sgemax is the number of simple elements in an extra
4717 + * frame. Note that the value NextChainOffset should be
 4718 +	 * in units of 16 bytes (hence the >> 4 below).
4719 + */
4720 + if (frames >= 2) {
4721 + ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
4722 + (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
4723 + } else {
4724 + ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
4725 + }
4726 +
4727 + /*
4728 + * Jump to next frame;
4729 + * Starting here, chain buffers go into the per command SGL.
4730 + * This buffer is allocated when chain buffers are needed.
4731 + */
4732 + ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
4733 + i = cookiec;
4734 +
4735 + /*
4736 + * Start filling in frames with SGE's. If we
4737 + * reach the end of frame and still have SGE's
4738 + * to fill we need to add a chain element and
4739 + * use another frame. j will be our counter
4740 + * for what cookie we are at and i will be
4741 + * the total cookiec. k is the current frame
4742 + */
4743 + for (k = 1; k <= frames; k++) {
4744 + for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4745 +
4746 + /*
4747 + * If we have reached the end of frame
4748 + * and we have more SGE's to fill in
4749 + * we have to fill the final entry
4750 + * with a chain element and then
4751 + * continue to the next frame
4752 + */
4753 + if ((l == (sgemax + 1)) && (k != frames)) {
4754 + ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
4755 + j--;
4756 + chainflags =
4757 + MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
4758 + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
4759 + ddi_put8(p->m_acc_hdl,
4760 + &ieeesgechain->Flags, chainflags);
4524 4761 /*
4525 - * If we are at the end of the frame and
4526 - * there is another frame to fill in
4527 - * we set the last simple element as last
4528 - * element
4762 + * k is the frame counter and (k + 1)
4763 + * is the number of the next frame.
4764 + * Note that frames are in contiguous
4765 + * memory space.
4529 4766 */
4530 - if ((l == sgemax) && (k != frames)) {
4531 - flags |= ((uint32_t)
4532 - (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4533 - MPI2_SGE_FLAGS_SHIFT);
4534 - }
4767 + nframe_phys_addr = p->m_phys_addr +
4768 + (mpt->m_req_frame_size * k);
4769 + ddi_put32(p->m_acc_hdl,
4770 + &ieeesgechain->Address.Low,
4771 + nframe_phys_addr&0xffffffffull);
4772 + ddi_put32(p->m_acc_hdl,
4773 + &ieeesgechain->Address.High,
4774 + nframe_phys_addr>>32);
4535 4775
4536 4776 /*
4537 - * If this is the final cookie we
4538 - * indicate it by setting the flags
4777 + * If there are more than 2 frames left
 4778 +				 * we have to set the next chain offset to
4779 + * the location of the chain element
4780 + * in the next frame and fill in the
4781 + * length of the next chain
4539 4782 */
4540 - if (j == i) {
4541 - flags |= ((uint32_t)
4542 - (MPI2_SGE_FLAGS_LAST_ELEMENT |
4543 - MPI2_SGE_FLAGS_END_OF_BUFFER |
4544 - MPI2_SGE_FLAGS_END_OF_LIST) <<
4545 - MPI2_SGE_FLAGS_SHIFT);
4546 - }
4547 - if (cmd->cmd_flags & CFLAG_DMASEND) {
4548 - flags |=
4549 - (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4550 - MPI2_SGE_FLAGS_SHIFT);
4783 + if ((frames - k) >= 2) {
4784 + ddi_put8(p->m_acc_hdl,
4785 + &ieeesgechain->NextChainOffset,
4786 + (sgemax *
4787 + sizeof (MPI2_IEEE_SGE_SIMPLE64))
4788 + >> 4);
4789 + ddi_put32(p->m_acc_hdl,
4790 + &ieeesgechain->Length,
4791 + mpt->m_req_frame_size /
4792 + sizeof (MPI2_IEEE_SGE_SIMPLE64) *
4793 + sizeof (MPI2_IEEE_SGE_SIMPLE64));
4551 4794 } else {
4552 - flags |=
4553 - (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4554 - MPI2_SGE_FLAGS_SHIFT);
4795 + /*
4796 + * This is the last frame. Set
4797 + * the NextChainOffset to 0 and
4798 + * Length is the total size of
4799 + * all remaining simple elements
4800 + */
4801 + ddi_put8(p->m_acc_hdl,
4802 + &ieeesgechain->NextChainOffset,
4803 + 0);
4804 + ddi_put32(p->m_acc_hdl,
4805 + &ieeesgechain->Length,
4806 + (cookiec - j) *
4807 + sizeof (MPI2_IEEE_SGE_SIMPLE64));
4555 4808 }
4556 - ddi_put32(p->m_acc_hdl,
4557 - &sge->FlagsLength, flags);
4558 - dmap++;
4559 - sge++;
4809 +
4810 + /* Jump to the next frame */
4811 + ieeesge = (pMpi2IeeeSgeSimple64_t)
4812 + ((char *)p->m_frames_addr +
4813 + (int)mpt->m_req_frame_size * k);
4814 +
4815 + continue;
4560 4816 }
4817 +
4818 + ddi_put32(p->m_acc_hdl,
4819 + &ieeesge->Address.Low,
4820 + dmap->addr.address64.Low);
4821 + ddi_put32(p->m_acc_hdl,
4822 + &ieeesge->Address.High,
4823 + dmap->addr.address64.High);
4824 + ddi_put32(p->m_acc_hdl,
4825 + &ieeesge->Length, dmap->count);
4826 + flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4827 + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4828 +
4829 + /*
4830 + * If we are at the end of the frame and
4831 + * there is another frame to fill in
4832 + * do we need to do anything?
4833 + * if ((l == sgemax) && (k != frames)) {
4834 + * }
4835 + */
4836 +
4837 + /*
4838 + * If this is the final cookie set end of list.
4839 + */
4840 + if (j == i) {
4841 + flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
4842 + }
4843 +
4844 + ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
4845 + dmap++;
4846 + ieeesge++;
4561 4847 }
4848 + }
4562 4849
4563 - /*
4564 - * Sync DMA with the chain buffers that were just created
4565 - */
4566 - (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4850 + /*
4851 + * Sync DMA with the chain buffers that were just created
4852 + */
4853 + (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4854 +}
4855 +
4856 +static void
4857 +mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4858 + pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4859 +{
4860 + ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4861 +
4862 + NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
4863 +
4864 + /*
4865 + * Set read/write bit in control.
4866 + */
4867 + if (cmd->cmd_flags & CFLAG_DMASEND) {
4868 + *control |= MPI2_SCSIIO_CONTROL_WRITE;
4869 + } else {
4870 + *control |= MPI2_SCSIIO_CONTROL_READ;
4567 4871 }
4872 +
4873 + ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4874 +
4875 + /*
4876 + * We have 4 cases here. First where we can fit all the
4877 + * SG elements into the main frame, and the case
4878 + * where we can't. The SG element is also different when using
4879 + * MPI2.5 interface.
4880 + * If we have more cookies than we can attach to a frame
4881 + * we will need to use a chain element to point
4882 + * a location of memory where the rest of the S/G
4883 + * elements reside.
4884 + */
4885 + if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4886 + if (mpt->m_MPI25) {
4887 + mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
4888 + cmd->cmd_cookiec,
4889 + MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
4890 + } else {
4891 + mptsas_sge_mainframe(cmd, frame, acc_hdl,
4892 + cmd->cmd_cookiec,
4893 + ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4894 + | MPI2_SGE_FLAGS_END_OF_BUFFER
4895 + | MPI2_SGE_FLAGS_END_OF_LIST) <<
4896 + MPI2_SGE_FLAGS_SHIFT));
4897 + }
4898 + } else {
4899 + if (mpt->m_MPI25) {
4900 + mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
4901 + } else {
4902 + mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
4903 + }
4904 + }
4568 4905 }
4569 4906
4570 4907 /*
4571 4908 * Interrupt handling
4572 4909 * Utility routine. Poll for status of a command sent to HBA
4573 4910 * without interrupts (a FLAG_NOINTR command).
4574 4911 */
4575 4912 int
4576 4913 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4577 4914 {
4578 4915 int rval = TRUE;
4579 4916
4580 4917 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4581 4918
4582 4919 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4583 4920 mptsas_restart_hba(mpt);
4584 4921 }
4585 4922
4586 4923 /*
4587 4924 * Wait, using drv_usecwait(), long enough for the command to
4588 4925 * reasonably return from the target if the target isn't
4589 4926 * "dead". A polled command may well be sent from scsi_poll, and
4590 4927 * there are retries built in to scsi_poll if the transport
4591 4928 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4592 4929 * and retries the transport up to scsi_poll_busycnt times
4593 4930 * (currently 60) if
4594 4931 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4595 4932 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4596 4933 *
4597 4934 * limit the waiting to avoid a hang in the event that the
4598 4935 * cmd never gets started but we are still receiving interrupts
4599 4936 */
4600 4937 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4601 4938 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4602 4939 NDBG5(("mptsas_poll: command incomplete"));
4603 4940 rval = FALSE;
4604 4941 break;
4605 4942 }
4606 4943 }
4607 4944
4608 4945 if (rval == FALSE) {
4609 4946
4610 4947 /*
4611 4948 * this isn't supposed to happen, the hba must be wedged
4612 4949 * Mark this cmd as a timeout.
4613 4950 */
4614 4951 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4615 4952 (STAT_TIMEOUT|STAT_ABORTED));
4616 4953
4617 4954 if (poll_cmd->cmd_queued == FALSE) {
4618 4955
4619 4956 NDBG5(("mptsas_poll: not on waitq"));
4620 4957
4621 4958 poll_cmd->cmd_pkt->pkt_state |=
4622 4959 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4623 4960 } else {
4624 4961
4625 4962 /* find and remove it from the waitq */
4626 4963 NDBG5(("mptsas_poll: delete from waitq"));
4627 4964 mptsas_waitq_delete(mpt, poll_cmd);
4628 4965 }
4629 4966
4630 4967 }
4631 4968 mptsas_fma_check(mpt, poll_cmd);
4632 4969 NDBG5(("mptsas_poll: done"));
4633 4970 return (rval);
4634 4971 }
4635 4972
4636 4973 /*
4637 4974 * Used for polling cmds and TM function
4638 4975 */
4639 4976 static int
4640 4977 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4641 4978 {
4642 4979 int cnt;
4643 4980 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
4644 4981 uint32_t int_mask;
4645 4982
4646 4983 NDBG5(("mptsas_wait_intr"));
4647 4984
4648 4985 mpt->m_polled_intr = 1;
4649 4986
4650 4987 /*
4651 4988 * Get the current interrupt mask and disable interrupts. When
4652 4989 * re-enabling ints, set mask to saved value.
4653 4990 */
4654 4991 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4655 4992 MPTSAS_DISABLE_INTR(mpt);
4656 4993
4657 4994 /*
 4658 4995 	 * Keep polling for at least polltime milliseconds (1000 us per pass)
4659 4996 */
4660 4997 for (cnt = 0; cnt < polltime; cnt++) {
4661 4998 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4662 4999 DDI_DMA_SYNC_FORCPU);
4663 5000
4664 5001 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4665 5002 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
4666 5003
4667 5004 if (ddi_get32(mpt->m_acc_post_queue_hdl,
4668 5005 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4669 5006 ddi_get32(mpt->m_acc_post_queue_hdl,
4670 5007 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
4671 5008 drv_usecwait(1000);
4672 5009 continue;
4673 5010 }
4674 5011
4675 5012 /*
4676 5013 * The reply is valid, process it according to its
4677 5014 * type.
4678 5015 */
4679 5016 mptsas_process_intr(mpt, reply_desc_union);
4680 5017
4681 5018 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
4682 5019 mpt->m_post_index = 0;
4683 5020 }
4684 5021
4685 5022 /*
4686 5023 * Update the global reply index
4687 5024 */
4688 5025 ddi_put32(mpt->m_datap,
4689 5026 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
4690 5027 mpt->m_polled_intr = 0;
4691 5028
4692 5029 /*
4693 5030 * Re-enable interrupts and quit.
4694 5031 */
4695 5032 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
4696 5033 int_mask);
4697 5034 return (TRUE);
4698 5035
4699 5036 }
4700 5037
4701 5038 /*
4702 5039 * Clear polling flag, re-enable interrupts and quit.
4703 5040 */
4704 5041 mpt->m_polled_intr = 0;
4705 5042 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
4706 5043 return (FALSE);
4707 5044 }
4708 5045
4709 5046 static void
4710 5047 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4711 5048 pMpi2ReplyDescriptorsUnion_t reply_desc)
4712 5049 {
4713 5050 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4714 5051 uint16_t SMID;
4715 5052 mptsas_slots_t *slots = mpt->m_active;
4716 5053 mptsas_cmd_t *cmd = NULL;
4717 5054 struct scsi_pkt *pkt;
4718 5055
4719 5056 ASSERT(mutex_owned(&mpt->m_mutex));
4720 5057
4721 5058 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4722 5059 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4723 5060
4724 5061 /*
4725 5062 * This is a success reply so just complete the IO. First, do a sanity
4726 5063 * check on the SMID. The final slot is used for TM requests, which
4727 5064 * would not come into this reply handler.
4728 5065 */
4729 5066 if ((SMID == 0) || (SMID > slots->m_n_normal)) {
4730 5067 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4731 5068 SMID);
4732 5069 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4733 5070 return;
4734 5071 }
4735 5072
4736 5073 cmd = slots->m_slot[SMID];
4737 5074
4738 5075 /*
4739 5076 * print warning and return if the slot is empty
4740 5077 */
4741 5078 if (cmd == NULL) {
4742 5079 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4743 5080 "in slot %d", SMID);
4744 5081 return;
4745 5082 }
4746 5083
4747 5084 pkt = CMD2PKT(cmd);
4748 5085 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4749 5086 STATE_GOT_STATUS);
4750 5087 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4751 5088 pkt->pkt_state |= STATE_XFERRED_DATA;
4752 5089 }
4753 5090 pkt->pkt_resid = 0;
4754 5091
4755 5092 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4756 5093 cmd->cmd_flags |= CFLAG_FINISHED;
4757 5094 cv_broadcast(&mpt->m_passthru_cv);
4758 5095 return;
4759 5096 } else {
4760 5097 mptsas_remove_cmd(mpt, cmd);
4761 5098 }
4762 5099
4763 5100 if (cmd->cmd_flags & CFLAG_RETRY) {
4764 5101 /*
 4765 5102 	 * The target returned QFULL or busy, do not add this
4766 5103 * pkt to the doneq since the hba will retry
4767 5104 * this cmd.
4768 5105 *
4769 5106 * The pkt has already been resubmitted in
4770 5107 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4771 5108 * Remove this cmd_flag here.
4772 5109 */
4773 5110 cmd->cmd_flags &= ~CFLAG_RETRY;
4774 5111 } else {
4775 5112 mptsas_doneq_add(mpt, cmd);
4776 5113 }
4777 5114 }
4778 5115
/*
 * Process an ADDRESS_REPLY descriptor: locate the full reply frame in host
 * memory, route it to the proper handler based on the MPI function it
 * completes, and return the frame to the free queue when this routine
 * (rather than a waiting thread) owns it.
 *
 * Called from interrupt context with m_mutex held.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range (inside the reply frame
	 * pool and aligned to a frame boundary) we should ignore this
	 * message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	/* Make the DMA'd reply frame visible to the CPU before reading it. */
	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		if ((cmd->cmd_flags &
		    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
			/*
			 * A thread is blocked waiting for this reply; hand it
			 * the frame address and wake all three waiter classes
			 * (the cmd flag identifies the real owner).  The
			 * waiter is then responsible for the reply frame.
			 */
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			NDBG20(("dropping event received during reset"));
			return;
		}

		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* Firmware-internal commands are completed by their initiators. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5005 5342
/*
 * Decode the IOC/SCSI status of a completed SCSI IO and translate it into
 * the scsi_pkt state/reason/resid/ARQ fields the target driver expects.
 * Also drives side effects required by particular statuses: throttling a
 * missing device, retrying on INSUFFICIENT_RESOURCES/BUSY, queue-full
 * handling, and dispatching dynamic reconfiguration on certain sense data.
 *
 * Called with m_mutex held from mptsas_handle_address_reply().
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	struct buf		*bp;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;
	uint64_t		sas_wwn;
	uint8_t			phy;
	char			wwn_str[MPTSAS_WWN_STRLEN];

	/*
	 * Pick the auto-request-sense buffer: the external one only when
	 * both external-SCB and external-ARQ flags are set.
	 */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		/* Identify the target by WWN, or PHY number if it has none. */
		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;
		if (sas_wwn == 0) {
			(void) sprintf(wwn_str, "p%x", phy);
		} else {
			(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
		}
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d %s.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		/* Device is gone: fail the command and drain the target. */
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte of ResponseInfo carries the code. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/* Build the auto-request-sense (ARQ) status in the pkt. */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state  = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* Copy no more sense bytes than the caller's buffer holds. */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			/*
			 * LUN configuration changed: dispatch a dynamic
			 * reconfiguration of this target to the DR taskq.
			 */
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			if (cmd->cmd_active_expiration <= gethrtime()) {
				/*
				 * When timeout requested, propagate
				 * proper reason and statistics to
				 * target drivers.
				 */
				mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
				    STAT_BUS_RESET | STAT_TIMEOUT);
			} else {
				mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
				    STAT_BUS_RESET);
			}
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5273 5610
5274 5611 static void
5275 5612 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5276 5613 mptsas_cmd_t *cmd)
5277 5614 {
5278 5615 uint8_t task_type;
5279 5616 uint16_t ioc_status;
5280 5617 uint32_t log_info;
5281 5618 uint16_t dev_handle;
5282 5619 struct scsi_pkt *pkt = CMD2PKT(cmd);
5283 5620
5284 5621 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5285 5622 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5286 5623 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5287 5624 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5288 5625
5289 5626 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5290 5627 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5291 5628 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5292 5629 task_type, ioc_status, log_info, dev_handle);
5293 5630 pkt->pkt_reason = CMD_INCOMPLETE;
5294 5631 return;
5295 5632 }
5296 5633
5297 5634 switch (task_type) {
5298 5635 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5299 5636 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5300 5637 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5301 5638 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5302 5639 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5303 5640 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5304 5641 break;
5305 5642 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5306 5643 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5307 5644 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5308 5645 /*
5309 5646 * Check for invalid DevHandle of 0 in case application
5310 5647 * sends bad command. DevHandle of 0 could cause problems.
5311 5648 */
5312 5649 if (dev_handle == 0) {
5313 5650 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5314 5651 " DevHandle of 0.");
5315 5652 } else {
5316 5653 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5317 5654 task_type);
5318 5655 }
5319 5656 break;
5320 5657 default:
5321 5658 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5322 5659 task_type);
5323 5660 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5324 5661 break;
5325 5662 }
5326 5663 }
5327 5664
/*
 * Helper thread that completes packets from its per-thread done queue,
 * so completion callbacks run off the interrupt CPU when load is high.
 * Runs until MPTSAS_DONEQ_THREAD_ACTIVE is cleared, then signals the
 * thread-shutdown waiter on m_doneq_thread_cv before exiting.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		if (!item->doneq) {
			/* Nothing queued; sleep until work or shutdown. */
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list lock while running the target driver's
		 * completion callback; it may take arbitrary time.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Announce this thread's exit to mptsas_doneq_thread teardown. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5359 5696
5360 5697
5361 5698 /*
5362 5699 * mpt interrupt handler.
5363 5700 */
/*
 * Primary interrupt handler.  Claims the interrupt only when the chip is
 * powered up, not being polled, and has pending replies; then drains the
 * reply post queue, updates the host index register, and completes or
 * hands off finished commands.
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
	mptsas_t			*mpt = (void *)arg1;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uchar_t				did_reply = FALSE;

	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

	mutex_enter(&mpt->m_mutex);

	/*
	 * If interrupts are shared by two channels then check whether this
	 * interrupt is genuinely for this channel by making sure first the
	 * chip is in high power state.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * If polling, interrupt was triggered by some shared interrupt because
	 * IOC interrupts are disabled during polling, so polling routine will
	 * handle any replies.  Considering this, if polling is happening,
	 * return with interrupt unclaimed.
	 */
	if (mpt->m_polled_intr) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Read the istat register.
	 */
	if ((INTPENDING(mpt)) != 0) {
		/*
		 * read fifo until empty.
		 */
#ifndef __lock_lint
		_NOTE(CONSTCOND)
#endif
		while (TRUE) {
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

			/*
			 * An all-ones descriptor marks an unused slot:
			 * the queue is drained.
			 */
			if (ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
				break;
			}

			/*
			 * The reply is valid, process it according to its
			 * type.  Also, set a flag for updating the reply index
			 * after they've all been processed.
			 */
			did_reply = TRUE;

			mptsas_process_intr(mpt, reply_desc_union);

			/*
			 * Increment post index and roll over if needed.
			 */
			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
				mpt->m_post_index = 0;
			}
		}

		/*
		 * Update the global reply index if at least one reply was
		 * processed.
		 */
		if (did_reply) {
			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	NDBG1(("mptsas_intr complete"));

	/*
	 * If no helper threads are created, process the doneq in ISR.  If
	 * helpers are created, use the doneq length as a metric to measure the
	 * load on the interrupt CPU.  If it is long enough, which indicates the
	 * load is heavy, then we deliver the IO completions to the helpers.
	 * This measurement has some limitations, although it is simple and
	 * straightforward and works well for most of the cases at present.
	 */
	if (!mpt->m_doneq_thread_n ||
	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
		mptsas_doneq_empty(mpt);
	} else {
		mptsas_deliver_doneq_thread(mpt);
	}

	/*
	 * If there are queued cmd, start them now.
	 */
	if (mpt->m_waitq != NULL) {
		mptsas_restart_waitq(mpt);
	}

	mutex_exit(&mpt->m_mutex);
	return (DDI_INTR_CLAIMED);
}
5477 5814
5478 5815 static void
5479 5816 mptsas_process_intr(mptsas_t *mpt,
5480 5817 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5481 5818 {
5482 5819 uint8_t reply_type;
5483 5820
|
↓ open down ↓ |
906 lines elided |
↑ open up ↑ |
5484 5821 ASSERT(mutex_owned(&mpt->m_mutex));
5485 5822
5486 5823 /*
5487 5824 * The reply is valid, process it according to its
5488 5825 * type. Also, set a flag for updated the reply index
5489 5826 * after they've all been processed.
5490 5827 */
5491 5828 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5492 5829 &reply_desc_union->Default.ReplyFlags);
5493 5830 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5494 - if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5831 + if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
5832 + reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
5495 5833 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5496 5834 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5497 5835 mptsas_handle_address_reply(mpt, reply_desc_union);
5498 5836 } else {
5499 5837 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5500 5838 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5501 5839 }
5502 5840
5503 5841 /*
5504 5842 * Clear the reply descriptor for re-use and increment
5505 5843 * index.
5506 5844 */
5507 5845 ddi_put64(mpt->m_acc_post_queue_hdl,
5508 5846 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5509 5847 0xFFFFFFFFFFFFFFFF);
5510 5848 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5511 5849 DDI_DMA_SYNC_FORDEV);
5512 5850 }
5513 5851
5514 5852 /*
5515 5853 * handle qfull condition
5516 5854 */
/*
 * React to a TASK SET FULL (queue full) status from a target: either give
 * up and let the target driver's own QFULL handling see the status, or
 * resubmit the command at the head of the queue with a reduced throttle.
 *
 * Called with m_mutex held.
 */
static void
mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
	    (ptgt->m_qfull_retries == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0.  In either case
		 * we want the target driver's QFULL handling
		 * to kick in.  We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	} else {
		if (ptgt->m_reset_delay == 0) {
			/* Throttle down to current load minus two. */
			ptgt->m_t_throttle =
			    max((ptgt->m_t_ncmds - 2), 0);
		}

		/* Requeue at the head; drop transport flags, mark retry. */
		cmd->cmd_pkt_flags |= FLAG_HEAD;
		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
		cmd->cmd_flags |= CFLAG_RETRY;

		(void) mptsas_accept_pkt(mpt, cmd);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (m_t_ncmds == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling start
		 * (see psarc/1994/313); if there are commands outstanding,
		 * throttle is set to (m_t_ncmds - 2)
		 */
		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * mptsas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
			if (mpt->m_restart_cmd_timeid == 0) {
				mpt->m_restart_cmd_timeid =
				    timeout(mptsas_restart_cmd, mpt,
				    ptgt->m_qfull_retry_interval);
			}
		}
	}
}
5569 5907
5570 5908 mptsas_phymask_t
5571 5909 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5572 5910 {
5573 5911 mptsas_phymask_t phy_mask = 0;
5574 5912 uint8_t i = 0;
5575 5913
5576 5914 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5577 5915
5578 5916 ASSERT(mutex_owned(&mpt->m_mutex));
5579 5917
5580 5918 /*
5581 5919 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5582 5920 */
5583 5921 if (physport == 0xFF) {
5584 5922 return (0);
5585 5923 }
5586 5924
5587 5925 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5588 5926 if (mpt->m_phy_info[i].attached_devhdl &&
5589 5927 (mpt->m_phy_info[i].phy_mask != 0) &&
5590 5928 (mpt->m_phy_info[i].port_num == physport)) {
5591 5929 phy_mask = mpt->m_phy_info[i].phy_mask;
5592 5930 break;
5593 5931 }
5594 5932 }
5595 5933 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5596 5934 mpt->m_instance, physport, phy_mask));
5597 5935 return (phy_mask);
5598 5936 }
5599 5937
5600 5938 /*
5601 5939 * mpt free device handle after device gone, by use of passthrough
5602 5940 */
5603 5941 static int
5604 5942 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5605 5943 {
5606 5944 Mpi2SasIoUnitControlRequest_t req;
5607 5945 Mpi2SasIoUnitControlReply_t rep;
5608 5946 int ret;
5609 5947
5610 5948 ASSERT(mutex_owned(&mpt->m_mutex));
5611 5949
5612 5950 /*
5613 5951 * Need to compose a SAS IO Unit Control request message
5614 5952 * and call mptsas_do_passthru() function
5615 5953 */
5616 5954 bzero(&req, sizeof (req));
5617 5955 bzero(&rep, sizeof (rep));
5618 5956
5619 5957 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5620 5958 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5621 5959 req.DevHandle = LE_16(devhdl);
5622 5960
5623 5961 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5624 5962 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5625 5963 if (ret != 0) {
5626 5964 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5627 5965 "Control error %d", ret);
5628 5966 return (DDI_FAILURE);
5629 5967 }
5630 5968
5631 5969 /* do passthrough success, check the ioc status */
5632 5970 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5633 5971 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5634 5972 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5635 5973 return (DDI_FAILURE);
5636 5974 }
5637 5975
5638 5976 return (DDI_SUCCESS);
5639 5977 }
5640 5978
5641 5979 static void
5642 5980 mptsas_update_phymask(mptsas_t *mpt)
5643 5981 {
5644 5982 mptsas_phymask_t mask = 0, phy_mask;
5645 5983 char *phy_mask_name;
5646 5984 uint8_t current_port;
5647 5985 int i, j;
5648 5986
5649 5987 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5650 5988
5651 5989 ASSERT(mutex_owned(&mpt->m_mutex));
5652 5990
5653 5991 (void) mptsas_get_sas_io_unit_page(mpt);
5654 5992
5655 5993 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5656 5994
5657 5995 for (i = 0; i < mpt->m_num_phys; i++) {
5658 5996 phy_mask = 0x00;
5659 5997
5660 5998 if (mpt->m_phy_info[i].attached_devhdl == 0)
5661 5999 continue;
5662 6000
5663 6001 bzero(phy_mask_name, sizeof (phy_mask_name));
5664 6002
5665 6003 current_port = mpt->m_phy_info[i].port_num;
5666 6004
5667 6005 if ((mask & (1 << i)) != 0)
5668 6006 continue;
5669 6007
5670 6008 for (j = 0; j < mpt->m_num_phys; j++) {
5671 6009 if (mpt->m_phy_info[j].attached_devhdl &&
5672 6010 (mpt->m_phy_info[j].port_num == current_port)) {
5673 6011 phy_mask |= (1 << j);
5674 6012 }
5675 6013 }
5676 6014 mask = mask | phy_mask;
5677 6015
5678 6016 for (j = 0; j < mpt->m_num_phys; j++) {
5679 6017 if ((phy_mask >> j) & 0x01) {
5680 6018 mpt->m_phy_info[j].phy_mask = phy_mask;
5681 6019 }
5682 6020 }
5683 6021
5684 6022 (void) sprintf(phy_mask_name, "%x", phy_mask);
5685 6023
5686 6024 mutex_exit(&mpt->m_mutex);
5687 6025 /*
5688 6026 * register a iport, if the port has already been existed
5689 6027 * SCSA will do nothing and just return.
5690 6028 */
5691 6029 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5692 6030 mutex_enter(&mpt->m_mutex);
5693 6031 }
5694 6032 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5695 6033 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5696 6034 }
5697 6035
/*
 * mptsas_handle_dr is a task handler for DR, the DR action includes:
 * 1. Directly attached Device Added/Removed.
 * 2. Expander Device Added/Removed.
 * 3. Indirectly Attached Device Added/Removed.
 * 4. LUNs of an existing device status change.
 * 5. RAID volume created/deleted.
 * 6. Member of RAID volume is released because of RAID deletion.
 * 7. Physical disks are removed because of RAID creation.
 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	/* Scratch buffer used to build the iport name for lookup. */
	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide from the head node whether this change may alter the
	 * set of initiator ports (and thus require a phymask refresh).
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online. So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * Walk the topology change list.  Each node is freed after it is
	 * handled; 'parent' caches the iport dip across consecutive nodes
	 * that share the same expander parent.
	 */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed. If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					/* Free the skipped node and advance. */
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC. We must free the topo list, however.
		 */
		if (!mpt->m_in_reset)
			mptsas_handle_topo_change(topo_node, parent);
		else
			NDBG20(("skipping topo change received during reset"));
		/* Unlink and free the node just handled. */
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent. Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
5893 6231
5894 6232 static void
5895 6233 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
5896 6234 dev_info_t *parent)
5897 6235 {
5898 6236 mptsas_target_t *ptgt = NULL;
5899 6237 mptsas_smp_t *psmp = NULL;
5900 6238 mptsas_t *mpt = (void *)topo_node->mpt;
5901 6239 uint16_t devhdl;
5902 6240 uint16_t attached_devhdl;
5903 6241 uint64_t sas_wwn = 0;
5904 6242 int rval = 0;
5905 6243 uint32_t page_address;
5906 6244 uint8_t phy, flags;
5907 6245 char *addr = NULL;
5908 6246 dev_info_t *lundip;
5909 6247 int circ = 0, circ1 = 0;
5910 6248 char attached_wwnstr[MPTSAS_WWN_STRLEN];
5911 6249
5912 6250 NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));
5913 6251
5914 6252 ASSERT(mutex_owned(&mpt->m_mutex));
5915 6253
5916 6254 switch (topo_node->event) {
5917 6255 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5918 6256 {
5919 6257 char *phy_mask_name;
5920 6258 mptsas_phymask_t phymask = 0;
5921 6259
5922 6260 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5923 6261 /*
5924 6262 * Get latest RAID info.
5925 6263 */
5926 6264 (void) mptsas_get_raid_info(mpt);
5927 6265 ptgt = refhash_linear_search(mpt->m_targets,
5928 6266 mptsas_target_eval_devhdl, &topo_node->devhdl);
5929 6267 if (ptgt == NULL)
5930 6268 break;
5931 6269 } else {
5932 6270 ptgt = (void *)topo_node->object;
5933 6271 }
5934 6272
5935 6273 if (ptgt == NULL) {
5936 6274 /*
5937 6275 * If a Phys Disk was deleted, RAID info needs to be
5938 6276 * updated to reflect the new topology.
5939 6277 */
5940 6278 (void) mptsas_get_raid_info(mpt);
5941 6279
5942 6280 /*
5943 6281 * Get sas device page 0 by DevHandle to make sure if
5944 6282 * SSP/SATA end device exist.
5945 6283 */
5946 6284 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
5947 6285 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
5948 6286 topo_node->devhdl;
5949 6287
5950 6288 rval = mptsas_get_target_device_info(mpt, page_address,
5951 6289 &devhdl, &ptgt);
5952 6290 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
5953 6291 mptsas_log(mpt, CE_NOTE,
5954 6292 "mptsas_handle_topo_change: target %d is "
5955 6293 "not a SAS/SATA device. \n",
5956 6294 topo_node->devhdl);
5957 6295 } else if (rval == DEV_INFO_FAIL_ALLOC) {
5958 6296 mptsas_log(mpt, CE_NOTE,
5959 6297 "mptsas_handle_topo_change: could not "
5960 6298 "allocate memory. \n");
5961 6299 }
5962 6300 /*
5963 6301 * If rval is DEV_INFO_PHYS_DISK than there is nothing
5964 6302 * else to do, just leave.
5965 6303 */
5966 6304 if (rval != DEV_INFO_SUCCESS) {
5967 6305 return;
5968 6306 }
5969 6307 }
5970 6308
5971 6309 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
5972 6310
5973 6311 mutex_exit(&mpt->m_mutex);
5974 6312 flags = topo_node->flags;
5975 6313
5976 6314 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
5977 6315 phymask = ptgt->m_addr.mta_phymask;
5978 6316 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5979 6317 (void) sprintf(phy_mask_name, "%x", phymask);
5980 6318 parent = scsi_hba_iport_find(mpt->m_dip,
5981 6319 phy_mask_name);
5982 6320 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5983 6321 if (parent == NULL) {
5984 6322 mptsas_log(mpt, CE_WARN, "Failed to find a "
5985 6323 "iport for PD, should not happen!");
5986 6324 mutex_enter(&mpt->m_mutex);
5987 6325 break;
5988 6326 }
5989 6327 }
5990 6328
5991 6329 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5992 6330 ndi_devi_enter(parent, &circ1);
5993 6331 (void) mptsas_config_raid(parent, topo_node->devhdl,
5994 6332 &lundip);
5995 6333 ndi_devi_exit(parent, circ1);
5996 6334 } else {
5997 6335 /*
5998 6336 * hold nexus for bus configure
5999 6337 */
6000 6338 ndi_devi_enter(scsi_vhci_dip, &circ);
6001 6339 ndi_devi_enter(parent, &circ1);
6002 6340 rval = mptsas_config_target(parent, ptgt);
6003 6341 /*
6004 6342 * release nexus for bus configure
6005 6343 */
6006 6344 ndi_devi_exit(parent, circ1);
6007 6345 ndi_devi_exit(scsi_vhci_dip, circ);
6008 6346
6009 6347 /*
6010 6348 * Add parent's props for SMHBA support
6011 6349 */
6012 6350 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6013 6351 bzero(attached_wwnstr,
6014 6352 sizeof (attached_wwnstr));
6015 6353 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
6016 6354 ptgt->m_addr.mta_wwn);
6017 6355 if (ddi_prop_update_string(DDI_DEV_T_NONE,
6018 6356 parent,
6019 6357 SCSI_ADDR_PROP_ATTACHED_PORT,
6020 6358 attached_wwnstr)
6021 6359 != DDI_PROP_SUCCESS) {
6022 6360 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6023 6361 parent,
6024 6362 SCSI_ADDR_PROP_ATTACHED_PORT);
6025 6363 mptsas_log(mpt, CE_WARN, "Failed to"
6026 6364 "attached-port props");
6027 6365 return;
6028 6366 }
6029 6367 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6030 6368 MPTSAS_NUM_PHYS, 1) !=
6031 6369 DDI_PROP_SUCCESS) {
6032 6370 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6033 6371 parent, MPTSAS_NUM_PHYS);
6034 6372 mptsas_log(mpt, CE_WARN, "Failed to"
6035 6373 " create num-phys props");
6036 6374 return;
6037 6375 }
6038 6376
6039 6377 /*
6040 6378 * Update PHY info for smhba
6041 6379 */
6042 6380 mutex_enter(&mpt->m_mutex);
6043 6381 if (mptsas_smhba_phy_init(mpt)) {
6044 6382 mutex_exit(&mpt->m_mutex);
6045 6383 mptsas_log(mpt, CE_WARN, "mptsas phy"
6046 6384 " update failed");
6047 6385 return;
6048 6386 }
6049 6387 mutex_exit(&mpt->m_mutex);
6050 6388
6051 6389 /*
6052 6390 * topo_node->un.physport is really the PHY#
6053 6391 * for direct attached devices
6054 6392 */
6055 6393 mptsas_smhba_set_one_phy_props(mpt, parent,
6056 6394 topo_node->un.physport, &attached_devhdl);
6057 6395
6058 6396 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6059 6397 MPTSAS_VIRTUAL_PORT, 0) !=
6060 6398 DDI_PROP_SUCCESS) {
6061 6399 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6062 6400 parent, MPTSAS_VIRTUAL_PORT);
6063 6401 mptsas_log(mpt, CE_WARN,
6064 6402 "mptsas virtual-port"
6065 6403 "port prop update failed");
6066 6404 return;
6067 6405 }
6068 6406 }
6069 6407 }
6070 6408 mutex_enter(&mpt->m_mutex);
6071 6409
6072 6410 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6073 6411 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6074 6412 ptgt->m_addr.mta_phymask));
6075 6413 break;
6076 6414 }
6077 6415 case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6078 6416 {
6079 6417 devhdl = topo_node->devhdl;
6080 6418 ptgt = refhash_linear_search(mpt->m_targets,
6081 6419 mptsas_target_eval_devhdl, &devhdl);
6082 6420 if (ptgt == NULL)
6083 6421 break;
6084 6422
6085 6423 sas_wwn = ptgt->m_addr.mta_wwn;
6086 6424 phy = ptgt->m_phynum;
6087 6425
6088 6426 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6089 6427
6090 6428 if (sas_wwn) {
6091 6429 (void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6092 6430 } else {
6093 6431 (void) sprintf(addr, "p%x", phy);
6094 6432 }
6095 6433 ASSERT(ptgt->m_devhdl == devhdl);
6096 6434
6097 6435 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6098 6436 (topo_node->flags ==
6099 6437 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6100 6438 /*
6101 6439 * Get latest RAID info if RAID volume status changes
6102 6440 * or Phys Disk status changes
6103 6441 */
6104 6442 (void) mptsas_get_raid_info(mpt);
6105 6443 }
6106 6444 /*
6107 6445 * Abort all outstanding command on the device
6108 6446 */
6109 6447 rval = mptsas_do_scsi_reset(mpt, devhdl);
6110 6448 if (rval) {
6111 6449 NDBG20(("mptsas%d handle_topo_change to reset target "
6112 6450 "before offline devhdl:%x, phymask:%x, rval:%x",
6113 6451 mpt->m_instance, ptgt->m_devhdl,
6114 6452 ptgt->m_addr.mta_phymask, rval));
6115 6453 }
6116 6454
6117 6455 mutex_exit(&mpt->m_mutex);
6118 6456
6119 6457 ndi_devi_enter(scsi_vhci_dip, &circ);
6120 6458 ndi_devi_enter(parent, &circ1);
6121 6459 rval = mptsas_offline_target(parent, addr);
6122 6460 ndi_devi_exit(parent, circ1);
6123 6461 ndi_devi_exit(scsi_vhci_dip, circ);
6124 6462 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6125 6463 "phymask:%x, rval:%x", mpt->m_instance,
6126 6464 ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6127 6465
6128 6466 kmem_free(addr, SCSI_MAXNAMELEN);
6129 6467
6130 6468 /*
6131 6469 * Clear parent's props for SMHBA support
6132 6470 */
6133 6471 flags = topo_node->flags;
6134 6472 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6135 6473 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6136 6474 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6137 6475 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6138 6476 DDI_PROP_SUCCESS) {
6139 6477 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6140 6478 SCSI_ADDR_PROP_ATTACHED_PORT);
6141 6479 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6142 6480 "prop update failed");
6143 6481 break;
6144 6482 }
6145 6483 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6146 6484 MPTSAS_NUM_PHYS, 0) !=
6147 6485 DDI_PROP_SUCCESS) {
6148 6486 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6149 6487 MPTSAS_NUM_PHYS);
6150 6488 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6151 6489 "prop update failed");
6152 6490 break;
6153 6491 }
6154 6492 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6155 6493 MPTSAS_VIRTUAL_PORT, 1) !=
6156 6494 DDI_PROP_SUCCESS) {
6157 6495 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6158 6496 MPTSAS_VIRTUAL_PORT);
6159 6497 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6160 6498 "prop update failed");
6161 6499 break;
6162 6500 }
6163 6501 }
6164 6502
6165 6503 mutex_enter(&mpt->m_mutex);
6166 6504 ptgt->m_led_status = 0;
6167 6505 (void) mptsas_flush_led_status(mpt, ptgt);
6168 6506 if (rval == DDI_SUCCESS) {
6169 6507 refhash_remove(mpt->m_targets, ptgt);
6170 6508 ptgt = NULL;
6171 6509 } else {
6172 6510 /*
6173 6511 * clean DR_INTRANSITION flag to allow I/O down to
6174 6512 * PHCI driver since failover finished.
6175 6513 * Invalidate the devhdl
6176 6514 */
6177 6515 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6178 6516 ptgt->m_tgt_unconfigured = 0;
6179 6517 mutex_enter(&mpt->m_tx_waitq_mutex);
6180 6518 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6181 6519 mutex_exit(&mpt->m_tx_waitq_mutex);
6182 6520 }
6183 6521
6184 6522 /*
6185 6523 * Send SAS IO Unit Control to free the dev handle
6186 6524 */
6187 6525 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6188 6526 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6189 6527 rval = mptsas_free_devhdl(mpt, devhdl);
6190 6528
6191 6529 NDBG20(("mptsas%d handle_topo_change to remove "
6192 6530 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6193 6531 rval));
6194 6532 }
6195 6533
6196 6534 break;
6197 6535 }
6198 6536 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6199 6537 {
6200 6538 devhdl = topo_node->devhdl;
6201 6539 /*
6202 6540 * If this is the remove handle event, do a reset first.
6203 6541 */
6204 6542 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6205 6543 rval = mptsas_do_scsi_reset(mpt, devhdl);
6206 6544 if (rval) {
6207 6545 NDBG20(("mpt%d reset target before remove "
6208 6546 "devhdl:%x, rval:%x", mpt->m_instance,
6209 6547 devhdl, rval));
6210 6548 }
6211 6549 }
6212 6550
6213 6551 /*
6214 6552 * Send SAS IO Unit Control to free the dev handle
6215 6553 */
6216 6554 rval = mptsas_free_devhdl(mpt, devhdl);
6217 6555 NDBG20(("mptsas%d handle_topo_change to remove "
6218 6556 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6219 6557 rval));
6220 6558 break;
6221 6559 }
6222 6560 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6223 6561 {
6224 6562 mptsas_smp_t smp;
6225 6563 dev_info_t *smpdip;
6226 6564
6227 6565 devhdl = topo_node->devhdl;
6228 6566
6229 6567 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6230 6568 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6231 6569 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6232 6570 if (rval != DDI_SUCCESS) {
6233 6571 mptsas_log(mpt, CE_WARN, "failed to online smp, "
6234 6572 "handle %x", devhdl);
6235 6573 return;
6236 6574 }
6237 6575
6238 6576 psmp = mptsas_smp_alloc(mpt, &smp);
6239 6577 if (psmp == NULL) {
6240 6578 return;
6241 6579 }
6242 6580
6243 6581 mutex_exit(&mpt->m_mutex);
6244 6582 ndi_devi_enter(parent, &circ1);
6245 6583 (void) mptsas_online_smp(parent, psmp, &smpdip);
6246 6584 ndi_devi_exit(parent, circ1);
6247 6585
6248 6586 mutex_enter(&mpt->m_mutex);
6249 6587 break;
6250 6588 }
6251 6589 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6252 6590 {
6253 6591 devhdl = topo_node->devhdl;
6254 6592 uint32_t dev_info;
6255 6593
6256 6594 psmp = refhash_linear_search(mpt->m_smp_targets,
6257 6595 mptsas_smp_eval_devhdl, &devhdl);
6258 6596 if (psmp == NULL)
6259 6597 break;
6260 6598 /*
6261 6599 * The mptsas_smp_t data is released only if the dip is offlined
6262 6600 * successfully.
6263 6601 */
6264 6602 mutex_exit(&mpt->m_mutex);
6265 6603
6266 6604 ndi_devi_enter(parent, &circ1);
6267 6605 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6268 6606 ndi_devi_exit(parent, circ1);
6269 6607
6270 6608 dev_info = psmp->m_deviceinfo;
6271 6609 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6272 6610 DEVINFO_DIRECT_ATTACHED) {
6273 6611 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6274 6612 MPTSAS_VIRTUAL_PORT, 1) !=
6275 6613 DDI_PROP_SUCCESS) {
6276 6614 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6277 6615 MPTSAS_VIRTUAL_PORT);
6278 6616 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6279 6617 "prop update failed");
6280 6618 return;
6281 6619 }
6282 6620 /*
6283 6621 * Check whether the smp connected to the iport,
6284 6622 */
6285 6623 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6286 6624 MPTSAS_NUM_PHYS, 0) !=
6287 6625 DDI_PROP_SUCCESS) {
6288 6626 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6289 6627 MPTSAS_NUM_PHYS);
6290 6628 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6291 6629 "prop update failed");
6292 6630 return;
6293 6631 }
6294 6632 /*
6295 6633 * Clear parent's attached-port props
6296 6634 */
6297 6635 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6298 6636 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6299 6637 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6300 6638 DDI_PROP_SUCCESS) {
6301 6639 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6302 6640 SCSI_ADDR_PROP_ATTACHED_PORT);
6303 6641 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6304 6642 "prop update failed");
6305 6643 return;
6306 6644 }
6307 6645 }
6308 6646
6309 6647 mutex_enter(&mpt->m_mutex);
6310 6648 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6311 6649 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6312 6650 if (rval == DDI_SUCCESS) {
6313 6651 refhash_remove(mpt->m_smp_targets, psmp);
6314 6652 } else {
6315 6653 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6316 6654 }
6317 6655
6318 6656 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6319 6657
6320 6658 break;
6321 6659 }
6322 6660 default:
6323 6661 return;
6324 6662 }
6325 6663 }
6326 6664
6327 6665 /*
6328 6666 * Record the event if its type is enabled in mpt instance by ioctl.
6329 6667 */
6330 6668 static void
6331 6669 mptsas_record_event(void *args)
6332 6670 {
6333 6671 m_replyh_arg_t *replyh_arg;
6334 6672 pMpi2EventNotificationReply_t eventreply;
6335 6673 uint32_t event, rfm;
6336 6674 mptsas_t *mpt;
6337 6675 int i, j;
6338 6676 uint16_t event_data_len;
6339 6677 boolean_t sendAEN = FALSE;
6340 6678
6341 6679 replyh_arg = (m_replyh_arg_t *)args;
6342 6680 rfm = replyh_arg->rfm;
6343 6681 mpt = replyh_arg->mpt;
6344 6682
6345 6683 eventreply = (pMpi2EventNotificationReply_t)
6346 6684 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6347 6685 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6348 6686
6349 6687
6350 6688 /*
6351 6689 * Generate a system event to let anyone who cares know that a
6352 6690 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6353 6691 * event mask is set to.
6354 6692 */
6355 6693 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6356 6694 sendAEN = TRUE;
6357 6695 }
6358 6696
6359 6697 /*
6360 6698 * Record the event only if it is not masked. Determine which dword
6361 6699 * and bit of event mask to test.
6362 6700 */
6363 6701 i = (uint8_t)(event / 32);
6364 6702 j = (uint8_t)(event % 32);
6365 6703 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6366 6704 i = mpt->m_event_index;
6367 6705 mpt->m_events[i].Type = event;
6368 6706 mpt->m_events[i].Number = ++mpt->m_event_number;
6369 6707 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6370 6708 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6371 6709 &eventreply->EventDataLength);
6372 6710
6373 6711 if (event_data_len > 0) {
6374 6712 /*
6375 6713 * Limit data to size in m_event entry
6376 6714 */
6377 6715 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6378 6716 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6379 6717 }
6380 6718 for (j = 0; j < event_data_len; j++) {
6381 6719 mpt->m_events[i].Data[j] =
6382 6720 ddi_get32(mpt->m_acc_reply_frame_hdl,
6383 6721 &(eventreply->EventData[j]));
6384 6722 }
6385 6723
6386 6724 /*
6387 6725 * check for index wrap-around
6388 6726 */
6389 6727 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6390 6728 i = 0;
6391 6729 }
6392 6730 mpt->m_event_index = (uint8_t)i;
6393 6731
6394 6732 /*
6395 6733 * Set flag to send the event.
6396 6734 */
6397 6735 sendAEN = TRUE;
6398 6736 }
6399 6737 }
6400 6738
6401 6739 /*
6402 6740 * Generate a system event if flag is set to let anyone who cares know
6403 6741 * that an event has occurred.
6404 6742 */
6405 6743 if (sendAEN) {
6406 6744 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6407 6745 "SAS", NULL, NULL, DDI_NOSLEEP);
6408 6746 }
6409 6747 }
6410 6748
6411 6749 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6412 6750 /*
6413 6751 * handle sync events from ioc in interrupt
6414 6752 * return value:
6415 6753 * DDI_SUCCESS: The event is handled by this func
6416 6754 * DDI_FAILURE: Event is not handled
6417 6755 */
6418 6756 static int
6419 6757 mptsas_handle_event_sync(void *args)
6420 6758 {
6421 6759 m_replyh_arg_t *replyh_arg;
6422 6760 pMpi2EventNotificationReply_t eventreply;
6423 6761 uint32_t event, rfm;
6424 6762 mptsas_t *mpt;
6425 6763 uint_t iocstatus;
6426 6764
6427 6765 replyh_arg = (m_replyh_arg_t *)args;
6428 6766 rfm = replyh_arg->rfm;
6429 6767 mpt = replyh_arg->mpt;
6430 6768
6431 6769 ASSERT(mutex_owned(&mpt->m_mutex));
6432 6770
6433 6771 eventreply = (pMpi2EventNotificationReply_t)
6434 6772 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6435 6773 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6436 6774
6437 6775 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6438 6776 &eventreply->IOCStatus)) {
6439 6777 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6440 6778 mptsas_log(mpt, CE_WARN,
6441 6779 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6442 6780 "IOCLogInfo=0x%x", iocstatus,
6443 6781 ddi_get32(mpt->m_acc_reply_frame_hdl,
6444 6782 &eventreply->IOCLogInfo));
6445 6783 } else {
6446 6784 mptsas_log(mpt, CE_WARN,
6447 6785 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6448 6786 "IOCLogInfo=0x%x", iocstatus,
6449 6787 ddi_get32(mpt->m_acc_reply_frame_hdl,
6450 6788 &eventreply->IOCLogInfo));
6451 6789 }
6452 6790 }
6453 6791
6454 6792 /*
6455 6793 * figure out what kind of event we got and handle accordingly
6456 6794 */
6457 6795 switch (event) {
6458 6796 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6459 6797 {
6460 6798 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6461 6799 uint8_t num_entries, expstatus, phy;
6462 6800 uint8_t phystatus, physport, state, i;
6463 6801 uint8_t start_phy_num, link_rate;
6464 6802 uint16_t dev_handle, reason_code;
6465 6803 uint16_t enc_handle, expd_handle;
6466 6804 char string[80], curr[80], prev[80];
6467 6805 mptsas_topo_change_list_t *topo_head = NULL;
6468 6806 mptsas_topo_change_list_t *topo_tail = NULL;
6469 6807 mptsas_topo_change_list_t *topo_node = NULL;
6470 6808 mptsas_target_t *ptgt;
6471 6809 mptsas_smp_t *psmp;
6472 6810 uint8_t flags = 0, exp_flag;
6473 6811 smhba_info_t *pSmhba = NULL;
6474 6812
6475 6813 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6476 6814
6477 6815 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6478 6816 eventreply->EventData;
6479 6817
6480 6818 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6481 6819 &sas_topo_change_list->EnclosureHandle);
6482 6820 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6483 6821 &sas_topo_change_list->ExpanderDevHandle);
6484 6822 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6485 6823 &sas_topo_change_list->NumEntries);
6486 6824 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6487 6825 &sas_topo_change_list->StartPhyNum);
6488 6826 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6489 6827 &sas_topo_change_list->ExpStatus);
6490 6828 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6491 6829 &sas_topo_change_list->PhysicalPort);
6492 6830
6493 6831 string[0] = 0;
6494 6832 if (expd_handle) {
6495 6833 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6496 6834 switch (expstatus) {
6497 6835 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6498 6836 (void) sprintf(string, " added");
6499 6837 /*
6500 6838 * New expander device added
6501 6839 */
6502 6840 mpt->m_port_chng = 1;
6503 6841 topo_node = kmem_zalloc(
6504 6842 sizeof (mptsas_topo_change_list_t),
6505 6843 KM_SLEEP);
6506 6844 topo_node->mpt = mpt;
6507 6845 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6508 6846 topo_node->un.physport = physport;
6509 6847 topo_node->devhdl = expd_handle;
6510 6848 topo_node->flags = flags;
6511 6849 topo_node->object = NULL;
6512 6850 if (topo_head == NULL) {
6513 6851 topo_head = topo_tail = topo_node;
6514 6852 } else {
6515 6853 topo_tail->next = topo_node;
6516 6854 topo_tail = topo_node;
6517 6855 }
6518 6856 break;
6519 6857 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6520 6858 (void) sprintf(string, " not responding, "
6521 6859 "removed");
6522 6860 psmp = refhash_linear_search(mpt->m_smp_targets,
6523 6861 mptsas_smp_eval_devhdl, &expd_handle);
6524 6862 if (psmp == NULL)
6525 6863 break;
6526 6864
6527 6865 topo_node = kmem_zalloc(
6528 6866 sizeof (mptsas_topo_change_list_t),
6529 6867 KM_SLEEP);
6530 6868 topo_node->mpt = mpt;
6531 6869 topo_node->un.phymask =
6532 6870 psmp->m_addr.mta_phymask;
6533 6871 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6534 6872 topo_node->devhdl = expd_handle;
6535 6873 topo_node->flags = flags;
6536 6874 topo_node->object = NULL;
6537 6875 if (topo_head == NULL) {
6538 6876 topo_head = topo_tail = topo_node;
6539 6877 } else {
6540 6878 topo_tail->next = topo_node;
6541 6879 topo_tail = topo_node;
6542 6880 }
6543 6881 break;
6544 6882 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6545 6883 break;
6546 6884 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6547 6885 (void) sprintf(string, " not responding, "
6548 6886 "delaying removal");
6549 6887 break;
6550 6888 default:
6551 6889 break;
6552 6890 }
6553 6891 } else {
6554 6892 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6555 6893 }
6556 6894
6557 6895 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6558 6896 enc_handle, expd_handle, string));
6559 6897 for (i = 0; i < num_entries; i++) {
6560 6898 phy = i + start_phy_num;
6561 6899 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6562 6900 &sas_topo_change_list->PHY[i].PhyStatus);
6563 6901 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6564 6902 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6565 6903 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6566 6904 /*
6567 6905 * Filter out processing of Phy Vacant Status unless
6568 6906 * the reason code is "Not Responding". Process all
6569 6907 * other combinations of Phy Status and Reason Codes.
6570 6908 */
6571 6909 if ((phystatus &
6572 6910 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6573 6911 (reason_code !=
6574 6912 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6575 6913 continue;
6576 6914 }
6577 6915 curr[0] = 0;
6578 6916 prev[0] = 0;
6579 6917 string[0] = 0;
6580 6918 switch (reason_code) {
6581 6919 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6582 6920 {
6583 6921 NDBG20(("mptsas%d phy %d physical_port %d "
6584 6922 "dev_handle %d added", mpt->m_instance, phy,
6585 6923 physport, dev_handle));
6586 6924 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6587 6925 &sas_topo_change_list->PHY[i].LinkRate);
6588 6926 state = (link_rate &
6589 6927 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6590 6928 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6591 6929 switch (state) {
6592 6930 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6593 6931 (void) sprintf(curr, "is disabled");
6594 6932 break;
6595 6933 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6596 6934 (void) sprintf(curr, "is offline, "
6597 6935 "failed speed negotiation");
6598 6936 break;
6599 6937 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6600 6938 (void) sprintf(curr, "SATA OOB "
6601 6939 "complete");
6602 6940 break;
6603 6941 case SMP_RESET_IN_PROGRESS:
6604 6942 (void) sprintf(curr, "SMP reset in "
6605 6943 "progress");
6606 6944 break;
6607 6945 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6608 6946 (void) sprintf(curr, "is online at "
6609 6947 "1.5 Gbps");
6610 6948 break;
6611 6949 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6612 6950 (void) sprintf(curr, "is online at 3.0 "
6613 6951 "Gbps");
6614 6952 break;
6615 6953 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6616 6954 (void) sprintf(curr, "is online at 6.0 "
6617 6955 "Gbps");
6618 6956 break;
6619 6957 default:
6620 6958 (void) sprintf(curr, "state is "
6621 6959 "unknown");
6622 6960 break;
6623 6961 }
6624 6962 /*
6625 6963 * New target device added into the system.
6626 6964 * Set association flag according to if an
6627 6965 * expander is used or not.
6628 6966 */
6629 6967 exp_flag =
6630 6968 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6631 6969 if (flags ==
6632 6970 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6633 6971 flags = exp_flag;
6634 6972 }
6635 6973 topo_node = kmem_zalloc(
6636 6974 sizeof (mptsas_topo_change_list_t),
6637 6975 KM_SLEEP);
6638 6976 topo_node->mpt = mpt;
6639 6977 topo_node->event =
6640 6978 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6641 6979 if (expd_handle == 0) {
6642 6980 /*
6643 6981 * Per MPI 2, if expander dev handle
6644 6982 * is 0, it's a directly attached
6645 6983 * device. So driver use PHY to decide
6646 6984 * which iport is associated
6647 6985 */
6648 6986 physport = phy;
6649 6987 mpt->m_port_chng = 1;
6650 6988 }
6651 6989 topo_node->un.physport = physport;
6652 6990 topo_node->devhdl = dev_handle;
6653 6991 topo_node->flags = flags;
6654 6992 topo_node->object = NULL;
6655 6993 if (topo_head == NULL) {
6656 6994 topo_head = topo_tail = topo_node;
6657 6995 } else {
6658 6996 topo_tail->next = topo_node;
6659 6997 topo_tail = topo_node;
6660 6998 }
6661 6999 break;
6662 7000 }
6663 7001 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6664 7002 {
6665 7003 NDBG20(("mptsas%d phy %d physical_port %d "
6666 7004 "dev_handle %d removed", mpt->m_instance,
6667 7005 phy, physport, dev_handle));
6668 7006 /*
6669 7007 * Set association flag according to if an
6670 7008 * expander is used or not.
6671 7009 */
6672 7010 exp_flag =
6673 7011 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6674 7012 if (flags ==
6675 7013 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6676 7014 flags = exp_flag;
6677 7015 }
6678 7016 /*
6679 7017 * Target device is removed from the system
6680 7018			 * Before the device is really offline from
6681 7019			 * the system.
6682 7020 */
6683 7021 ptgt = refhash_linear_search(mpt->m_targets,
6684 7022 mptsas_target_eval_devhdl, &dev_handle);
6685 7023 /*
6686 7024 * If ptgt is NULL here, it means that the
6687 7025 * DevHandle is not in the hash table. This is
6688 7026 * reasonable sometimes. For example, if a
6689 7027 * disk was pulled, then added, then pulled
6690 7028 * again, the disk will not have been put into
6691 7029 * the hash table because the add event will
6692 7030 * have an invalid phymask. BUT, this does not
6693 7031 * mean that the DevHandle is invalid. The
6694 7032 * controller will still have a valid DevHandle
6695 7033 * that must be removed. To do this, use the
6696 7034 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6697 7035 */
6698 7036 if (ptgt == NULL) {
6699 7037 topo_node = kmem_zalloc(
6700 7038 sizeof (mptsas_topo_change_list_t),
6701 7039 KM_SLEEP);
6702 7040 topo_node->mpt = mpt;
6703 7041 topo_node->un.phymask = 0;
6704 7042 topo_node->event =
6705 7043 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
6706 7044 topo_node->devhdl = dev_handle;
6707 7045 topo_node->flags = flags;
6708 7046 topo_node->object = NULL;
6709 7047 if (topo_head == NULL) {
6710 7048 topo_head = topo_tail =
6711 7049 topo_node;
6712 7050 } else {
6713 7051 topo_tail->next = topo_node;
6714 7052 topo_tail = topo_node;
6715 7053 }
6716 7054 break;
6717 7055 }
6718 7056
6719 7057 /*
6720 7058			 * Update DR flag immediately to avoid I/O failure
6721 7059 * before failover finish. Pay attention to the
6722 7060 * mutex protect, we need grab m_tx_waitq_mutex
6723 7061 * during set m_dr_flag because we won't add
6724 7062 * the following command into waitq, instead,
6725 7063 * we need return TRAN_BUSY in the tran_start
6726 7064 * context.
6727 7065 */
6728 7066 mutex_enter(&mpt->m_tx_waitq_mutex);
6729 7067 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6730 7068 mutex_exit(&mpt->m_tx_waitq_mutex);
6731 7069
6732 7070 topo_node = kmem_zalloc(
6733 7071 sizeof (mptsas_topo_change_list_t),
6734 7072 KM_SLEEP);
6735 7073 topo_node->mpt = mpt;
6736 7074 topo_node->un.phymask =
6737 7075 ptgt->m_addr.mta_phymask;
6738 7076 topo_node->event =
6739 7077 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6740 7078 topo_node->devhdl = dev_handle;
6741 7079 topo_node->flags = flags;
6742 7080 topo_node->object = NULL;
6743 7081 if (topo_head == NULL) {
6744 7082 topo_head = topo_tail = topo_node;
6745 7083 } else {
6746 7084 topo_tail->next = topo_node;
6747 7085 topo_tail = topo_node;
6748 7086 }
6749 7087 break;
6750 7088 }
6751 7089 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6752 7090 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6753 7091 &sas_topo_change_list->PHY[i].LinkRate);
6754 7092 state = (link_rate &
6755 7093 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6756 7094 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6757 7095 pSmhba = &mpt->m_phy_info[i].smhba_info;
6758 7096 pSmhba->negotiated_link_rate = state;
6759 7097 switch (state) {
6760 7098 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6761 7099 (void) sprintf(curr, "is disabled");
6762 7100 mptsas_smhba_log_sysevent(mpt,
6763 7101 ESC_SAS_PHY_EVENT,
6764 7102 SAS_PHY_REMOVE,
6765 7103 &mpt->m_phy_info[i].smhba_info);
6766 7104 mpt->m_phy_info[i].smhba_info.
6767 7105 negotiated_link_rate
6768 7106 = 0x1;
6769 7107 break;
6770 7108 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6771 7109 (void) sprintf(curr, "is offline, "
6772 7110 "failed speed negotiation");
6773 7111 mptsas_smhba_log_sysevent(mpt,
6774 7112 ESC_SAS_PHY_EVENT,
6775 7113 SAS_PHY_OFFLINE,
6776 7114 &mpt->m_phy_info[i].smhba_info);
6777 7115 break;
6778 7116 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6779 7117 (void) sprintf(curr, "SATA OOB "
6780 7118 "complete");
6781 7119 break;
6782 7120 case SMP_RESET_IN_PROGRESS:
6783 7121 (void) sprintf(curr, "SMP reset in "
6784 7122 "progress");
6785 7123 break;
6786 7124 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6787 7125 (void) sprintf(curr, "is online at "
6788 7126 "1.5 Gbps");
6789 7127 if ((expd_handle == 0) &&
6790 7128 (enc_handle == 1)) {
6791 7129 mpt->m_port_chng = 1;
6792 7130 }
6793 7131 mptsas_smhba_log_sysevent(mpt,
6794 7132 ESC_SAS_PHY_EVENT,
6795 7133 SAS_PHY_ONLINE,
6796 7134 &mpt->m_phy_info[i].smhba_info);
6797 7135 break;
6798 7136 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6799 7137 (void) sprintf(curr, "is online at 3.0 "
6800 7138 "Gbps");
6801 7139 if ((expd_handle == 0) &&
6802 7140 (enc_handle == 1)) {
6803 7141 mpt->m_port_chng = 1;
6804 7142 }
6805 7143 mptsas_smhba_log_sysevent(mpt,
6806 7144 ESC_SAS_PHY_EVENT,
6807 7145 SAS_PHY_ONLINE,
6808 7146 &mpt->m_phy_info[i].smhba_info);
6809 7147 break;
6810 7148 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6811 7149 (void) sprintf(curr, "is online at "
6812 7150 "6.0 Gbps");
6813 7151 if ((expd_handle == 0) &&
6814 7152 (enc_handle == 1)) {
6815 7153 mpt->m_port_chng = 1;
6816 7154 }
6817 7155 mptsas_smhba_log_sysevent(mpt,
6818 7156 ESC_SAS_PHY_EVENT,
6819 7157 SAS_PHY_ONLINE,
6820 7158 &mpt->m_phy_info[i].smhba_info);
6821 7159 break;
6822 7160 default:
6823 7161 (void) sprintf(curr, "state is "
6824 7162 "unknown");
6825 7163 break;
6826 7164 }
6827 7165
6828 7166 state = (link_rate &
6829 7167 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
6830 7168 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
6831 7169 switch (state) {
6832 7170 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6833 7171 (void) sprintf(prev, ", was disabled");
6834 7172 break;
6835 7173 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6836 7174 (void) sprintf(prev, ", was offline, "
6837 7175 "failed speed negotiation");
6838 7176 break;
6839 7177 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6840 7178 (void) sprintf(prev, ", was SATA OOB "
6841 7179 "complete");
6842 7180 break;
6843 7181 case SMP_RESET_IN_PROGRESS:
6844 7182 (void) sprintf(prev, ", was SMP reset "
6845 7183 "in progress");
6846 7184 break;
6847 7185 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6848 7186 (void) sprintf(prev, ", was online at "
6849 7187 "1.5 Gbps");
6850 7188 break;
6851 7189 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6852 7190 (void) sprintf(prev, ", was online at "
6853 7191 "3.0 Gbps");
6854 7192 break;
6855 7193 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6856 7194 (void) sprintf(prev, ", was online at "
6857 7195 "6.0 Gbps");
6858 7196 break;
6859 7197 default:
6860 7198 break;
6861 7199 }
6862 7200 (void) sprintf(&string[strlen(string)], "link "
6863 7201 "changed, ");
6864 7202 break;
6865 7203 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6866 7204 continue;
6867 7205 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6868 7206 (void) sprintf(&string[strlen(string)],
6869 7207 "target not responding, delaying "
6870 7208 "removal");
6871 7209 break;
6872 7210 }
6873 7211 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
6874 7212 mpt->m_instance, phy, dev_handle, string, curr,
6875 7213 prev));
6876 7214 }
6877 7215 if (topo_head != NULL) {
6878 7216 /*
6879 7217 * Launch DR taskq to handle topology change
6880 7218 */
6881 7219 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6882 7220 mptsas_handle_dr, (void *)topo_head,
6883 7221 DDI_NOSLEEP)) != DDI_SUCCESS) {
6884 7222 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6885 7223 "for handle SAS DR event failed. \n");
6886 7224 }
6887 7225 }
6888 7226 break;
6889 7227 }
6890 7228 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6891 7229 {
6892 7230 Mpi2EventDataIrConfigChangeList_t *irChangeList;
6893 7231 mptsas_topo_change_list_t *topo_head = NULL;
6894 7232 mptsas_topo_change_list_t *topo_tail = NULL;
6895 7233 mptsas_topo_change_list_t *topo_node = NULL;
6896 7234 mptsas_target_t *ptgt;
6897 7235 uint8_t num_entries, i, reason;
6898 7236 uint16_t volhandle, diskhandle;
6899 7237
6900 7238 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
6901 7239 eventreply->EventData;
6902 7240 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6903 7241 &irChangeList->NumElements);
6904 7242
6905 7243 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
6906 7244 mpt->m_instance));
6907 7245
6908 7246 for (i = 0; i < num_entries; i++) {
6909 7247 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
6910 7248 &irChangeList->ConfigElement[i].ReasonCode);
6911 7249 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6912 7250 &irChangeList->ConfigElement[i].VolDevHandle);
6913 7251 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6914 7252 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
6915 7253
6916 7254 switch (reason) {
6917 7255 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
6918 7256 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
6919 7257 {
6920 7258 NDBG20(("mptsas %d volume added\n",
6921 7259 mpt->m_instance));
6922 7260
6923 7261 topo_node = kmem_zalloc(
6924 7262 sizeof (mptsas_topo_change_list_t),
6925 7263 KM_SLEEP);
6926 7264
6927 7265 topo_node->mpt = mpt;
6928 7266 topo_node->event =
6929 7267 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6930 7268 topo_node->un.physport = 0xff;
6931 7269 topo_node->devhdl = volhandle;
6932 7270 topo_node->flags =
6933 7271 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6934 7272 topo_node->object = NULL;
6935 7273 if (topo_head == NULL) {
6936 7274 topo_head = topo_tail = topo_node;
6937 7275 } else {
6938 7276 topo_tail->next = topo_node;
6939 7277 topo_tail = topo_node;
6940 7278 }
6941 7279 break;
6942 7280 }
6943 7281 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
6944 7282 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
6945 7283 {
6946 7284 NDBG20(("mptsas %d volume deleted\n",
6947 7285 mpt->m_instance));
6948 7286 ptgt = refhash_linear_search(mpt->m_targets,
6949 7287 mptsas_target_eval_devhdl, &volhandle);
6950 7288 if (ptgt == NULL)
6951 7289 break;
6952 7290
6953 7291 /*
6954 7292 * Clear any flags related to volume
6955 7293 */
6956 7294 (void) mptsas_delete_volume(mpt, volhandle);
6957 7295
6958 7296 /*
6959 7297				 * Update DR flag immediately to avoid I/O failure
6960 7298 */
6961 7299 mutex_enter(&mpt->m_tx_waitq_mutex);
6962 7300 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6963 7301 mutex_exit(&mpt->m_tx_waitq_mutex);
6964 7302
6965 7303 topo_node = kmem_zalloc(
6966 7304 sizeof (mptsas_topo_change_list_t),
6967 7305 KM_SLEEP);
6968 7306 topo_node->mpt = mpt;
6969 7307 topo_node->un.phymask =
6970 7308 ptgt->m_addr.mta_phymask;
6971 7309 topo_node->event =
6972 7310 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6973 7311 topo_node->devhdl = volhandle;
6974 7312 topo_node->flags =
6975 7313 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6976 7314 topo_node->object = (void *)ptgt;
6977 7315 if (topo_head == NULL) {
6978 7316 topo_head = topo_tail = topo_node;
6979 7317 } else {
6980 7318 topo_tail->next = topo_node;
6981 7319 topo_tail = topo_node;
6982 7320 }
6983 7321 break;
6984 7322 }
6985 7323 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
6986 7324 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
6987 7325 {
6988 7326 ptgt = refhash_linear_search(mpt->m_targets,
6989 7327 mptsas_target_eval_devhdl, &diskhandle);
6990 7328 if (ptgt == NULL)
6991 7329 break;
6992 7330
6993 7331 /*
6994 7332				 * Update DR flag immediately to avoid I/O failure
6995 7333 */
6996 7334 mutex_enter(&mpt->m_tx_waitq_mutex);
6997 7335 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6998 7336 mutex_exit(&mpt->m_tx_waitq_mutex);
6999 7337
7000 7338 topo_node = kmem_zalloc(
7001 7339 sizeof (mptsas_topo_change_list_t),
7002 7340 KM_SLEEP);
7003 7341 topo_node->mpt = mpt;
7004 7342 topo_node->un.phymask =
7005 7343 ptgt->m_addr.mta_phymask;
7006 7344 topo_node->event =
7007 7345 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7008 7346 topo_node->devhdl = diskhandle;
7009 7347 topo_node->flags =
7010 7348 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7011 7349 topo_node->object = (void *)ptgt;
7012 7350 if (topo_head == NULL) {
7013 7351 topo_head = topo_tail = topo_node;
7014 7352 } else {
7015 7353 topo_tail->next = topo_node;
7016 7354 topo_tail = topo_node;
7017 7355 }
7018 7356 break;
7019 7357 }
7020 7358 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7021 7359 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7022 7360 {
7023 7361 /*
7024 7362 * The physical drive is released by a IR
7025 7363				 * volume. But we cannot get the physport
7026 7364 * or phynum from the event data, so we only
7027 7365 * can get the physport/phynum after SAS
7028 7366 * Device Page0 request for the devhdl.
7029 7367 */
7030 7368 topo_node = kmem_zalloc(
7031 7369 sizeof (mptsas_topo_change_list_t),
7032 7370 KM_SLEEP);
7033 7371 topo_node->mpt = mpt;
7034 7372 topo_node->un.phymask = 0;
7035 7373 topo_node->event =
7036 7374 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7037 7375 topo_node->devhdl = diskhandle;
7038 7376 topo_node->flags =
7039 7377 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7040 7378 topo_node->object = NULL;
7041 7379 mpt->m_port_chng = 1;
7042 7380 if (topo_head == NULL) {
7043 7381 topo_head = topo_tail = topo_node;
7044 7382 } else {
7045 7383 topo_tail->next = topo_node;
7046 7384 topo_tail = topo_node;
7047 7385 }
7048 7386 break;
7049 7387 }
7050 7388 default:
7051 7389 break;
7052 7390 }
7053 7391 }
7054 7392
7055 7393 if (topo_head != NULL) {
7056 7394 /*
7057 7395 * Launch DR taskq to handle topology change
7058 7396 */
7059 7397 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7060 7398 mptsas_handle_dr, (void *)topo_head,
7061 7399 DDI_NOSLEEP)) != DDI_SUCCESS) {
7062 7400 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7063 7401 "for handle SAS DR event failed. \n");
7064 7402 }
7065 7403 }
7066 7404 break;
7067 7405 }
7068 7406 default:
7069 7407 return (DDI_FAILURE);
7070 7408 }
7071 7409
7072 7410 return (DDI_SUCCESS);
7073 7411 }
7074 7412
7075 7413 /*
7076 7414 * handle events from ioc
7077 7415 */
7078 7416 static void
7079 7417 mptsas_handle_event(void *args)
7080 7418 {
7081 7419 m_replyh_arg_t *replyh_arg;
7082 7420 pMpi2EventNotificationReply_t eventreply;
7083 7421 uint32_t event, iocloginfo, rfm;
7084 7422 uint32_t status;
7085 7423 uint8_t port;
7086 7424 mptsas_t *mpt;
7087 7425 uint_t iocstatus;
7088 7426
7089 7427 replyh_arg = (m_replyh_arg_t *)args;
7090 7428 rfm = replyh_arg->rfm;
7091 7429 mpt = replyh_arg->mpt;
7092 7430
7093 7431 mutex_enter(&mpt->m_mutex);
7094 7432 /*
7095 7433 * If HBA is being reset, drop incoming event.
7096 7434 */
7097 7435 if (mpt->m_in_reset) {
7098 7436 NDBG20(("dropping event received prior to reset"));
7099 7437 mutex_exit(&mpt->m_mutex);
7100 7438 return;
7101 7439 }
7102 7440
7103 7441 eventreply = (pMpi2EventNotificationReply_t)
7104 7442 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7105 7443 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7106 7444
7107 7445 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7108 7446 &eventreply->IOCStatus)) {
7109 7447 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7110 7448 mptsas_log(mpt, CE_WARN,
7111 7449 "!mptsas_handle_event: IOCStatus=0x%x, "
7112 7450 "IOCLogInfo=0x%x", iocstatus,
7113 7451 ddi_get32(mpt->m_acc_reply_frame_hdl,
7114 7452 &eventreply->IOCLogInfo));
7115 7453 } else {
7116 7454 mptsas_log(mpt, CE_WARN,
7117 7455 "mptsas_handle_event: IOCStatus=0x%x, "
7118 7456 "IOCLogInfo=0x%x", iocstatus,
7119 7457 ddi_get32(mpt->m_acc_reply_frame_hdl,
7120 7458 &eventreply->IOCLogInfo));
7121 7459 }
7122 7460 }
7123 7461
7124 7462 /*
7125 7463 * figure out what kind of event we got and handle accordingly
7126 7464 */
7127 7465 switch (event) {
7128 7466 case MPI2_EVENT_LOG_ENTRY_ADDED:
7129 7467 break;
7130 7468 case MPI2_EVENT_LOG_DATA:
7131 7469 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7132 7470 &eventreply->IOCLogInfo);
7133 7471 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7134 7472 iocloginfo));
7135 7473 break;
7136 7474 case MPI2_EVENT_STATE_CHANGE:
7137 7475 NDBG20(("mptsas%d state change.", mpt->m_instance));
7138 7476 break;
7139 7477 case MPI2_EVENT_HARD_RESET_RECEIVED:
7140 7478 NDBG20(("mptsas%d event change.", mpt->m_instance));
7141 7479 break;
7142 7480 case MPI2_EVENT_SAS_DISCOVERY:
7143 7481 {
7144 7482 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7145 7483 char string[80];
7146 7484 uint8_t rc;
7147 7485
7148 7486 sasdiscovery =
7149 7487 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7150 7488
7151 7489 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7152 7490 &sasdiscovery->ReasonCode);
7153 7491 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7154 7492 &sasdiscovery->PhysicalPort);
7155 7493 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7156 7494 &sasdiscovery->DiscoveryStatus);
7157 7495
7158 7496 string[0] = 0;
7159 7497 switch (rc) {
7160 7498 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7161 7499 (void) sprintf(string, "STARTING");
7162 7500 break;
7163 7501 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7164 7502 (void) sprintf(string, "COMPLETED");
7165 7503 break;
7166 7504 default:
7167 7505 (void) sprintf(string, "UNKNOWN");
7168 7506 break;
7169 7507 }
7170 7508
7171 7509 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7172 7510 port, status));
7173 7511
7174 7512 break;
7175 7513 }
7176 7514 case MPI2_EVENT_EVENT_CHANGE:
7177 7515 NDBG20(("mptsas%d event change.", mpt->m_instance));
7178 7516 break;
7179 7517 case MPI2_EVENT_TASK_SET_FULL:
7180 7518 {
7181 7519 pMpi2EventDataTaskSetFull_t taskfull;
7182 7520
7183 7521 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7184 7522
7185 7523 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7186 7524 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7187 7525 &taskfull->CurrentDepth)));
7188 7526 break;
7189 7527 }
7190 7528 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7191 7529 {
7192 7530 /*
7193 7531 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7194 7532 * in mptsas_handle_event_sync() of interrupt context
7195 7533 */
7196 7534 break;
7197 7535 }
7198 7536 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7199 7537 {
7200 7538 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7201 7539 uint8_t rc;
7202 7540 char string[80];
7203 7541
7204 7542 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7205 7543 eventreply->EventData;
7206 7544
7207 7545 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7208 7546 &encstatus->ReasonCode);
7209 7547 switch (rc) {
7210 7548 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7211 7549 (void) sprintf(string, "added");
7212 7550 break;
7213 7551 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7214 7552 (void) sprintf(string, ", not responding");
7215 7553 break;
7216 7554 default:
7217 7555 break;
7218 7556 }
7219 7557 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7220 7558 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7221 7559 &encstatus->EnclosureHandle), string));
7222 7560 break;
7223 7561 }
7224 7562
7225 7563 /*
7226 7564 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7227 7565 * mptsas_handle_event_sync,in here just send ack message.
7228 7566 */
7229 7567 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7230 7568 {
7231 7569 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7232 7570 uint8_t rc;
7233 7571 uint16_t devhdl;
7234 7572 uint64_t wwn = 0;
7235 7573 uint32_t wwn_lo, wwn_hi;
7236 7574
7237 7575 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7238 7576 eventreply->EventData;
7239 7577 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7240 7578 &statuschange->ReasonCode);
7241 7579 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7242 7580 (uint32_t *)(void *)&statuschange->SASAddress);
7243 7581 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7244 7582 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7245 7583 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7246 7584 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7247 7585 &statuschange->DevHandle);
7248 7586
7249 7587 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7250 7588 wwn));
7251 7589
7252 7590 switch (rc) {
7253 7591 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7254 7592 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7255 7593 ddi_get8(mpt->m_acc_reply_frame_hdl,
7256 7594 &statuschange->ASC),
7257 7595 ddi_get8(mpt->m_acc_reply_frame_hdl,
7258 7596 &statuschange->ASCQ)));
7259 7597 break;
7260 7598
7261 7599 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7262 7600 NDBG20(("Device not supported"));
7263 7601 break;
7264 7602
7265 7603 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7266 7604 NDBG20(("IOC internally generated the Target Reset "
7267 7605 "for devhdl:%x", devhdl));
7268 7606 break;
7269 7607
7270 7608 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7271 7609 NDBG20(("IOC's internally generated Target Reset "
7272 7610 "completed for devhdl:%x", devhdl));
7273 7611 break;
7274 7612
7275 7613 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7276 7614 NDBG20(("IOC internally generated Abort Task"));
7277 7615 break;
7278 7616
7279 7617 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7280 7618 NDBG20(("IOC's internally generated Abort Task "
7281 7619 "completed"));
7282 7620 break;
7283 7621
7284 7622 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7285 7623 NDBG20(("IOC internally generated Abort Task Set"));
7286 7624 break;
7287 7625
7288 7626 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7289 7627 NDBG20(("IOC internally generated Clear Task Set"));
7290 7628 break;
7291 7629
7292 7630 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7293 7631 NDBG20(("IOC internally generated Query Task"));
7294 7632 break;
7295 7633
7296 7634 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7297 7635 NDBG20(("Device sent an Asynchronous Notification"));
7298 7636 break;
7299 7637
7300 7638 default:
7301 7639 break;
7302 7640 }
7303 7641 break;
7304 7642 }
7305 7643 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7306 7644 {
7307 7645 /*
7308 7646 * IR TOPOLOGY CHANGE LIST Event has already been handled
7309 7647		 * in mptsas_handle_event_sync() of interrupt context
7310 7648 */
7311 7649 break;
7312 7650 }
7313 7651 case MPI2_EVENT_IR_OPERATION_STATUS:
7314 7652 {
7315 7653 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7316 7654 char reason_str[80];
7317 7655 uint8_t rc, percent;
7318 7656 uint16_t handle;
7319 7657
7320 7658 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7321 7659 eventreply->EventData;
7322 7660 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7323 7661 &irOpStatus->RAIDOperation);
7324 7662 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7325 7663 &irOpStatus->PercentComplete);
7326 7664 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7327 7665 &irOpStatus->VolDevHandle);
7328 7666
7329 7667 switch (rc) {
7330 7668 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7331 7669 (void) sprintf(reason_str, "resync");
7332 7670 break;
7333 7671 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7334 7672 (void) sprintf(reason_str, "online capacity "
7335 7673 "expansion");
7336 7674 break;
7337 7675 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7338 7676 (void) sprintf(reason_str, "consistency check");
7339 7677 break;
7340 7678 default:
7341 7679 (void) sprintf(reason_str, "unknown reason %x",
7342 7680 rc);
7343 7681 }
7344 7682
7345 7683 NDBG20(("mptsas%d raid operational status: (%s)"
7346 7684 "\thandle(0x%04x), percent complete(%d)\n",
7347 7685 mpt->m_instance, reason_str, handle, percent));
7348 7686 break;
7349 7687 }
7350 7688 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7351 7689 {
7352 7690 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7353 7691 uint8_t phy_num;
7354 7692 uint8_t primitive;
7355 7693
7356 7694 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7357 7695 eventreply->EventData;
7358 7696
7359 7697 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7360 7698 &sas_broadcast->PhyNum);
7361 7699 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7362 7700 &sas_broadcast->Primitive);
7363 7701
7364 7702 switch (primitive) {
7365 7703 case MPI2_EVENT_PRIMITIVE_CHANGE:
7366 7704 mptsas_smhba_log_sysevent(mpt,
7367 7705 ESC_SAS_HBA_PORT_BROADCAST,
7368 7706 SAS_PORT_BROADCAST_CHANGE,
7369 7707 &mpt->m_phy_info[phy_num].smhba_info);
7370 7708 break;
7371 7709 case MPI2_EVENT_PRIMITIVE_SES:
7372 7710 mptsas_smhba_log_sysevent(mpt,
7373 7711 ESC_SAS_HBA_PORT_BROADCAST,
7374 7712 SAS_PORT_BROADCAST_SES,
7375 7713 &mpt->m_phy_info[phy_num].smhba_info);
7376 7714 break;
7377 7715 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7378 7716 mptsas_smhba_log_sysevent(mpt,
7379 7717 ESC_SAS_HBA_PORT_BROADCAST,
7380 7718 SAS_PORT_BROADCAST_D01_4,
7381 7719 &mpt->m_phy_info[phy_num].smhba_info);
7382 7720 break;
7383 7721 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7384 7722 mptsas_smhba_log_sysevent(mpt,
7385 7723 ESC_SAS_HBA_PORT_BROADCAST,
7386 7724 SAS_PORT_BROADCAST_D04_7,
7387 7725 &mpt->m_phy_info[phy_num].smhba_info);
7388 7726 break;
7389 7727 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7390 7728 mptsas_smhba_log_sysevent(mpt,
7391 7729 ESC_SAS_HBA_PORT_BROADCAST,
7392 7730 SAS_PORT_BROADCAST_D16_7,
7393 7731 &mpt->m_phy_info[phy_num].smhba_info);
7394 7732 break;
7395 7733 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7396 7734 mptsas_smhba_log_sysevent(mpt,
7397 7735 ESC_SAS_HBA_PORT_BROADCAST,
7398 7736 SAS_PORT_BROADCAST_D29_7,
7399 7737 &mpt->m_phy_info[phy_num].smhba_info);
7400 7738 break;
7401 7739 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7402 7740 mptsas_smhba_log_sysevent(mpt,
7403 7741 ESC_SAS_HBA_PORT_BROADCAST,
7404 7742 SAS_PORT_BROADCAST_D24_0,
7405 7743 &mpt->m_phy_info[phy_num].smhba_info);
7406 7744 break;
7407 7745 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7408 7746 mptsas_smhba_log_sysevent(mpt,
7409 7747 ESC_SAS_HBA_PORT_BROADCAST,
7410 7748 SAS_PORT_BROADCAST_D27_4,
7411 7749 &mpt->m_phy_info[phy_num].smhba_info);
7412 7750 break;
7413 7751 default:
7414 7752 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7415 7753 " %x received",
7416 7754 mpt->m_instance, primitive));
7417 7755 break;
7418 7756 }
7419 7757 NDBG20(("mptsas%d sas broadcast primitive: "
7420 7758 "\tprimitive(0x%04x), phy(%d) complete\n",
7421 7759 mpt->m_instance, primitive, phy_num));
7422 7760 break;
7423 7761 }
7424 7762 case MPI2_EVENT_IR_VOLUME:
7425 7763 {
7426 7764 Mpi2EventDataIrVolume_t *irVolume;
7427 7765 uint16_t devhandle;
7428 7766 uint32_t state;
7429 7767 int config, vol;
7430 7768 uint8_t found = FALSE;
7431 7769
7432 7770 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7433 7771 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7434 7772 &irVolume->NewValue);
7435 7773 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7436 7774 &irVolume->VolDevHandle);
7437 7775
7438 7776 NDBG20(("EVENT_IR_VOLUME event is received"));
7439 7777
7440 7778 /*
7441 7779 * Get latest RAID info and then find the DevHandle for this
7442 7780 * event in the configuration. If the DevHandle is not found
7443 7781 * just exit the event.
7444 7782 */
7445 7783 (void) mptsas_get_raid_info(mpt);
7446 7784 for (config = 0; (config < mpt->m_num_raid_configs) &&
7447 7785 (!found); config++) {
7448 7786 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7449 7787 if (mpt->m_raidconfig[config].m_raidvol[vol].
7450 7788 m_raidhandle == devhandle) {
7451 7789 found = TRUE;
7452 7790 break;
7453 7791 }
7454 7792 }
7455 7793 }
7456 7794 if (!found) {
7457 7795 break;
7458 7796 }
7459 7797
7460 7798 switch (irVolume->ReasonCode) {
7461 7799 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7462 7800 {
7463 7801 uint32_t i;
7464 7802 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7465 7803 state;
7466 7804
7467 7805 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7468 7806 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7469 7807 ", auto-config of hot-swap drives is %s"
7470 7808 ", write caching is %s"
7471 7809 ", hot-spare pool mask is %02x\n",
7472 7810 vol, state &
7473 7811 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7474 7812 ? "disabled" : "enabled",
7475 7813 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7476 7814 ? "controlled by member disks" :
7477 7815 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7478 7816 ? "disabled" :
7479 7817 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7480 7818 ? "enabled" :
7481 7819 "incorrectly set",
7482 7820 (state >> 16) & 0xff);
7483 7821 break;
7484 7822 }
7485 7823 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7486 7824 {
7487 7825 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7488 7826 (uint8_t)state;
7489 7827
7490 7828 mptsas_log(mpt, CE_NOTE,
7491 7829 "Volume %d is now %s\n", vol,
7492 7830 state == MPI2_RAID_VOL_STATE_OPTIMAL
7493 7831 ? "optimal" :
7494 7832 state == MPI2_RAID_VOL_STATE_DEGRADED
7495 7833 ? "degraded" :
7496 7834 state == MPI2_RAID_VOL_STATE_ONLINE
7497 7835 ? "online" :
7498 7836 state == MPI2_RAID_VOL_STATE_INITIALIZING
7499 7837 ? "initializing" :
7500 7838 state == MPI2_RAID_VOL_STATE_FAILED
7501 7839 ? "failed" :
7502 7840 state == MPI2_RAID_VOL_STATE_MISSING
7503 7841 ? "missing" :
7504 7842 "state unknown");
7505 7843 break;
7506 7844 }
7507 7845 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7508 7846 {
7509 7847 mpt->m_raidconfig[config].m_raidvol[vol].
7510 7848 m_statusflags = state;
7511 7849
7512 7850 mptsas_log(mpt, CE_NOTE,
7513 7851 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7514 7852 vol,
7515 7853 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7516 7854 ? ", enabled" : ", disabled",
7517 7855 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7518 7856 ? ", quiesced" : "",
7519 7857 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7520 7858 ? ", inactive" : ", active",
7521 7859 state &
7522 7860 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7523 7861 ? ", bad block table is full" : "",
7524 7862 state &
7525 7863 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7526 7864 ? ", resync in progress" : "",
7527 7865 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7528 7866 ? ", background initialization in progress" : "",
7529 7867 state &
7530 7868 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7531 7869 ? ", capacity expansion in progress" : "",
7532 7870 state &
7533 7871 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7534 7872 ? ", consistency check in progress" : "",
7535 7873 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7536 7874 ? ", data scrub in progress" : "");
7537 7875 break;
7538 7876 }
7539 7877 default:
7540 7878 break;
7541 7879 }
7542 7880 break;
7543 7881 }
7544 7882 case MPI2_EVENT_IR_PHYSICAL_DISK:
7545 7883 {
7546 7884 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7547 7885 uint16_t devhandle, enchandle, slot;
7548 7886 uint32_t status, state;
7549 7887 uint8_t physdisknum, reason;
7550 7888
7551 7889 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7552 7890 eventreply->EventData;
7553 7891 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7554 7892 &irPhysDisk->PhysDiskNum);
7555 7893 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7556 7894 &irPhysDisk->PhysDiskDevHandle);
7557 7895 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7558 7896 &irPhysDisk->EnclosureHandle);
7559 7897 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7560 7898 &irPhysDisk->Slot);
7561 7899 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7562 7900 &irPhysDisk->NewValue);
7563 7901 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7564 7902 &irPhysDisk->ReasonCode);
7565 7903
7566 7904 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7567 7905
7568 7906 switch (reason) {
7569 7907 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7570 7908 mptsas_log(mpt, CE_NOTE,
7571 7909 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7572 7910 "for enclosure with handle 0x%x is now in hot "
7573 7911 "spare pool %d",
7574 7912 physdisknum, devhandle, slot, enchandle,
7575 7913 (state >> 16) & 0xff);
7576 7914 break;
7577 7915
7578 7916 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7579 7917 status = state;
7580 7918 mptsas_log(mpt, CE_NOTE,
7581 7919 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7582 7920 "for enclosure with handle 0x%x is now "
7583 7921 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7584 7922 enchandle,
7585 7923 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7586 7924 ? ", inactive" : ", active",
7587 7925 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7588 7926 ? ", out of sync" : "",
7589 7927 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7590 7928 ? ", quiesced" : "",
7591 7929 status &
7592 7930 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7593 7931 ? ", write cache enabled" : "",
7594 7932 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7595 7933 ? ", capacity expansion target" : "");
7596 7934 break;
7597 7935
7598 7936 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7599 7937 mptsas_log(mpt, CE_NOTE,
7600 7938 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7601 7939 "for enclosure with handle 0x%x is now %s\n",
7602 7940 physdisknum, devhandle, slot, enchandle,
7603 7941 state == MPI2_RAID_PD_STATE_OPTIMAL
7604 7942 ? "optimal" :
7605 7943 state == MPI2_RAID_PD_STATE_REBUILDING
7606 7944 ? "rebuilding" :
7607 7945 state == MPI2_RAID_PD_STATE_DEGRADED
7608 7946 ? "degraded" :
7609 7947 state == MPI2_RAID_PD_STATE_HOT_SPARE
7610 7948 ? "a hot spare" :
7611 7949 state == MPI2_RAID_PD_STATE_ONLINE
7612 7950 ? "online" :
7613 7951 state == MPI2_RAID_PD_STATE_OFFLINE
7614 7952 ? "offline" :
7615 7953 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7616 7954 ? "not compatible" :
7617 7955 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7618 7956 ? "not configured" :
7619 7957 "state unknown");
7620 7958 break;
7621 7959 }
7622 7960 break;
7623 7961 }
7624 7962 default:
7625 7963 NDBG20(("mptsas%d: unknown event %x received",
7626 7964 mpt->m_instance, event));
7627 7965 break;
7628 7966 }
7629 7967
7630 7968 /*
7631 7969 * Return the reply frame to the free queue.
7632 7970 */
7633 7971 ddi_put32(mpt->m_acc_free_queue_hdl,
7634 7972 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7635 7973 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7636 7974 DDI_DMA_SYNC_FORDEV);
7637 7975 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7638 7976 mpt->m_free_index = 0;
7639 7977 }
7640 7978 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7641 7979 mpt->m_free_index);
7642 7980 mutex_exit(&mpt->m_mutex);
7643 7981 }
7644 7982
7645 7983 /*
7646 7984 * invoked from timeout() to restart qfull cmds with throttle == 0
7647 7985 */
7648 7986 static void
7649 7987 mptsas_restart_cmd(void *arg)
7650 7988 {
7651 7989 	mptsas_t	*mpt = arg;
7652 7990 	mptsas_target_t	*ptgt = NULL;
7653 7991
7654 7992 	mutex_enter(&mpt->m_mutex);
7655 7993
/* The timeout has fired; clear the pending timeout id under m_mutex. */
7656 7994 	mpt->m_restart_cmd_timeid = 0;
7657 7995
/*
 * Re-open the throttle of every target that was clamped to QFULL_THROTTLE,
 * provided no reset delay is currently pending for that target.
 */
7658 7996 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
7659 7997 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
7660 7998 		if (ptgt->m_reset_delay == 0) {
7661 7999 			if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7662 8000 				mptsas_set_throttle(mpt, ptgt,
7663 8001 				    MAX_THROTTLE);
7664 8002 			}
7665 8003 		}
7666 8004 	}
/* Kick the HBA so any queued commands are dispatched again. */
7667 8005 	mptsas_restart_hba(mpt);
7668 8006 	mutex_exit(&mpt->m_mutex);
7669 8007 }
7670 8008
/*
 * Remove a completed command from the active slot table and adjust the
 * per-target accounting (m_t_ncmds, throttle, active-command expiration
 * queue).  IOC-internal commands are returned to the command pool instead.
 * Caller is expected to hold m_mutex (all accesses below touch m_active
 * and per-target state that other paths access under that lock —
 * TODO confirm against callers).
 */
7671 8009 void
7672 8010 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7673 8011 {
7674 8012 	int		slot;
7675 8013 	mptsas_slots_t	*slots = mpt->m_active;
7676 8014 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
7677 8015
/*
 * NOTE(review): cmd is already dereferenced above (cmd->cmd_tgt_addr)
 * before this NULL ASSERT, so the ASSERT can never catch a NULL cmd.
 */
7678 8016 	ASSERT(cmd != NULL);
7679 8017 	ASSERT(cmd->cmd_queued == FALSE);
7680 8018
7681 8019 	/*
7682 8020 	 * Task Management cmds are removed in their own routines.  Also,
7683 8021 	 * we don't want to modify timeout based on TM cmds.
7684 8022 	 */
7685 8023 	if (cmd->cmd_flags & CFLAG_TM_CMD) {
7686 8024 		return;
7687 8025 	}
7688 8026
7689 8027 	slot = cmd->cmd_slot;
7690 8028
7691 8029 	/*
7692 8030 	 * remove the cmd.
7693 8031 	 */
7694 8032 	if (cmd == slots->m_slot[slot]) {
7695 8033 		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7696 8034 		slots->m_slot[slot] = NULL;
7697 8035 		mpt->m_ncmds--;
7698 8036
7699 8037 		/*
7700 8038 		 * only decrement per target ncmds if command
7701 8039 		 * has a target associated with it.
7702 8040 		 */
7703 8041 		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7704 8042 			ptgt->m_t_ncmds--;
7705 8043 			/*
7706 8044 			 * reset throttle if we just ran an untagged command
7707 8045 			 * to a tagged target
7708 8046 			 */
7709 8047 			if ((ptgt->m_t_ncmds == 0) &&
7710 8048 			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7711 8049 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7712 8050 			}
7713 8051
7714 8052 			/*
7715 8053 			 * Remove this command from the active queue.
7716 8054 			 */
7717 8055 			if (cmd->cmd_active_expiration != 0) {
7718 8056 				TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
7719 8057 				    cmd_active_link);
7720 8058 				cmd->cmd_active_expiration = 0;
7721 8059 			}
7722 8060 		}
7723 8061 	}
7724 8062
7725 8063 	/*
7726 8064 	 * This is all we need to do for ioc commands.
7727 8065 	 */
7728 8066 	if (cmd->cmd_flags & CFLAG_CMDIOC) {
7729 8067 		mptsas_return_to_pool(mpt, cmd);
7730 8068 		return;
7731 8069 	}
7732 8070
/* Sanity: the slot must no longer point at this command. */
7733 8071 	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7734 8072 }
7735 8073
7736 8074 /*
7737 8075 * accept all cmds on the tx_waitq if any and then
7738 8076 * start a fresh request from the top of the device queue.
7739 8077 *
7740 8078 * since there are always cmds queued on the tx_waitq, and rare cmds on
7741 8079 * the instance waitq, so this function should not be invoked in the ISR,
7742 8080 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7743 8081 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7744 8082 */
7745 8083 static void
7746 8084 mptsas_restart_hba(mptsas_t *mpt)
7747 8085 {
7748 8086 	ASSERT(mutex_owned(&mpt->m_mutex));
7749 8087
/*
 * Drain the tx_waitq first (under its own mutex) so commands queued by
 * tran_start while m_mutex was contended are accepted in order, then
 * dispatch from the main waitq.
 */
7750 8088 	mutex_enter(&mpt->m_tx_waitq_mutex);
7751 8089 	if (mpt->m_tx_waitq) {
7752 8090 		mptsas_accept_tx_waitq(mpt);
7753 8091 	}
7754 8092 	mutex_exit(&mpt->m_tx_waitq_mutex);
7755 8093 	mptsas_restart_waitq(mpt);
7756 8094 }
7757 8095
7758 8096 /*
7759 8097 * start a fresh request from the top of the device queue
7760 8098 */
7761 8099 static void
7762 8100 mptsas_restart_waitq(mptsas_t *mpt)
7763 8101 {
7764 8102 	mptsas_cmd_t	*cmd, *next_cmd;
7765 8103 	mptsas_target_t *ptgt = NULL;
7766 8104
7767 8105 	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7768 8106
7769 8107 	ASSERT(mutex_owned(&mpt->m_mutex));
7770 8108
7771 8109 	/*
7772 8110 	 * If there is a reset delay, don't start any cmds.  Otherwise, start
7773 8111 	 * as many cmds as possible.
7774 8112 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7775 8113 	 * commands is m_max_requests - 2.
7776 8114 	 */
7777 8115 	cmd = mpt->m_waitq;
7778 8116
/*
 * Walk the waitq; next_cmd is captured up front because the current cmd
 * may be unlinked (mptsas_waitq_delete) inside the loop body.
 */
7779 8117 	while (cmd != NULL) {
7780 8118 		next_cmd = cmd->cmd_linkp;
7781 8119 		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7782 8120 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7783 8121 				/*
7784 8122 				 * A passthru command that got a slot must
7785 8123 				 * be marked CFLAG_PREPARED.
7786 8124 				 */
7787 8125 				cmd->cmd_flags |= CFLAG_PREPARED;
7788 8126 				mptsas_waitq_delete(mpt, cmd);
7789 8127 				mptsas_start_passthru(mpt, cmd);
7790 8128 			}
7791 8129 			cmd = next_cmd;
7792 8130 			continue;
7793 8131 		}
7794 8132 		if (cmd->cmd_flags & CFLAG_CONFIG) {
7795 8133 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7796 8134 				/*
7797 8135 				 * Send the config page request and delete it
7798 8136 				 * from the waitq.
7799 8137 				 */
7800 8138 				cmd->cmd_flags |= CFLAG_PREPARED;
7801 8139 				mptsas_waitq_delete(mpt, cmd);
7802 8140 				mptsas_start_config_page_access(mpt, cmd);
7803 8141 			}
7804 8142 			cmd = next_cmd;
7805 8143 			continue;
7806 8144 		}
7807 8145 		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7808 8146 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7809 8147 				/*
7810 8148 				 * Send the FW Diag request and delete it from
7811 8149 				 * the waitq.
7812 8150 				 */
7813 8151 				cmd->cmd_flags |= CFLAG_PREPARED;
7814 8152 				mptsas_waitq_delete(mpt, cmd);
7815 8153 				mptsas_start_diag(mpt, cmd);
7816 8154 			}
7817 8155 			cmd = next_cmd;
7818 8156 			continue;
7819 8157 		}
7820 8158
/*
 * Normal SCSI command: if the target was draining and has gone idle,
 * reopen its throttle before the dispatch check below.
 */
7821 8159 		ptgt = cmd->cmd_tgt_addr;
7822 8160 		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7823 8161 		    (ptgt->m_t_ncmds == 0)) {
7824 8162 			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7825 8163 		}
7826 8164 		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7827 8165 		    (ptgt && (ptgt->m_reset_delay == 0)) &&
7828 8166 		    (ptgt && (ptgt->m_t_ncmds <
7829 8167 		    ptgt->m_t_throttle))) {
7830 8168 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7831 8169 				mptsas_waitq_delete(mpt, cmd);
7832 8170 				(void) mptsas_start_cmd(mpt, cmd);
7833 8171 			}
7834 8172 		}
7835 8173 		cmd = next_cmd;
7836 8174 	}
7837 8175 }
7838 8176 /*
7839 8177 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7840 8178 * Accept all those queued cmds before new cmd is accept so that the
7841 8179 * cmds are sent in order.
7842 8180 */
7843 8181 static void
7844 8182 mptsas_accept_tx_waitq(mptsas_t *mpt)
7845 8183 {
7846 8184 	mptsas_cmd_t *cmd;
7847 8185
7848 8186 	ASSERT(mutex_owned(&mpt->m_mutex));
7849 8187 	ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7850 8188
7851 8189 	/*
7852 8190 	 * A Bus Reset could occur at any time and flush the tx_waitq,
7853 8191 	 * so we cannot count on the tx_waitq to contain even one cmd.
7854 8192 	 * And when the m_tx_waitq_mutex is released and run
7855 8193 	 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7856 8194 	 */
/*
 * NOTE(review): this initial assignment is a dead store — the loop below
 * reloads cmd from mpt->m_tx_waitq on every iteration.
 */
7857 8195 	cmd = mpt->m_tx_waitq;
7858 8196 	for (;;) {
7859 8197 		if ((cmd = mpt->m_tx_waitq) == NULL) {
7860 8198 			mpt->m_tx_draining = 0;
7861 8199 			break;
7862 8200 		}
7863 8201 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7864 8202 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7865 8203 		}
7866 8204 		cmd->cmd_linkp = NULL;
/*
 * Drop tx_waitq_mutex across mptsas_accept_pkt(); the queue may be
 * flushed while unlocked, hence the reload at the top of the loop.
 */
7867 8205 		mutex_exit(&mpt->m_tx_waitq_mutex);
7868 8206 		if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7869 8207 			cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7870 8208 			    "to accept cmd on queue\n");
7871 8209 		mutex_enter(&mpt->m_tx_waitq_mutex);
7872 8210 	}
7873 8211 }
7874 8212
7875 8213
7876 8214 /*
7877 8215 * mpt tag type lookup
7878 8216 */
/*
 * Maps the pkt_flags tag bits (indexed via (cmd_pkt_flags & FLAG_TAGMASK)
 * >> 12 in mptsas_start_cmd) to the corresponding SCSI queue-tag message
 * type; unused index values map to 0 (no tag).
 */
7879 8217 static char mptsas_tag_lookup[] =
7880 8218 	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7881 8219
/*
 * Build an MPI2 SCSI IO request frame for cmd in its reserved request
 * slot and post it to the IOC via the request descriptor registers.
 * Also arms the command timeout and links the command into the target's
 * expiration-ordered active queue.  Returns DDI_SUCCESS on successful
 * post, DDI_FAILURE if the command was re-queued for draining or an
 * FMA handle check failed.
 */
7882 8220 static int
7883 8221 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7884 8222 {
7885 8223 	struct scsi_pkt		*pkt = CMD2PKT(cmd);
7886 8224 	uint32_t		control = 0;
7887 8225 	caddr_t			mem;
7888 8226 	pMpi2SCSIIORequest_t	io_request;
7889 8227 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
7890 8228 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
7891 8229 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
7892 8230 	uint16_t		SMID, io_flags = 0;
7893 8231 	uint32_t		request_desc_low, request_desc_high;
7894 8232 	mptsas_cmd_t		*c;
7895 8233
7896 8234 	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));
7897 8235
7898 8236 	/*
7899 8237 	 * The SMID is simply the command's pre-assigned slot number;
7900 8238 	 * SMID 0 is invalid, so slot numbering starts at 1.
7901 8239 	 */
7902 8240 	SMID = cmd->cmd_slot;
7903 8241
7904 8242 	/*
7905 8243 	 * It is possible for back to back device reset to
7906 8244 	 * happen before the reset delay has expired.  That's
7907 8245 	 * ok, just let the device reset go out on the bus.
7908 8246 	 */
7909 8247 	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7910 8248 		ASSERT(ptgt->m_reset_delay == 0);
7911 8249 	}
7912 8250
7913 8251 	/*
7914 8252 	 * if a non-tagged cmd is submitted to an active tagged target
7915 8253 	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
7916 8254 	 * to be untagged
7917 8255 	 */
7918 8256 	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
7919 8257 	    (ptgt->m_t_ncmds > 1) &&
7920 8258 	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
7921 8259 	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
7922 8260 		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7923 8261 			NDBG23(("target=%d, untagged cmd, start draining\n",
7924 8262 			    ptgt->m_devhdl));
7925 8263
7926 8264 			if (ptgt->m_reset_delay == 0) {
7927 8265 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
7928 8266 			}
7929 8267
/* Put the command back at the head of the waitq until the target drains. */
7930 8268 			mptsas_remove_cmd(mpt, cmd);
7931 8269 			cmd->cmd_pkt_flags |= FLAG_HEAD;
7932 8270 			mptsas_waitq_add(mpt, cmd);
7933 8271 		}
7934 8272 		return (DDI_FAILURE);
7935 8273 	}
7936 8274
7937 8275 	/*
7938 8276 	 * Set correct tag bits.
7939 8277 	 */
7940 8278 	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
7941 8279 		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
7942 8280 		    FLAG_TAGMASK) >> 12)]) {
7943 8281 		case MSG_SIMPLE_QTAG:
7944 8282 			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7945 8283 			break;
7946 8284 		case MSG_HEAD_QTAG:
7947 8285 			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
7948 8286 			break;
7949 8287 		case MSG_ORDERED_QTAG:
7950 8288 			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
7951 8289 			break;
7952 8290 		default:
7953 8291 			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
7954 8292 			break;
7955 8293 		}
7956 8294 	} else {
/*
 * Untagged (non-RQSENSE) command: clamp the target to one outstanding
 * command while it is in flight.
 */
7957 8295 		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
7958 8296 			ptgt->m_t_throttle = 1;
7959 8297 		}
7960 8298 		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7961 8299 	}
7962 8300
7963 8301 	if (cmd->cmd_pkt_flags & FLAG_TLR) {
7964 8302 		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
7965 8303 	}
7966 8304
/* Locate and zero this SMID's request frame, then fill in the header. */
7967 8305 	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
7968 8306 	io_request = (pMpi2SCSIIORequest_t)mem;
7969 8307
7970 8308 	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
7971 8309 	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
7972 8310 	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
7973 8311 	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
7974 8312 	    MPI2_FUNCTION_SCSI_IO_REQUEST);
7975 8313
/* Copy the CDB into the request frame. */
7976 8314 	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
7977 8315 	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
7978 8316
7979 8317 	io_flags = cmd->cmd_cdblen;
7980 8318 	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
7981 8319 	/*
7982 8320 	 * setup the Scatter/Gather DMA list for this request
7983 8321 	 */
7984 8322 	if (cmd->cmd_cookiec > 0) {
7985 8323 		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
7986 8324 	} else {
/* No data transfer: build a single zero-length simple SGE. */
7987 8325 		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
7988 8326 		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
7989 8327 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
7990 8328 		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
7991 8329 		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
7992 8330 	}
7993 8331
7994 8332 	/*
7995 8333 	 * save ARQ information
7996 8334 	 */
7997 8335 	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
7998 8336 	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
7999 8337 	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8000 8338 		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8001 8339 		    cmd->cmd_ext_arqcookie.dmac_address);
8002 8340 	} else {
8003 8341 		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8004 8342 		    cmd->cmd_arqcookie.dmac_address);
8005 8343 	}
8006 8344
8007 8345 	ddi_put32(acc_hdl, &io_request->Control, control);
8008 8346
8009 8347 	NDBG31(("starting message=0x%p, with cmd=0x%p",
8010 8348 	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8011 8349
/* Flush the request frame to the device before ringing the doorbell. */
8012 8350 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8013 8351
8014 8352 	/*
8015 8353 	 * Build request descriptor and write it to the request desc post reg.
8016 8354 	 */
8017 8355 	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8018 8356 	request_desc_high = ptgt->m_devhdl << 16;
8019 8357 	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8020 8358
8021 8359 	/*
8022 8360 	 * Start timeout.
8023 8361 	 */
8024 8362 	cmd->cmd_active_expiration =
8025 8363 	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
8026 8364 #ifdef MPTSAS_TEST
8027 8365 	/*
8028 8366 	 * Force timeouts to happen immediately.
8029 8367 	 */
8030 8368 	if (mptsas_test_timeouts)
8031 8369 		cmd->cmd_active_expiration = gethrtime();
8032 8370 #endif
/*
 * Insert into the target's active queue, which is kept sorted by
 * descending expiration time (head expires last).
 */
8033 8371 	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
8034 8372 	if (c == NULL ||
8035 8373 	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
8036 8374 		/*
8037 8375 		 * Common case is that this is the last pending expiration
8038 8376 		 * (or queue is empty).  Insert at head of the queue.
8039 8377 		 */
8040 8378 		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
8041 8379 	} else {
8042 8380 		/*
8043 8381 		 * Queue is not empty and first element expires later than
8044 8382 		 * this command.  Search for element expiring sooner.
8045 8383 		 */
8046 8384 		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
8047 8385 			if (c->cmd_active_expiration <
8048 8386 			    cmd->cmd_active_expiration) {
8049 8387 				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
8050 8388 				break;
8051 8389 			}
8052 8390 		}
8053 8391 		if (c == NULL) {
8054 8392 			/*
8055 8393 			 * No element found expiring sooner, append to
8056 8394 			 * non-empty queue.
8057 8395 			 */
8058 8396 			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
8059 8397 			    cmd_active_link);
8060 8398 		}
8061 8399 	}
8062 8400
/* FMA: verify the handles used above are still healthy. */
8063 8401 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8064 8402 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8065 8403 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8066 8404 		return (DDI_FAILURE);
8067 8405 	}
8068 8406 	return (DDI_SUCCESS);
8069 8407 }
8070 8408
8071 8409 /*
8072 8410 * Select a helper thread to handle current doneq
8073 8411 */
8074 8412 static void
8075 8413 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8076 8414 {
8077 8415 	uint64_t			t, i;
8078 8416 	uint32_t			min = 0xffffffff;
8079 8417 	mptsas_doneq_thread_list_t	*item;
8080 8418
/*
 * NOTE(review): t is only assigned inside the loop, so if
 * m_doneq_thread_n were ever 0 it would be used uninitialized below —
 * presumably attach guarantees at least one helper thread; confirm.
 */
8081 8419 	for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8082 8420 		item = &mpt->m_doneq_thread_id[i];
8083 8421 		/*
8084 8422 		 * If the completed command on help thread[i] less than
8085 8423 		 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8086 8424 		 * pick a thread which has least completed command.
8087 8425 		 */
8088 8426
8089 8427 		mutex_enter(&item->mutex);
8090 8428 		if (item->len < mpt->m_doneq_thread_threshold) {
8091 8429 			t = i;
8092 8430 			mutex_exit(&item->mutex);
8093 8431 			break;
8094 8432 		}
8095 8433 		if (item->len < min) {
8096 8434 			min = item->len;
8097 8435 			t = i;
8098 8436 		}
8099 8437 		mutex_exit(&item->mutex);
8100 8438 	}
/* Hand the global doneq to the chosen helper and wake it. */
8101 8439 	mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8102 8440 	mptsas_doneq_mv(mpt, t);
8103 8441 	cv_signal(&mpt->m_doneq_thread_id[t].cv);
8104 8442 	mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8105 8443 }
8106 8444
8107 8445 /*
8108 8446 * move the current global doneq to the doneq of thead[t]
8109 8447 */
8110 8448 static void
8111 8449 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8112 8450 {
8113 8451 	mptsas_cmd_t			*cmd;
8114 8452 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
8115 8453
8116 8454 	ASSERT(mutex_owned(&item->mutex));
/*
 * Unlink each command from the global doneq and append it to thread t's
 * private doneq, keeping both tail pointers and length counters in sync.
 */
8117 8455 	while ((cmd = mpt->m_doneq) != NULL) {
8118 8456 		if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8119 8457 			mpt->m_donetail = &mpt->m_doneq;
8120 8458 		}
8121 8459 		cmd->cmd_linkp = NULL;
8122 8460 		*item->donetail = cmd;
8123 8461 		item->donetail = &cmd->cmd_linkp;
8124 8462 		mpt->m_doneq_len--;
8125 8463 		item->len++;
8126 8464 	}
8127 8465 }
8128 8466
/*
 * FMA check for a completing command: verify every access and DMA handle
 * the command may have touched (controller registers, request/reply
 * frames, free/post queues, handshake area, config space, plus the
 * command's own data/ARQ/extra-frame handles).  On any fault, report a
 * DDI_SERVICE_UNAFFECTED impact and fail the packet with CMD_TRAN_ERR.
 */
8129 8467 void
8130 8468 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8131 8469 {
8132 8470 	struct scsi_pkt *pkt = CMD2PKT(cmd);
8133 8471
8134 8472 	/* Check all acc and dma handles */
8135 8473 	if ((mptsas_check_acc_handle(mpt->m_datap) !=
8136 8474 	    DDI_SUCCESS) ||
8137 8475 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8138 8476 	    DDI_SUCCESS) ||
8139 8477 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8140 8478 	    DDI_SUCCESS) ||
8141 8479 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8142 8480 	    DDI_SUCCESS) ||
8143 8481 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8144 8482 	    DDI_SUCCESS) ||
8145 8483 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8146 8484 	    DDI_SUCCESS) ||
8147 8485 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
8148 8486 	    DDI_SUCCESS)) {
8149 8487 		ddi_fm_service_impact(mpt->m_dip,
8150 8488 		    DDI_SERVICE_UNAFFECTED);
8151 8489 		ddi_fm_acc_err_clear(mpt->m_config_handle,
8152 8490 		    DDI_FME_VER0);
8153 8491 		pkt->pkt_reason = CMD_TRAN_ERR;
8154 8492 		pkt->pkt_statistics = 0;
8155 8493 	}
8156 8494 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8157 8495 	    DDI_SUCCESS) ||
8158 8496 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8159 8497 	    DDI_SUCCESS) ||
8160 8498 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8161 8499 	    DDI_SUCCESS) ||
8162 8500 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8163 8501 	    DDI_SUCCESS) ||
8164 8502 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8165 8503 	    DDI_SUCCESS)) {
8166 8504 		ddi_fm_service_impact(mpt->m_dip,
8167 8505 		    DDI_SERVICE_UNAFFECTED);
8168 8506 		pkt->pkt_reason = CMD_TRAN_ERR;
8169 8507 		pkt->pkt_statistics = 0;
8170 8508 	}
/* Per-command handles are optional; only check the ones that exist. */
8171 8509 	if (cmd->cmd_dmahandle &&
8172 8510 	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8173 8511 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8174 8512 		pkt->pkt_reason = CMD_TRAN_ERR;
8175 8513 		pkt->pkt_statistics = 0;
8176 8514 	}
8177 8515 	if ((cmd->cmd_extra_frames &&
8178 8516 	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8179 8517 	    DDI_SUCCESS) ||
8180 8518 	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8181 8519 	    DDI_SUCCESS)))) {
8182 8520 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8183 8521 		pkt->pkt_reason = CMD_TRAN_ERR;
8184 8522 		pkt->pkt_statistics = 0;
8185 8523 	}
8186 8524 	if (cmd->cmd_arqhandle &&
8187 8525 	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8188 8526 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8189 8527 		pkt->pkt_reason = CMD_TRAN_ERR;
8190 8528 		pkt->pkt_statistics = 0;
8191 8529 	}
8192 8530 	if (cmd->cmd_ext_arqhandle &&
8193 8531 	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8194 8532 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8195 8533 		pkt->pkt_reason = CMD_TRAN_ERR;
8196 8534 		pkt->pkt_statistics = 0;
8197 8535 	}
8198 8536 }
8199 8537
8200 8538 /*
8201 8539 * These routines manipulate the queue of commands that
8202 8540 * are waiting for their completion routines to be called.
8203 8541 * The queue is usually in FIFO order but on an MP system
8204 8542 * it's possible for the completion routines to get out
8205 8543 * of order. If that's a problem you need to add a global
8206 8544 * mutex around the code that calls the completion routine
8207 8545 * in the interrupt handler.
8208 8546 */
8209 8547 static void
8210 8548 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8211 8549 {
8212 8550 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
8213 8551
8214 8552 	NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8215 8553
8216 8554 	ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8217 8555 	cmd->cmd_linkp = NULL;
/* Mark the command finished and no longer in transport. */
8218 8556 	cmd->cmd_flags |= CFLAG_FINISHED;
8219 8557 	cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8220 8558
8221 8559 	mptsas_fma_check(mpt, cmd);
8222 8560
8223 8561 	/*
8224 8562 	 * only add scsi pkts that have completion routines to
8225 8563 	 * the doneq.  no intr cmds do not have callbacks.
8226 8564 	 */
8227 8565 	if (pkt && (pkt->pkt_comp)) {
8228 8566 		*mpt->m_donetail = cmd;
8229 8567 		mpt->m_donetail = &cmd->cmd_linkp;
8230 8568 		mpt->m_doneq_len++;
8231 8569 	}
8232 8570 }
8233 8571
/*
 * Pop the first completed command off helper thread t's private doneq.
 * Returns NULL when that queue is empty.  Caller is expected to hold the
 * thread item's mutex (len and the list are manipulated unprotected
 * here — TODO confirm against the doneq thread loop).
 */
8234 8572 static mptsas_cmd_t *
8235 8573 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8236 8574 {
8237 8575 	mptsas_cmd_t			*cmd;
8238 8576 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
8239 8577
8240 8578 	/* pop one off the done queue */
8241 8579 	if ((cmd = item->doneq) != NULL) {
8242 8580 		/* if the queue is now empty fix the tail pointer */
8243 8581 		NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8244 8582 		if ((item->doneq = cmd->cmd_linkp) == NULL) {
8245 8583 			item->donetail = &item->doneq;
8246 8584 		}
8247 8585 		cmd->cmd_linkp = NULL;
8248 8586 		item->len--;
8249 8587 	}
8250 8588 	return (cmd);
8251 8589 }
8252 8590
/*
 * Run the completion callbacks for everything on the global doneq.
 * The queue is detached first and m_mutex is dropped while callbacks
 * run; m_in_callback guards against re-entering this routine from a
 * completion callback.
 */
8253 8591 static void
8254 8592 mptsas_doneq_empty(mptsas_t *mpt)
8255 8593 {
8256 8594 	if (mpt->m_doneq && !mpt->m_in_callback) {
8257 8595 		mptsas_cmd_t	*cmd, *next;
8258 8596 		struct scsi_pkt *pkt;
8259 8597
8260 8598 		mpt->m_in_callback = 1;
/* Detach the whole queue so it can be walked without m_mutex. */
8261 8599 		cmd = mpt->m_doneq;
8262 8600 		mpt->m_doneq = NULL;
8263 8601 		mpt->m_donetail = &mpt->m_doneq;
8264 8602 		mpt->m_doneq_len = 0;
8265 8603
8266 8604 		mutex_exit(&mpt->m_mutex);
8267 8605 		/*
8268 8606 		 * run the completion routines of all the
8269 8607 		 * completed commands
8270 8608 		 */
8271 8609 		while (cmd != NULL) {
8272 8610 			next = cmd->cmd_linkp;
8273 8611 			cmd->cmd_linkp = NULL;
8274 8612 			/* run this command's completion routine */
8275 8613 			cmd->cmd_flags |= CFLAG_COMPLETED;
8276 8614 			pkt = CMD2PKT(cmd);
8277 8615 			mptsas_pkt_comp(pkt, cmd);
8278 8616 			cmd = next;
8279 8617 		}
8280 8618 		mutex_enter(&mpt->m_mutex);
8281 8619 		mpt->m_in_callback = 0;
8282 8620 	}
8283 8621 }
8284 8622
8285 8623 /*
8286 8624 * These routines manipulate the target's queue of pending requests
8287 8625 */
8288 8626 void
8289 8627 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8290 8628 {
8291 8629 	NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8292 8630 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8293 8631 	cmd->cmd_queued = TRUE;
8294 8632 	if (ptgt)
8295 8633 		ptgt->m_t_nwait++;
/* FLAG_HEAD commands go to the front of the waitq; all others append. */
8296 8634 	if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8297 8635 		if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8298 8636 			mpt->m_waitqtail = &cmd->cmd_linkp;
8299 8637 		}
8300 8638 		mpt->m_waitq = cmd;
8301 8639 	} else {
8302 8640 		cmd->cmd_linkp = NULL;
8303 8641 		*(mpt->m_waitqtail) = cmd;
8304 8642 		mpt->m_waitqtail = &cmd->cmd_linkp;
8305 8643 	}
8306 8644 }
8307 8645
/*
 * Remove and return the command at the head of the waitq (NULL if the
 * queue is empty), decrementing the target's wait count if the command
 * has an associated target.
 */
8308 8646 static mptsas_cmd_t *
8309 8647 mptsas_waitq_rm(mptsas_t *mpt)
8310 8648 {
8311 8649 	mptsas_cmd_t	*cmd;
8312 8650 	mptsas_target_t *ptgt;
8313 8651 	NDBG7(("mptsas_waitq_rm"));
8314 8652
8315 8653 	MPTSAS_WAITQ_RM(mpt, cmd);
8316 8654
8317 8655 	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8318 8656 	if (cmd) {
8319 8657 		ptgt = cmd->cmd_tgt_addr;
8320 8658 		if (ptgt) {
8321 8659 			ptgt->m_t_nwait--;
8322 8660 			ASSERT(ptgt->m_t_nwait >= 0);
8323 8661 		}
8324 8662 	}
8325 8663 	return (cmd);
8326 8664 }
8327 8665
8328 8666 /*
8329 8667 * remove specified cmd from the middle of the wait queue.
8330 8668 */
8331 8669 static void
8332 8670 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8333 8671 {
8334 8672 	mptsas_cmd_t	*prevp = mpt->m_waitq;
8335 8673 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8336 8674
8337 8675 	NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8338 8676 	    (void *)mpt, (void *)cmd));
8339 8677 	if (ptgt) {
8340 8678 		ptgt->m_t_nwait--;
8341 8679 		ASSERT(ptgt->m_t_nwait >= 0);
8342 8680 	}
8343 8681
/* Fast path: cmd is at the head of the waitq. */
8344 8682 	if (prevp == cmd) {
8345 8683 		if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8346 8684 			mpt->m_waitqtail = &mpt->m_waitq;
8347 8685
8348 8686 		cmd->cmd_linkp = NULL;
8349 8687 		cmd->cmd_queued = FALSE;
8350 8688 		NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8351 8689 		    (void *)mpt, (void *)cmd));
8352 8690 		return;
8353 8691 	}
8354 8692
/* Otherwise search the singly-linked list for cmd's predecessor. */
8355 8693 	while (prevp != NULL) {
8356 8694 		if (prevp->cmd_linkp == cmd) {
8357 8695 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8358 8696 				mpt->m_waitqtail = &prevp->cmd_linkp;
8359 8697
8360 8698 			cmd->cmd_linkp = NULL;
8361 8699 			cmd->cmd_queued = FALSE;
8362 8700 			NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8363 8701 			    (void *)mpt, (void *)cmd));
8364 8702 			return;
8365 8703 		}
8366 8704 		prevp = prevp->cmd_linkp;
8367 8705 	}
/* cmd was not on the waitq — queue accounting is corrupt; panic. */
8368 8706 	cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8369 8707 }
8370 8708
/*
 * Remove and return the command at the head of the tx_waitq (NULL if
 * empty).  Unlike mptsas_waitq_rm, no per-target wait count is kept for
 * this queue.
 */
8371 8709 static mptsas_cmd_t *
8372 8710 mptsas_tx_waitq_rm(mptsas_t *mpt)
8373 8711 {
8374 8712 	mptsas_cmd_t *cmd;
8375 8713 	NDBG7(("mptsas_tx_waitq_rm"));
8376 8714
8377 8715 	MPTSAS_TX_WAITQ_RM(mpt, cmd);
8378 8716
8379 8717 	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8380 8718
8381 8719 	return (cmd);
8382 8720 }
8383 8721
8384 8722 /*
8385 8723 * remove specified cmd from the middle of the tx_waitq.
8386 8724 */
8387 8725 static void
8388 8726 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8389 8727 {
8390 8728 	mptsas_cmd_t	*prevp = mpt->m_tx_waitq;
8391 8729
8392 8730 	NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8393 8731 	    (void *)mpt, (void *)cmd));
8394 8732
/* Fast path: cmd is at the head of the tx_waitq. */
8395 8733 	if (prevp == cmd) {
8396 8734 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8397 8735 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8398 8736
8399 8737 		cmd->cmd_linkp = NULL;
8400 8738 		cmd->cmd_queued = FALSE;
8401 8739 		NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8402 8740 		    (void *)mpt, (void *)cmd));
8403 8741 		return;
8404 8742 	}
8405 8743
/* Otherwise search the singly-linked list for cmd's predecessor. */
8406 8744 	while (prevp != NULL) {
8407 8745 		if (prevp->cmd_linkp == cmd) {
8408 8746 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8409 8747 				mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8410 8748
8411 8749 			cmd->cmd_linkp = NULL;
8412 8750 			cmd->cmd_queued = FALSE;
8413 8751 			NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8414 8752 			    (void *)mpt, (void *)cmd));
8415 8753 			return;
8416 8754 		}
8417 8755 		prevp = prevp->cmd_linkp;
8418 8756 	}
/* cmd was not on the tx_waitq — queue accounting is corrupt; panic. */
8419 8757 	cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8420 8758 }
8421 8759
8422 8760 /*
8423 8761 * device and bus reset handling
8424 8762 *
8425 8763 * Notes:
8426 8764 * - RESET_ALL: reset the controller
8427 8765 * - RESET_TARGET: reset the target specified in scsi_address
8428 8766 */
8429 8767 static int
8430 8768 mptsas_scsi_reset(struct scsi_address *ap, int level)
8431 8769 {
8432 8770 	mptsas_t		*mpt = ADDR2MPT(ap);
8433 8771 	int			rval;
8434 8772 	mptsas_tgt_private_t	*tgt_private;
8435 8773 	mptsas_target_t		*ptgt = NULL;
8436 8774
8437 8775 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8438 8776 	ptgt = tgt_private->t_private;
8439 8777 	if (ptgt == NULL) {
8440 8778 		return (FALSE);
8441 8779 	}
8442 8780 	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8443 8781 	    level));
8444 8782
/*
 * NOTE(review): level is only used in the debug message above; a target
 * reset is issued regardless of RESET_ALL vs RESET_TARGET — confirm this
 * is intentional.
 */
8445 8783 	mutex_enter(&mpt->m_mutex);
8446 8784 	/*
8447 8785 	 * if we are not in panic set up a reset delay for this target
8448 8786 	 */
8449 8787 	if (!ddi_in_panic()) {
8450 8788 		mptsas_setup_bus_reset_delay(mpt);
8451 8789 	} else {
/* In panic context we cannot use timeouts; busy-wait instead. */
8452 8790 		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8453 8791 	}
8454 8792 	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8455 8793 	mutex_exit(&mpt->m_mutex);
8456 8794
8457 8795 	/*
8458 8796 	 * The transport layer expect to only see TRUE and
8459 8797 	 * FALSE. Therefore, we will adjust the return value
8460 8798 	 * if mptsas_do_scsi_reset returns FAILED.
8461 8799 	 */
8462 8800 	if (rval == FAILED)
8463 8801 		rval = FALSE;
8464 8802 	return (rval);
8465 8803 }
8466 8804
8467 8805 static int
8468 8806 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8469 8807 {
8470 8808 int rval = FALSE;
8471 8809 uint8_t config, disk;
8472 8810
8473 8811 ASSERT(mutex_owned(&mpt->m_mutex));
8474 8812
8475 8813 if (mptsas_debug_resets) {
8476 8814 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8477 8815 devhdl);
8478 8816 }
8479 8817
8480 8818 /*
8481 8819 * Issue a Target Reset message to the target specified but not to a
8482 8820 * disk making up a raid volume. Just look through the RAID config
8483 8821 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8484 8822 * list, then don't reset this target.
8485 8823 */
8486 8824 for (config = 0; config < mpt->m_num_raid_configs; config++) {
8487 8825 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8488 8826 if (devhdl == mpt->m_raidconfig[config].
8489 8827 m_physdisk_devhdl[disk]) {
8490 8828 return (TRUE);
8491 8829 }
8492 8830 }
8493 8831 }
8494 8832
8495 8833 rval = mptsas_ioc_task_management(mpt,
8496 8834 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8497 8835
8498 8836 mptsas_doneq_empty(mpt);
8499 8837 return (rval);
8500 8838 }
8501 8839
8502 8840 static int
8503 8841 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8504 8842 void (*callback)(caddr_t), caddr_t arg)
8505 8843 {
8506 8844 mptsas_t *mpt = ADDR2MPT(ap);
8507 8845
8508 8846 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8509 8847
8510 8848 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8511 8849 &mpt->m_mutex, &mpt->m_reset_notify_listf));
8512 8850 }
8513 8851
8514 8852 static int
8515 8853 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8516 8854 {
8517 8855 dev_info_t *lun_dip = NULL;
8518 8856
8519 8857 ASSERT(sd != NULL);
8520 8858 ASSERT(name != NULL);
8521 8859 lun_dip = sd->sd_dev;
8522 8860 ASSERT(lun_dip != NULL);
8523 8861
8524 8862 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8525 8863 return (1);
8526 8864 } else {
8527 8865 return (0);
8528 8866 }
8529 8867 }
8530 8868
/*
 * The bus-address string is identical to the unit-address name, so
 * simply delegate to mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	int	ret = mptsas_get_name(sd, name, len);

	return (ret);
}
8536 8874
8537 8875 void
8538 8876 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8539 8877 {
8540 8878
8541 8879 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8542 8880
8543 8881 /*
8544 8882 * if the bus is draining/quiesced, no changes to the throttles
8545 8883 * are allowed. Not allowing change of throttles during draining
8546 8884 * limits error recovery but will reduce draining time
8547 8885 *
8548 8886 * all throttles should have been set to HOLD_THROTTLE
8549 8887 */
8550 8888 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8551 8889 return;
8552 8890 }
8553 8891
8554 8892 if (what == HOLD_THROTTLE) {
8555 8893 ptgt->m_t_throttle = HOLD_THROTTLE;
8556 8894 } else if (ptgt->m_reset_delay == 0) {
8557 8895 ptgt->m_t_throttle = what;
8558 8896 }
8559 8897 }
8560 8898
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;
	hrtime_t	timestamp;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/* Snapshot the clock once; used to classify timed-out commands. */
	timestamp = gethrtime();

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		/* Default disposition; overridden per tasktype below. */
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				if (cmd->cmd_active_expiration <= timestamp) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			/* Abort-task-set completes commands as aborted. */
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			/* Both cases match on target AND lun. */
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			/* Capture successor first: deletion unlinks cmd. */
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/*
				 * Drop the tx_waitq lock around completion
				 * processing; next_cmd was captured before
				 * the unlink so the walk resumes safely.
				 */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		/* Same walk as above, but matching on target AND lun. */
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
8708 9046
/*
 * Clean up hba state: abort all outstanding commands and all commands in
 * the wait queues, and reset the timeout state of all targets.
 * Called with m_mutex held (dropped temporarily to drain the taskqs).
 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags &
			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
				/* Wake any thread blocked on this command. */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		/* Complete the stranded command with a bus-reset reason. */
		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/* Drop the list lock while completing the command. */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources.
	 * m_mutex must be released here since taskq callbacks take it.
	 */
	mutex_exit(&mpt->m_mutex);
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);
	mutex_enter(&mpt->m_mutex);
}
8799 9137
8800 9138 /*
8801 9139 * set pkt_reason and OR in pkt_statistics flag
8802 9140 */
8803 9141 static void
8804 9142 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8805 9143 uint_t stat)
8806 9144 {
8807 9145 #ifndef __lock_lint
8808 9146 _NOTE(ARGUNUSED(mpt))
8809 9147 #endif
8810 9148
8811 9149 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8812 9150 (void *)cmd, reason, stat));
8813 9151
8814 9152 if (cmd) {
8815 9153 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8816 9154 cmd->cmd_pkt->pkt_reason = reason;
8817 9155 }
8818 9156 cmd->cmd_pkt->pkt_statistics |= stat;
8819 9157 }
8820 9158 }
8821 9159
8822 9160 static void
8823 9161 mptsas_start_watch_reset_delay()
8824 9162 {
8825 9163 NDBG22(("mptsas_start_watch_reset_delay"));
8826 9164
8827 9165 mutex_enter(&mptsas_global_mutex);
8828 9166 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8829 9167 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8830 9168 drv_usectohz((clock_t)
8831 9169 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8832 9170 ASSERT(mptsas_reset_watch != NULL);
8833 9171 }
8834 9172 mutex_exit(&mptsas_global_mutex);
8835 9173 }
8836 9174
8837 9175 static void
8838 9176 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8839 9177 {
8840 9178 mptsas_target_t *ptgt = NULL;
8841 9179
8842 9180 ASSERT(MUTEX_HELD(&mpt->m_mutex));
8843 9181
8844 9182 NDBG22(("mptsas_setup_bus_reset_delay"));
8845 9183 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8846 9184 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8847 9185 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8848 9186 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8849 9187 }
8850 9188
8851 9189 mptsas_start_watch_reset_delay();
8852 9190 }
8853 9191
8854 9192 /*
8855 9193 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8856 9194 * mpt instance for active reset delays
8857 9195 */
8858 9196 static void
8859 9197 mptsas_watch_reset_delay(void *arg)
8860 9198 {
8861 9199 #ifndef __lock_lint
8862 9200 _NOTE(ARGUNUSED(arg))
8863 9201 #endif
8864 9202
8865 9203 mptsas_t *mpt;
8866 9204 int not_done = 0;
8867 9205
8868 9206 NDBG22(("mptsas_watch_reset_delay"));
8869 9207
8870 9208 mutex_enter(&mptsas_global_mutex);
8871 9209 mptsas_reset_watch = 0;
8872 9210 mutex_exit(&mptsas_global_mutex);
8873 9211 rw_enter(&mptsas_global_rwlock, RW_READER);
8874 9212 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
8875 9213 if (mpt->m_tran == 0) {
8876 9214 continue;
8877 9215 }
8878 9216 mutex_enter(&mpt->m_mutex);
8879 9217 not_done += mptsas_watch_reset_delay_subr(mpt);
8880 9218 mutex_exit(&mpt->m_mutex);
8881 9219 }
8882 9220 rw_exit(&mptsas_global_rwlock);
8883 9221
8884 9222 if (not_done) {
8885 9223 mptsas_start_watch_reset_delay();
8886 9224 }
8887 9225 }
8888 9226
8889 9227 static int
8890 9228 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
8891 9229 {
8892 9230 int done = 0;
8893 9231 int restart = 0;
8894 9232 mptsas_target_t *ptgt = NULL;
8895 9233
8896 9234 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
8897 9235
8898 9236 ASSERT(mutex_owned(&mpt->m_mutex));
8899 9237
8900 9238 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8901 9239 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8902 9240 if (ptgt->m_reset_delay != 0) {
8903 9241 ptgt->m_reset_delay -=
8904 9242 MPTSAS_WATCH_RESET_DELAY_TICK;
8905 9243 if (ptgt->m_reset_delay <= 0) {
8906 9244 ptgt->m_reset_delay = 0;
8907 9245 mptsas_set_throttle(mpt, ptgt,
8908 9246 MAX_THROTTLE);
8909 9247 restart++;
8910 9248 } else {
8911 9249 done = -1;
8912 9250 }
8913 9251 }
8914 9252 }
8915 9253
8916 9254 if (restart > 0) {
8917 9255 mptsas_restart_hba(mpt);
8918 9256 }
8919 9257 return (done);
8920 9258 }
8921 9259
#ifdef MPTSAS_TEST
/*
 * Test hook: when mptsas_rtest is set to a target's devhdl, issue a
 * device reset to that target and clear the trigger on success.
 * (The unused "ptgt" local has been removed.)
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
8938 9276
8939 9277 /*
8940 9278 * abort handling:
8941 9279 *
8942 9280 * Notes:
8943 9281 * - if pkt is not NULL, abort just that command
8944 9282 * - if pkt is NULL, abort all outstanding commands for target
8945 9283 */
8946 9284 static int
8947 9285 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
8948 9286 {
8949 9287 mptsas_t *mpt = ADDR2MPT(ap);
8950 9288 int rval;
8951 9289 mptsas_tgt_private_t *tgt_private;
8952 9290 int target, lun;
8953 9291
8954 9292 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
8955 9293 tran_tgt_private;
8956 9294 ASSERT(tgt_private != NULL);
8957 9295 target = tgt_private->t_private->m_devhdl;
8958 9296 lun = tgt_private->t_lun;
8959 9297
8960 9298 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
8961 9299
8962 9300 mutex_enter(&mpt->m_mutex);
8963 9301 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
8964 9302 mutex_exit(&mpt->m_mutex);
8965 9303 return (rval);
8966 9304 }
8967 9305
/*
 * Abort one packet (pkt != NULL) or the whole task set for target/lun
 * (pkt == NULL).  Caller must hold m_mutex.  Returns TRUE on success,
 * FALSE otherwise.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		/*
		 * A still-queued command has not reached the hardware;
		 * complete it locally as aborted.
		 */
		if (sp->cmd_queued) {
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set.
	 * Note: a non-NULL pkt that is neither queued nor in an active
	 * slot also falls through to the task-set abort.
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	mptsas_doneq_empty(mpt);
	return (rval);
}
9047 9385
9048 9386 /*
9049 9387 * capability handling:
9050 9388 * (*tran_getcap). Get the capability named, and return its value.
9051 9389 */
9052 9390 static int
9053 9391 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9054 9392 {
9055 9393 mptsas_t *mpt = ADDR2MPT(ap);
9056 9394 int ckey;
9057 9395 int rval = FALSE;
9058 9396
9059 9397 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9060 9398 ap->a_target, cap, tgtonly));
9061 9399
9062 9400 mutex_enter(&mpt->m_mutex);
9063 9401
9064 9402 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9065 9403 mutex_exit(&mpt->m_mutex);
9066 9404 return (UNDEFINED);
9067 9405 }
9068 9406
9069 9407 switch (ckey) {
9070 9408 case SCSI_CAP_DMA_MAX:
9071 9409 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9072 9410 break;
9073 9411 case SCSI_CAP_ARQ:
9074 9412 rval = TRUE;
9075 9413 break;
9076 9414 case SCSI_CAP_MSG_OUT:
9077 9415 case SCSI_CAP_PARITY:
9078 9416 case SCSI_CAP_UNTAGGED_QING:
9079 9417 rval = TRUE;
9080 9418 break;
9081 9419 case SCSI_CAP_TAGGED_QING:
9082 9420 rval = TRUE;
9083 9421 break;
9084 9422 case SCSI_CAP_RESET_NOTIFICATION:
9085 9423 rval = TRUE;
9086 9424 break;
9087 9425 case SCSI_CAP_LINKED_CMDS:
9088 9426 rval = FALSE;
9089 9427 break;
9090 9428 case SCSI_CAP_QFULL_RETRIES:
9091 9429 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9092 9430 tran_tgt_private))->t_private->m_qfull_retries;
9093 9431 break;
9094 9432 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9095 9433 rval = drv_hztousec(((mptsas_tgt_private_t *)
9096 9434 (ap->a_hba_tran->tran_tgt_private))->
9097 9435 t_private->m_qfull_retry_interval) / 1000;
9098 9436 break;
9099 9437 case SCSI_CAP_CDB_LEN:
9100 9438 rval = CDB_GROUP4;
9101 9439 break;
9102 9440 case SCSI_CAP_INTERCONNECT_TYPE:
9103 9441 rval = INTERCONNECT_SAS;
9104 9442 break;
9105 9443 case SCSI_CAP_TRAN_LAYER_RETRIES:
9106 9444 if (mpt->m_ioc_capabilities &
9107 9445 MPI2_IOCFACTS_CAPABILITY_TLR)
9108 9446 rval = TRUE;
9109 9447 else
9110 9448 rval = FALSE;
9111 9449 break;
9112 9450 default:
9113 9451 rval = UNDEFINED;
9114 9452 break;
9115 9453 }
9116 9454
9117 9455 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9118 9456
9119 9457 mutex_exit(&mpt->m_mutex);
9120 9458 return (rval);
9121 9459 }
9122 9460
9123 9461 /*
9124 9462 * (*tran_setcap). Set the capability named to the value given.
9125 9463 */
9126 9464 static int
9127 9465 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9128 9466 {
9129 9467 mptsas_t *mpt = ADDR2MPT(ap);
9130 9468 int ckey;
9131 9469 int rval = FALSE;
9132 9470
9133 9471 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9134 9472 ap->a_target, cap, value, tgtonly));
9135 9473
9136 9474 if (!tgtonly) {
9137 9475 return (rval);
9138 9476 }
9139 9477
9140 9478 mutex_enter(&mpt->m_mutex);
9141 9479
9142 9480 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9143 9481 mutex_exit(&mpt->m_mutex);
9144 9482 return (UNDEFINED);
9145 9483 }
9146 9484
9147 9485 switch (ckey) {
9148 9486 case SCSI_CAP_DMA_MAX:
9149 9487 case SCSI_CAP_MSG_OUT:
9150 9488 case SCSI_CAP_PARITY:
9151 9489 case SCSI_CAP_INITIATOR_ID:
9152 9490 case SCSI_CAP_LINKED_CMDS:
9153 9491 case SCSI_CAP_UNTAGGED_QING:
9154 9492 case SCSI_CAP_RESET_NOTIFICATION:
9155 9493 /*
9156 9494 * None of these are settable via
9157 9495 * the capability interface.
9158 9496 */
9159 9497 break;
9160 9498 case SCSI_CAP_ARQ:
9161 9499 /*
9162 9500 * We cannot turn off arq so return false if asked to
9163 9501 */
9164 9502 if (value) {
9165 9503 rval = TRUE;
9166 9504 } else {
9167 9505 rval = FALSE;
9168 9506 }
9169 9507 break;
9170 9508 case SCSI_CAP_TAGGED_QING:
9171 9509 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9172 9510 (ap->a_hba_tran->tran_tgt_private))->t_private,
9173 9511 MAX_THROTTLE);
9174 9512 rval = TRUE;
9175 9513 break;
9176 9514 case SCSI_CAP_QFULL_RETRIES:
9177 9515 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9178 9516 t_private->m_qfull_retries = (uchar_t)value;
9179 9517 rval = TRUE;
9180 9518 break;
9181 9519 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9182 9520 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9183 9521 t_private->m_qfull_retry_interval =
9184 9522 drv_usectohz(value * 1000);
9185 9523 rval = TRUE;
9186 9524 break;
9187 9525 default:
9188 9526 rval = UNDEFINED;
9189 9527 break;
9190 9528 }
9191 9529 mutex_exit(&mpt->m_mutex);
9192 9530 return (rval);
9193 9531 }
9194 9532
9195 9533 /*
9196 9534 * Utility routine for mptsas_ifsetcap/ifgetcap
9197 9535 */
9198 9536 /*ARGSUSED*/
9199 9537 static int
9200 9538 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9201 9539 {
9202 9540 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9203 9541
9204 9542 if (!cap)
9205 9543 return (FALSE);
9206 9544
9207 9545 *cidxp = scsi_hba_lookup_capstr(cap);
9208 9546 return (TRUE);
9209 9547 }
9210 9548
9211 9549 static int
9212 9550 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9213 9551 {
9214 9552 mptsas_slots_t *old_active = mpt->m_active;
9215 9553 mptsas_slots_t *new_active;
9216 9554 size_t size;
9217 9555
9218 9556 /*
9219 9557 * if there are active commands, then we cannot
9220 9558 * change size of active slots array.
9221 9559 */
9222 9560 ASSERT(mpt->m_ncmds == 0);
9223 9561
9224 9562 size = MPTSAS_SLOTS_SIZE(mpt);
9225 9563 new_active = kmem_zalloc(size, flag);
9226 9564 if (new_active == NULL) {
9227 9565 NDBG1(("new active alloc failed"));
9228 9566 return (-1);
9229 9567 }
9230 9568 /*
9231 9569 * Since SMID 0 is reserved and the TM slot is reserved, the
9232 9570 * number of slots that can be used at any one time is
9233 9571 * m_max_requests - 2.
9234 9572 */
9235 9573 new_active->m_n_normal = (mpt->m_max_requests - 2);
9236 9574 new_active->m_size = size;
9237 9575 new_active->m_rotor = 1;
9238 9576 if (old_active)
9239 9577 mptsas_free_active_slots(mpt);
9240 9578 mpt->m_active = new_active;
9241 9579
9242 9580 return (0);
9243 9581 }
9244 9582
9245 9583 static void
9246 9584 mptsas_free_active_slots(mptsas_t *mpt)
9247 9585 {
9248 9586 mptsas_slots_t *active = mpt->m_active;
9249 9587 size_t size;
9250 9588
9251 9589 if (active == NULL)
9252 9590 return;
9253 9591 size = active->m_size;
9254 9592 kmem_free(active, size);
9255 9593 mpt->m_active = NULL;
9256 9594 }
9257 9595
/*
 * Error logging, printing, and debug print routines.
 */
/* Driver name passed to scsi_log() as the message prefix. */
static char *mptsas_label = "mpt_sas";
9262 9600
/*
 * Driver-wide logging routine: format the message into the shared
 * mptsas_log_buf (serialized by mptsas_log_mutex) and emit it through
 * scsi_log() against the instance's dip (or NULL if mpt is NULL).
 */
/*PRINTFLIKE3*/
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list	ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	/*
	 * NOTE(review): vsprintf() is unbounded; this assumes
	 * mptsas_log_buf is large enough for every caller's message --
	 * confirm the buffer size (vsnprintf would be safer).
	 */
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	/* CE_CONT messages get an explicit trailing newline. */
	if (level == CE_CONT) {
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9290 9628
#ifdef MPTSAS_DEBUG
/*
 * Debug-only printf: format into the shared mptsas_log_buf (serialized
 * by mptsas_log_mutex) and emit via prom_printf() when PROM_PRINTF is
 * defined, otherwise via scsi_log() as a console-suppressed ("!")
 * CE_CONT message.  (Diff-viewer residue removed from this block.)
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	/*
	 * NOTE(review): vsprintf() is unbounded; assumes mptsas_log_buf
	 * is large enough for every caller's message -- confirm.
	 */
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
#endif
9313 9651
9314 9652 /*
9315 9653 * timeout handling
9316 9654 */
9317 9655 static void
9318 9656 mptsas_watch(void *arg)
9319 9657 {
9320 9658 #ifndef __lock_lint
9321 9659 _NOTE(ARGUNUSED(arg))
9322 9660 #endif
9323 9661
9324 9662 mptsas_t *mpt;
9325 9663 uint32_t doorbell;
9326 9664
9327 9665 NDBG30(("mptsas_watch"));
9328 9666
9329 9667 rw_enter(&mptsas_global_rwlock, RW_READER);
9330 9668 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9331 9669
9332 9670 mutex_enter(&mpt->m_mutex);
9333 9671
9334 9672 /* Skip device if not powered on */
9335 9673 if (mpt->m_options & MPTSAS_OPT_PM) {
9336 9674 if (mpt->m_power_level == PM_LEVEL_D0) {
9337 9675 (void) pm_busy_component(mpt->m_dip, 0);
9338 9676 mpt->m_busy = 1;
9339 9677 } else {
9340 9678 mutex_exit(&mpt->m_mutex);
9341 9679 continue;
9342 9680 }
9343 9681 }
9344 9682
9345 9683 /*
9346 9684 * Check if controller is in a FAULT state. If so, reset it.
9347 9685 */
9348 9686 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9349 9687 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9350 9688 doorbell &= MPI2_DOORBELL_DATA_MASK;
9351 9689 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9352 9690 "code: %04x", doorbell);
9353 9691 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9354 9692 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9355 9693 mptsas_log(mpt, CE_WARN, "Reset failed"
9356 9694 "after fault was detected");
9357 9695 }
9358 9696 }
9359 9697
9360 9698 /*
9361 9699 * For now, always call mptsas_watchsubr.
9362 9700 */
9363 9701 mptsas_watchsubr(mpt);
9364 9702
9365 9703 if (mpt->m_options & MPTSAS_OPT_PM) {
9366 9704 mpt->m_busy = 0;
9367 9705 (void) pm_idle_component(mpt->m_dip, 0);
9368 9706 }
9369 9707
9370 9708 mutex_exit(&mpt->m_mutex);
9371 9709 }
9372 9710 rw_exit(&mptsas_global_rwlock);
9373 9711
9374 9712 mutex_enter(&mptsas_global_mutex);
9375 9713 if (mptsas_timeouts_enabled)
9376 9714 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9377 9715 mutex_exit(&mptsas_global_mutex);
9378 9716 }
9379 9717
/*
 * Per-instance watchdog work: detect stuck or timed-out commands, manage
 * target throttles accordingly, and trigger timeout recovery when all of
 * a target's commands have expired.  Caller must hold m_mutex.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;
	hrtime_t	timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/* wake whoever is blocked on it */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		/*
		 * Inspect the tail of the target's active-command list;
		 * presumably the queue is ordered so the tail carries the
		 * latest expiration -- TODO confirm the TAILQ ordering.
		 */
		cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
		if (cmd == NULL)
			continue;

		if (cmd->cmd_active_expiration <= timestamp) {
			/*
			 * Earliest command timeout expired. Drain throttle.
			 */
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

			/*
			 * Check for remaining commands.
			 */
			cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
			if (cmd->cmd_active_expiration > timestamp) {
				/*
				 * Wait for remaining commands to complete or
				 * time out.
				 */
				NDBG23(("command timed out, pending drain"));
				continue;
			}

			/*
			 * All command timeouts expired.
			 */
			mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
			    "expired with %d commands on target %d lun %d.",
			    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
			    ptgt->m_devhdl, Lun(cmd));

			mptsas_cmd_timeout(mpt, ptgt);
		} else if (cmd->cmd_active_expiration <=
		    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
			/* Timeout imminent: begin draining early. */
			NDBG23(("pending timeout"));
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
		}
	}
}
9481 9819
9482 9820 /*
9483 9821 * timeout recovery
9484 9822 */
/*
 * Handle a disconnected-command timeout on a target: log the target by
 * SAS WWN (or by PHY number when no WWN is known) and issue a device
 * reset so all of its outstanding commands are aborted.
 */
9485 9823 static void
9486 9824 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
9487 9825 {
9488 9826 uint16_t devhdl;
9489 9827 uint64_t sas_wwn;
9490 9828 uint8_t phy;
9491 9829 char wwn_str[MPTSAS_WWN_STRLEN];
9492 9830 
9493 9831 devhdl = ptgt->m_devhdl;
9494 9832 sas_wwn = ptgt->m_addr.mta_wwn;
9495 9833 phy = ptgt->m_phynum;
/* A zero WWN means the target is addressed by PHY ("pN"), else by WWN. */
9496 9834 if (sas_wwn == 0) {
9497 9835 (void) sprintf(wwn_str, "p%x", phy);
9498 9836 } else {
9499 9837 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
9500 9838 }
9501 9839 
9502 9840 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9503 9841 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9504 9842 "target %d %s.", devhdl, wwn_str);
9505 9843 
9506 9844 /*
9507 9845 * Abort all outstanding commands on the device.
9508 9846 */
9509 9847 NDBG29(("mptsas_cmd_timeout: device reset"));
9510 9848 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9511 9849 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9512 9850 "recovery failed!", devhdl);
9513 9851 }
9514 9852 }
9515 9853
9516 9854 /*
9517 9855 * Device / Hotplug control
9518 9856 */
/*
 * scsi_hba quiesce(9E) entry point: resolve the softstate from the
 * devinfo node and delegate to mptsas_quiesce_bus().  Returns -1 if the
 * transport private data is missing.
 */
9519 9857 static int
9520 9858 mptsas_scsi_quiesce(dev_info_t *dip)
9521 9859 {
9522 9860 mptsas_t *mpt;
9523 9861 scsi_hba_tran_t *tran;
9524 9862 
9525 9863 tran = ddi_get_driver_private(dip);
9526 9864 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9527 9865 return (-1);
9528 9866 
9529 9867 return (mptsas_quiesce_bus(mpt));
9530 9868 }
9531 9869
/*
 * scsi_hba unquiesce(9E) entry point: resolve the softstate and delegate
 * to mptsas_unquiesce_bus().  Returns -1 if transport data is missing.
 */
9532 9870 static int
9533 9871 mptsas_scsi_unquiesce(dev_info_t *dip)
9534 9872 {
9535 9873 mptsas_t *mpt;
9536 9874 scsi_hba_tran_t *tran;
9537 9875 
9538 9876 tran = ddi_get_driver_private(dip);
9539 9877 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9540 9878 return (-1);
9541 9879 
9542 9880 return (mptsas_unquiesce_bus(mpt));
9543 9881 }
9544 9882
/*
 * Quiesce the HBA: hold all target throttles, then wait (interruptibly)
 * for outstanding commands to drain, with mptsas_ncmds_checkdrain() as a
 * periodic re-check.  Returns 0 once quiesced, -1 if interrupted by a
 * signal (throttles are restored and the HBA restarted in that case).
 */
9545 9883 static int
9546 9884 mptsas_quiesce_bus(mptsas_t *mpt)
9547 9885 {
9548 9886 mptsas_target_t *ptgt = NULL;
9549 9887 
9550 9888 NDBG28(("mptsas_quiesce_bus"));
9551 9889 mutex_enter(&mpt->m_mutex);
9552 9890 
9553 9891 /* Set all the throttles to zero */
9554 9892 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9555 9893 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9556 9894 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9557 9895 }
9558 9896 
9559 9897 /* If there are any outstanding commands in the queue */
9560 9898 if (mpt->m_ncmds) {
9561 9899 mpt->m_softstate |= MPTSAS_SS_DRAINING;
9562 9900 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9563 9901 mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
9564 9902 if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
9565 9903 /*
9566 9904 * Quiesce has been interrupted
9567 9905 */
9568 9906 mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9569 9907 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9570 9908 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9571 9909 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9572 9910 }
9573 9911 mptsas_restart_hba(mpt);
/* Cancel the pending drain-check callout; drop the lock first. */
9574 9912 if (mpt->m_quiesce_timeid != 0) {
9575 9913 timeout_id_t tid = mpt->m_quiesce_timeid;
9576 9914 mpt->m_quiesce_timeid = 0;
9577 9915 mutex_exit(&mpt->m_mutex);
9578 9916 (void) untimeout(tid);
9579 9917 return (-1);
9580 9918 }
9581 9919 mutex_exit(&mpt->m_mutex);
9582 9920 return (-1);
9583 9921 } else {
9584 9922 /* Bus has been quiesced */
9585 9923 ASSERT(mpt->m_quiesce_timeid == 0);
9586 9924 mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9587 9925 mpt->m_softstate |= MPTSAS_SS_QUIESCED;
9588 9926 mutex_exit(&mpt->m_mutex);
9589 9927 return (0);
9590 9928 }
9591 9929 }
9592 9930 /* Bus was not busy - QUIESCED */
9593 9931 mutex_exit(&mpt->m_mutex);
9594 9932 
9595 9933 return (0);
9596 9934 }
9597 9935
/*
 * Undo a quiesce: clear the QUIESCED state, restore every target to full
 * throttle and restart I/O on the HBA.  Always returns 0.
 */
9598 9936 static int
9599 9937 mptsas_unquiesce_bus(mptsas_t *mpt)
9600 9938 {
9601 9939 mptsas_target_t *ptgt = NULL;
9602 9940 
9603 9941 NDBG28(("mptsas_unquiesce_bus"));
9604 9942 mutex_enter(&mpt->m_mutex);
9605 9943 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9606 9944 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9607 9945 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9608 9946 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9609 9947 }
9610 9948 mptsas_restart_hba(mpt);
9611 9949 mutex_exit(&mpt->m_mutex);
9612 9950 return (0);
9613 9951 }
9614 9952
/*
 * Callout used while draining for quiesce: if all commands have
 * completed, wake the waiter in mptsas_quiesce_bus(); otherwise re-hold
 * all throttles (a bus reset may have restored them) and re-arm itself.
 */
9615 9953 static void
9616 9954 mptsas_ncmds_checkdrain(void *arg)
9617 9955 {
9618 9956 mptsas_t *mpt = arg;
9619 9957 mptsas_target_t *ptgt = NULL;
9620 9958 
9621 9959 mutex_enter(&mpt->m_mutex);
9622 9960 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
9623 9961 mpt->m_quiesce_timeid = 0;
9624 9962 if (mpt->m_ncmds == 0) {
9625 9963 /* Command queue has been drained */
9626 9964 cv_signal(&mpt->m_cv);
9627 9965 } else {
9628 9966 /*
9629 9967 * The throttle may have been reset because
9630 9968 * of a SCSI bus reset
9631 9969 */
9632 9970 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9633 9971 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9634 9972 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9635 9973 }
9636 9974 
9637 9975 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9638 9976 mpt, (MPTSAS_QUIESCE_TIMEOUT *
9639 9977 drv_usectohz(1000000)));
9640 9978 }
9641 9979 }
9642 9980 mutex_exit(&mpt->m_mutex);
9643 9981 }
9644 9982
9645 9983 /*ARGSUSED*/
/*
 * Debug helper: pretty-print a command's CDB bytes, packet flags,
 * statistics/state and SCSI status via NDBG25.  No functional effect
 * outside of debug output.
 */
9646 9984 static void
9647 9985 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9648 9986 {
9649 9987 int i;
9650 9988 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9651 9989 char buf[128];
9652 9990 
9653 9991 buf[0] = '\0';
9654 9992 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9655 9993 Tgt(cmd), Lun(cmd)));
9656 9994 (void) sprintf(&buf[0], "\tcdb=[");
9657 9995 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9658 9996 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9659 9997 }
|
↓ open down ↓ |
341 lines elided |
↑ open up ↑ |
9660 9998 (void) sprintf(&buf[strlen(buf)], " ]");
9661 9999 NDBG25(("?%s\n", buf));
9662 10000 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9663 10001 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9664 10002 cmd->cmd_pkt->pkt_state));
9665 10003 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
9666 10004 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
9667 10005 }
9668 10006
/*
 * Build the MPI2 scatter/gather list for a passthrough request at sgep:
 * an optional HOST_TO_IOC simple 64-bit SGE for the data-out buffer,
 * followed by the terminating simple SGE for the data(-in) buffer whose
 * direction flag comes from pt->direction.
 */
9669 10007 static void
10008 +mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10009 + pMpi2SGESimple64_t sgep)
10010 +{
10011 + uint32_t sge_flags;
10012 + uint32_t data_size, dataout_size;
10013 + ddi_dma_cookie_t data_cookie;
10014 + ddi_dma_cookie_t dataout_cookie;
10015 +
10016 + data_size = pt->data_size;
10017 + dataout_size = pt->dataout_size;
10018 + data_cookie = pt->data_cookie;
10019 + dataout_cookie = pt->dataout_cookie;
10020 +
10021 + if (dataout_size) {
10022 + sge_flags = dataout_size |
10023 + ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10024 + MPI2_SGE_FLAGS_END_OF_BUFFER |
10025 + MPI2_SGE_FLAGS_HOST_TO_IOC |
10026 + MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10027 + MPI2_SGE_FLAGS_SHIFT);
10028 + ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10029 + ddi_put32(acc_hdl, &sgep->Address.Low,
10030 + (uint32_t)(dataout_cookie.dmac_laddress & 0xffffffffull));
10031 + ddi_put32(acc_hdl, &sgep->Address.High,
10032 + (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10033 + sgep++;
10034 + }
/* Final SGE: marks last element / end of buffer / end of list. */
10035 + sge_flags = data_size;
10036 + sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10037 + MPI2_SGE_FLAGS_LAST_ELEMENT |
10038 + MPI2_SGE_FLAGS_END_OF_BUFFER |
10039 + MPI2_SGE_FLAGS_END_OF_LIST |
10040 + MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10041 + MPI2_SGE_FLAGS_SHIFT);
10042 + if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10043 + sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
10044 + MPI2_SGE_FLAGS_SHIFT);
10045 + } else {
10046 + sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
10047 + MPI2_SGE_FLAGS_SHIFT);
10048 + }
10049 + ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10050 + ddi_put32(acc_hdl, &sgep->Address.Low,
10051 + (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10052 + ddi_put32(acc_hdl, &sgep->Address.High,
10053 + (uint32_t)(data_cookie.dmac_laddress >> 32));
10054 +}
10055 +
/*
 * Build the IEEE (MPI 2.5 / SAS3) scatter/gather list for a passthrough
 * request at ieeesgep: an optional simple SGE for the data-out buffer,
 * then the final SGE flagged END_OF_LIST for the data(-in) buffer.
 * Note IEEE SGEs carry no direction flag, unlike the MPI2 variant.
 */
10056 +static void
10057 +mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10058 + pMpi2IeeeSgeSimple64_t ieeesgep)
10059 +{
10060 + uint8_t sge_flags;
10061 + uint32_t data_size, dataout_size;
10062 + ddi_dma_cookie_t data_cookie;
10063 + ddi_dma_cookie_t dataout_cookie;
10064 +
10065 + data_size = pt->data_size;
10066 + dataout_size = pt->dataout_size;
10067 + data_cookie = pt->data_cookie;
10068 + dataout_cookie = pt->dataout_cookie;
10069 +
10070 + sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
10071 + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
10072 + if (dataout_size) {
10073 + ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
10074 + ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10075 + (uint32_t)(dataout_cookie.dmac_laddress &
10076 + 0xffffffffull));
10077 + ddi_put32(acc_hdl, &ieeesgep->Address.High,
10078 + (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10079 + ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10080 + ieeesgep++;
10081 + }
10082 + sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
10083 + ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
10084 + ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10085 + (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10086 + ddi_put32(acc_hdl, &ieeesgep->Address.High,
10087 + (uint32_t)(data_cookie.dmac_laddress >> 32));
10088 + ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10089 +}
10090 +
/*
 * Copy a prepared passthrough request into the request frame for this
 * command's slot, append the appropriate SGL (IEEE for MPI 2.5 unless
 * pt->simple is set, MPI2 otherwise), fix up sense buffer/descriptor
 * fields for SCSI IO requests, and post the request to the IOC.
 */
10091 +static void
9670 10092 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9671 10093 {
9672 10094 caddr_t memp;
9673 10095 pMPI2RequestHeader_t request_hdrp;
9674 10096 struct scsi_pkt *pkt = cmd->cmd_pkt;
9675 10097 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
9676 - uint32_t request_size, data_size, dataout_size;
9677 - uint32_t direction;
9678 - ddi_dma_cookie_t data_cookie;
9679 - ddi_dma_cookie_t dataout_cookie;
10098 + uint32_t request_size;
9680 10099 uint32_t request_desc_low, request_desc_high = 0;
9681 - uint32_t i, sense_bufp;
10100 + uint64_t sense_bufp;
9682 10101 uint8_t desc_type;
9683 10102 uint8_t *request, function;
9684 10103 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
9685 10104 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
9686 10105 
9687 10106 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9688 10107 
9689 10108 request = pt->request;
9690 - direction = pt->direction;
9691 10109 request_size = pt->request_size;
9692 - data_size = pt->data_size;
9693 - dataout_size = pt->dataout_size;
9694 - data_cookie = pt->data_cookie;
9695 - dataout_cookie = pt->dataout_cookie;
9696 10110 
9697 10111 /*
9698 10112 * Store the passthrough message in memory location
9699 10113 * corresponding to our slot number
9700 10114 */
9701 10115 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9702 10116 request_hdrp = (pMPI2RequestHeader_t)memp;
9703 10117 bzero(memp, mpt->m_req_frame_size);
9704 10118 
9705 - for (i = 0; i < request_size; i++) {
9706 - bcopy(request + i, memp + i, 1);
9707 - }
10119 + bcopy(request, memp, request_size);
9708 10120 
9709 - if (data_size || dataout_size) {
9710 - pMpi2SGESimple64_t sgep;
9711 - uint32_t sge_flags;
10121 + NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10122 + "size=%d, in %d, out %d", request_hdrp->Function,
10123 + request_hdrp->MsgFlags, request_size,
10124 + pt->data_size, pt->dataout_size));
9712 10125 
9713 - sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9714 - request_size);
9715 - if (dataout_size) {
9716 -
9717 - sge_flags = dataout_size |
9718 - ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9719 - MPI2_SGE_FLAGS_END_OF_BUFFER |
9720 - MPI2_SGE_FLAGS_HOST_TO_IOC |
9721 - MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9722 - MPI2_SGE_FLAGS_SHIFT);
9723 - ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9724 - ddi_put32(acc_hdl, &sgep->Address.Low,
9725 - (uint32_t)(dataout_cookie.dmac_laddress &
9726 - 0xffffffffull));
9727 - ddi_put32(acc_hdl, &sgep->Address.High,
9728 - (uint32_t)(dataout_cookie.dmac_laddress
9729 - >> 32));
9730 - sgep++;
9731 - }
9732 - sge_flags = data_size;
9733 - sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9734 - MPI2_SGE_FLAGS_LAST_ELEMENT |
9735 - MPI2_SGE_FLAGS_END_OF_BUFFER |
9736 - MPI2_SGE_FLAGS_END_OF_LIST |
9737 - MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9738 - MPI2_SGE_FLAGS_SHIFT);
9739 - if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9740 - sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9741 - MPI2_SGE_FLAGS_SHIFT);
9742 - } else {
9743 - sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9744 - MPI2_SGE_FLAGS_SHIFT);
9745 - }
9746 - ddi_put32(acc_hdl, &sgep->FlagsLength,
9747 - sge_flags);
9748 - ddi_put32(acc_hdl, &sgep->Address.Low,
9749 - (uint32_t)(data_cookie.dmac_laddress &
9750 - 0xffffffffull));
9751 - ddi_put32(acc_hdl, &sgep->Address.High,
9752 - (uint32_t)(data_cookie.dmac_laddress >> 32));
10126 + /*
10127 + * Add an SGE, even if the length is zero.
10128 + */
10129 + if (mpt->m_MPI25 && pt->simple == 0) {
10130 + mptsas_passthru_ieee_sge(acc_hdl, pt,
10131 + (pMpi2IeeeSgeSimple64_t)
10132 + ((uint8_t *)request_hdrp + pt->sgl_offset));
10133 + } else {
10134 + mptsas_passthru_sge(acc_hdl, pt,
10135 + (pMpi2SGESimple64_t)
10136 + ((uint8_t *)request_hdrp + pt->sgl_offset));
9753 10137 }
9754 10138 
9755 10139 function = request_hdrp->Function;
9756 10140 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9757 10141 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9758 10142 pMpi2SCSIIORequest_t scsi_io_req;
9759 10143 
10144 + NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
9760 10145 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9761 10146 /*
9762 10147 * Put SGE for data and data_out buffer at the end of
9763 10148 * scsi_io_request message header.(64 bytes in total)
9764 10149 * Following above SGEs, the residual space will be
9765 10150 * used by sense data.
9766 10151 */
9767 10152 ddi_put8(acc_hdl,
9768 10153 &scsi_io_req->SenseBufferLength,
9769 10154 (uint8_t)(request_size - 64));
9770 10155 
/*
 * NOTE(review): only the low 32 bits of the request frame DMA
 * address are kept for SenseBufferLowAddress — presumably the
 * frame pool is allocated below 4GB; confirm the DMA attributes
 * guarantee that.
 */
9771 - sense_bufp = mpt->m_req_frame_dma_addr +
9772 - (mpt->m_req_frame_size * cmd->cmd_slot);
10156 + sense_bufp = (uint32_t)(mpt->m_req_frame_dma_addr +
10157 + (mpt->m_req_frame_size * cmd->cmd_slot) & 0xffffffffull);
9773 10158 sense_bufp += 64;
9774 10159 ddi_put32(acc_hdl,
9775 10160 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9776 10161 
9777 10162 /*
9778 10163 * Set SGLOffset0 value
9779 10164 */
9780 10165 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9781 10166 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9782 10167 
9783 10168 /*
9784 10169 * Setup descriptor info. RAID passthrough must use the
9785 10170 * default request descriptor which is already set, so if this
9786 10171 * is a SCSI IO request, change the descriptor to SCSI IO.
9787 10172 */
9788 10173 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9789 10174 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9790 10175 request_desc_high = (ddi_get16(acc_hdl,
9791 10176 &scsi_io_req->DevHandle) << 16);
9792 10177 }
9793 10178 }
9794 10179 
9795 10180 /*
9796 10181 * We must wait till the message has been completed before
9797 10182 * beginning the next message so we wait for this one to
9798 10183 * finish.
9799 10184 */
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
9800 10185 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9801 10186 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9802 10187 cmd->cmd_rfm = NULL;
9803 10188 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9804 10189 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9805 10190 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9806 10191 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9807 10192 }
9808 10193 }
9809 10194
/*
 * Per-function "prepare" hooks: each computes pt->sgl_offset (and any
 * request fixups) for one MPI2 function before the passthrough is sent.
 */
10195 +typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10196 +static mptsas_pre_f mpi_pre_ioc_facts;
10197 +static mptsas_pre_f mpi_pre_port_facts;
10198 +static mptsas_pre_f mpi_pre_fw_download;
10199 +static mptsas_pre_f mpi_pre_fw_25_download;
10200 +static mptsas_pre_f mpi_pre_fw_upload;
10201 +static mptsas_pre_f mpi_pre_fw_25_upload;
10202 +static mptsas_pre_f mpi_pre_sata_passthrough;
10203 +static mptsas_pre_f mpi_pre_smp_passthrough;
10204 +static mptsas_pre_f mpi_pre_config;
10205 +static mptsas_pre_f mpi_pre_sas_io_unit_control;
10206 +static mptsas_pre_f mpi_pre_scsi_io_req;
9810 10207
10208 +/*
10209 + * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10210 + */
/* Sets pt->sgl_offset just past the request's Transaction Context SGE. */
10211 +static void
10212 +mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10213 +{
10214 + pMpi2FWDownloadTCSGE_t tcsge;
10215 + pMpi2FWDownloadRequest req;
9811 10216 
10217 + /*
10218 + * If SAS3, call separate function.
10219 + */
10220 + if (mpt->m_MPI25) {
10221 + mpi_pre_fw_25_download(mpt, pt);
10222 + return;
10223 + }
10224 +
10225 + /*
10226 + * User requests should come in with the Transaction
10227 + * context element where the SGL will go. Putting the
10228 + * SGL after that seems to work, but don't really know
10229 + * why. Other drivers tend to create an extra SGL and
10230 + * refer to the TCE through that.
10231 + */
10232 + req = (pMpi2FWDownloadRequest)pt->request;
10233 + tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10234 + if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10235 + tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10236 + mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10237 + }
10238 +
10239 + pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10240 + sizeof (*tcsge);
10241 + if (pt->request_size != pt->sgl_offset)
10242 + NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10243 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10244 + (int)pt->request_size, (int)pt->sgl_offset,
10245 + (int)pt->dataout_size));
10246 + if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10247 + NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10248 + "0x%x, should be 0x%x", pt->data_size,
10249 + (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10250 +}
10251 +
10252 +/*
10253 + * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10254 + * The MPI 2.5 request has no TCE; copy the image offset/size from the
10255 + * user-supplied TCE into the request proper and set pt->sgl_offset.
10256 + */
10257 +static void
10258 +mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10259 +{
10260 + pMpi2FWDownloadTCSGE_t tcsge;
10261 + pMpi2FWDownloadRequest req2;
10262 + pMpi25FWDownloadRequest req25;
10263 +
10264 + /*
10265 + * User requests should come in with the Transaction
10266 + * context element where the SGL will go. The new firmware
10267 + * doesn't use TCE and has space in the main request for
10268 + * this information. So move to the right place.
10269 + */
10270 + req2 = (pMpi2FWDownloadRequest)pt->request;
10271 + req25 = (pMpi25FWDownloadRequest)pt->request;
10272 + tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10273 + if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10274 + tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10275 + mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10276 + }
10277 + req25->ImageOffset = tcsge->ImageOffset;
10278 + req25->ImageSize = tcsge->ImageSize;
10279 +
10280 + pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10281 + if (pt->request_size != pt->sgl_offset)
10282 + NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10283 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10284 + pt->request_size, pt->sgl_offset,
10285 + pt->dataout_size));
10286 + if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10287 + NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10288 + "0x%x, should be 0x%x", pt->data_size,
10289 + (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10290 +}
10289 +
10291 +/*
10292 + * Prepare the pt for a SAS2 FW_UPLOAD request.
10293 + */
/* Sets pt->sgl_offset just past the request's Transaction Context SGE. */
10294 +static void
10295 +mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10296 +{
10297 + pMpi2FWUploadTCSGE_t tcsge;
10298 + pMpi2FWUploadRequest_t req;
10299 +
10300 + /*
10301 + * If SAS3, call separate function.
10302 + */
10303 + if (mpt->m_MPI25) {
10304 + mpi_pre_fw_25_upload(mpt, pt);
10305 + return;
10306 + }
10307 +
10308 + /*
10309 + * User requests should come in with the Transaction
10310 + * context element where the SGL will go. Putting the
10311 + * SGL after that seems to work, but don't really know
10312 + * why. Other drivers tend to create an extra SGL and
10313 + * refer to the TCE through that.
10314 + */
10315 + req = (pMpi2FWUploadRequest_t)pt->request;
10316 + tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10317 + if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10318 + tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10319 + mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10320 + }
10321 +
10322 + pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10323 + sizeof (*tcsge);
10324 + if (pt->request_size != pt->sgl_offset)
10325 + NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10326 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10327 + pt->request_size, pt->sgl_offset,
10328 + pt->dataout_size));
10329 + if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10330 + NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10331 + "0x%x, should be 0x%x", pt->data_size,
10332 + (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10333 +}
10333 +
10334 +/*
10335 + * Prepare the pt for a SAS3 FW_UPLOAD request.
10336 + * The MPI 2.5 request has no TCE; copy the image offset/size from the
10337 + * user-supplied TCE into the request proper and set pt->sgl_offset.
10338 + */
10339 +static void
10340 +mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10341 +{
10342 + pMpi2FWUploadTCSGE_t tcsge;
10343 + pMpi2FWUploadRequest_t req2;
10344 + pMpi25FWUploadRequest_t req25;
10345 +
10346 + /*
10347 + * User requests should come in with the Transaction
10348 + * context element where the SGL will go. The new firmware
10349 + * doesn't use TCE and has space in the main request for
10350 + * this information. So move to the right place.
10351 + */
10352 + req2 = (pMpi2FWUploadRequest_t)pt->request;
10353 + req25 = (pMpi25FWUploadRequest_t)pt->request;
10354 + tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10355 + if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10356 + tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10357 + mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10358 + }
10359 + req25->ImageOffset = tcsge->ImageOffset;
10360 + req25->ImageSize = tcsge->ImageSize;
10361 +
10362 + pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10363 + if (pt->request_size != pt->sgl_offset)
10364 + NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10365 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10366 + pt->request_size, pt->sgl_offset,
10367 + pt->dataout_size));
10368 + if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10369 + NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10370 + "0x%x, should be 0x%x", pt->data_size,
10371 + (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10372 +}
10371 +
10372 +/*
10373 + * Prepare the pt for an IOC_FACTS request.
10374 + * No SGL payload: sgl_offset is simply the whole request size.
10375 + */
10376 +static void
10377 +mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10378 +{
10379 +#ifndef __lock_lint
10380 + _NOTE(ARGUNUSED(mpt))
10381 +#endif
10382 + if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST))
10383 + NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10384 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10385 + pt->request_size,
10386 + (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10387 + pt->dataout_size));
10388 + if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY))
10389 + NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10390 + "0x%x, should be 0x%x", pt->data_size,
10391 + (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10392 + pt->sgl_offset = (uint16_t)pt->request_size;
10393 +}
10393 +
10394 +/*
10395 + * Prepare the pt for a PORT_FACTS request.
10396 + * No SGL payload: sgl_offset is simply the whole request size.
10397 + */
10398 +static void
10399 +mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10400 +{
10401 +#ifndef __lock_lint
10402 + _NOTE(ARGUNUSED(mpt))
10403 +#endif
10404 + if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST))
10405 + NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10406 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10407 + pt->request_size,
10408 + (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10409 + pt->dataout_size));
10410 + if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY))
10411 + NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10412 + "0x%x, should be 0x%x", pt->data_size,
10413 + (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10414 + pt->sgl_offset = (uint16_t)pt->request_size;
10415 +}
10415 +
10416 +/*
10417 + * Prepare pt for a SATA_PASSTHROUGH request.
10418 + * The SGL goes at the request's SGL field; warn (debug) on size mismatch.
10419 + */
10420 +static void
10421 +mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10422 +{
10423 +#ifndef __lock_lint
10424 + _NOTE(ARGUNUSED(mpt))
10425 +#endif
10426 + pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10427 + if (pt->request_size != pt->sgl_offset)
10428 + NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10429 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10430 + pt->request_size, pt->sgl_offset,
10431 + pt->dataout_size));
10432 + if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY))
10433 + NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10434 + "0x%x, should be 0x%x", pt->data_size,
10435 + (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10436 +}
10436 +
/*
 * Prepare pt for an SMP_PASSTHROUGH request.
 * The SGL goes at the request's SGL field; warn (debug) on size mismatch.
 */
10437 +static void
10438 +mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10439 +{
10440 +#ifndef __lock_lint
10441 + _NOTE(ARGUNUSED(mpt))
10442 +#endif
10443 + pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10444 + if (pt->request_size != pt->sgl_offset)
10445 + NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10446 + "0x%x, should be 0x%x, dataoutsz 0x%x",
10447 + pt->request_size, pt->sgl_offset,
10448 + pt->dataout_size));
10449 + if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY))
10450 + NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10451 + "0x%x, should be 0x%x", pt->data_size,
10452 + (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10453 +}
10454 +
10455 +/*
10456 + * Prepare pt for a CONFIG request.
10457 + * CONFIG always uses a plain MPI2 simple SGE (pt->simple = 1), even on
10458 + * MPI 2.5 controllers.
10459 + */
10460 +static void
10461 +mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10462 +{
10463 +#ifndef __lock_lint
10464 + _NOTE(ARGUNUSED(mpt))
10465 +#endif
10466 + pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10467 + if (pt->request_size != pt->sgl_offset)
10468 + NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10469 + "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10470 + pt->sgl_offset, pt->dataout_size));
10471 + if (pt->data_size != sizeof (MPI2_CONFIG_REPLY))
10472 + NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10473 + "should be 0x%x", pt->data_size,
10474 + (int)sizeof (MPI2_CONFIG_REPLY)));
10475 + pt->simple = 1;
10476 +}
10475 +
10477 +/*
10478 + * Prepare pt for a SCSI_IO_REQ request.
10479 + * The SGL goes at the request's SGL field; warn (debug) on size mismatch.
10480 + */
10481 +static void
10482 +mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10483 +{
10484 +#ifndef __lock_lint
10485 + _NOTE(ARGUNUSED(mpt))
10486 +#endif
10487 + pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10488 + if (pt->request_size != pt->sgl_offset)
10489 + NDBG15(("mpi_pre_scsi_io_req(): Incorrect req size, 0x%x, "
10490 + "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10491 + pt->sgl_offset,
10492 + pt->dataout_size));
10493 + if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY))
10494 + NDBG15(("mpi_pre_scsi_io_req(): Incorrect rep size, 0x%x, "
10495 + "should be 0x%x", pt->data_size,
10496 + (int)sizeof (MPI2_SCSI_IO_REPLY)));
10497 +}
10496 +
10497 +/*
10498 + * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
10499 + * No SGL payload: sgl_offset is simply the whole request size.
10500 + */
10501 +static void
10502 +mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
10503 +{
10504 +#ifndef __lock_lint
10505 + _NOTE(ARGUNUSED(mpt))
10506 +#endif
10507 + pt->sgl_offset = (uint16_t)pt->request_size;
10508 +}
10508 +
10509 +/*
10510 + * A set of functions to prepare an mptsas_cmd for the various
10511 + * supported requests.
10512 + * Dispatch table keyed by MPI2 function code; terminated by the
10513 + * { 0xFF, NULL, NULL } sentinel (NULL f_pre ends the scan).
10514 + */
10515 +static struct mptsas_func {
10516 + U8 Function;
10517 + char *Name;
10518 + mptsas_pre_f *f_pre;
10519 +} mptsas_func_list[] = {
10520 + { MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS", mpi_pre_ioc_facts },
10521 + { MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS", mpi_pre_port_facts },
10522 + { MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD", mpi_pre_fw_download },
10523 + { MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD", mpi_pre_fw_upload },
10524 + { MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
10525 + mpi_pre_sata_passthrough },
10526 + { MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
10527 + mpi_pre_smp_passthrough},
10528 + { MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
10529 + mpi_pre_scsi_io_req},
10530 + { MPI2_FUNCTION_CONFIG, "CONFIG", mpi_pre_config},
10531 + { MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
10532 + mpi_pre_sas_io_unit_control },
10533 + { 0xFF, NULL, NULL } /* list end */
10534 +};
10533 +
/*
 * Look up the request's MPI2 function in mptsas_func_list and run its
 * prepare hook to set pt->sgl_offset (and any request fixups).  Unknown
 * functions fall back to sgl_offset == request_size (no SGL).
 */
10534 +static void
10535 +mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
10536 +{
10537 + pMPI2RequestHeader_t hdr;
10538 + struct mptsas_func *f;
10539 +
10540 + hdr = (pMPI2RequestHeader_t)pt->request;
10541 +
10542 + for (f = mptsas_func_list; f->f_pre != NULL; f++) {
10543 + if (hdr->Function == f->Function) {
10544 + f->f_pre(mpt, pt);
10545 + NDBG15(("mptsas_prep_sgl_offset: Function %s,"
10546 + " sgl_offset 0x%x", f->Name,
10547 + pt->sgl_offset));
10548 + return;
10549 + }
10550 + }
10551 + NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
10552 + " returning req_size 0x%x for sgl_offset",
10553 + hdr->Function, pt->request_size));
10554 + pt->sgl_offset = (uint16_t)pt->request_size;
10555 +}
10556 +
10557 +
/*
 * Execute a user-initiated MPI pass-through request.
 *
 * Copies the MPI request (and any data-out payload) in from user space,
 * allocates DMA buffers for the data phases, issues the request through the
 * normal command slot machinery, waits for completion on m_passthru_cv, and
 * copies the reply (plus sense data for SCSI IO functions) and any data-in
 * payload back out to user space.
 *
 * request/reply/data/dataout are user-space addresses; *_size are their
 * byte counts.  direction is one of MPTSAS_PASS_THRU_DIRECTION_*.  timeout
 * is in seconds.  mode is the ioctl(9E) mode flag passed straight to
 * ddi_copyin/ddi_copyout.
 *
 * Returns 0 on success or an errno value (EFAULT, ENOMEM, EAGAIN, EIO,
 * ETIMEDOUT).  Must be entered with mpt->m_mutex held; the mutex is
 * dropped and re-acquired around every user-space copy, and is held again
 * on return.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint8_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
    mptsas_pt_request_t pt;
    mptsas_dma_alloc_state_t data_dma_state;
    mptsas_dma_alloc_state_t dataout_dma_state;
    caddr_t memp;
    mptsas_cmd_t *cmd = NULL;
    struct scsi_pkt *pkt;
    uint32_t reply_len = 0, sense_len = 0;
    pMPI2RequestHeader_t request_hdrp;
    pMPI2RequestHeader_t request_msg;
    pMPI2DefaultReply_t reply_msg;
    Mpi2SCSIIOReply_t rep_msg;
    int i, status = 0, pt_flags = 0, rv = 0;
    int rvalue;
    uint8_t function;

    ASSERT(mutex_owned(&mpt->m_mutex));

    /*
     * Start with a zeroed stack reply; it is only replaced by a pointer
     * into the reply-frame region if the completion is an ADDRESS reply.
     */
    reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
    bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
    request_msg = kmem_zalloc(request_size, KM_SLEEP);

    mutex_exit(&mpt->m_mutex);
    /*
     * copy in the request buffer since it could be used by
     * another thread when the pt request into waitq
     */
    if (ddi_copyin(request, request_msg, request_size, mode)) {
        mutex_enter(&mpt->m_mutex);
        status = EFAULT;
        mptsas_log(mpt, CE_WARN, "failed to copy request data");
        goto out;
    }
    mutex_enter(&mpt->m_mutex);

    /*
     * Task management requests are not queued like other pass-through
     * requests; hand them to the dedicated TM path and return.
     */
    function = request_msg->Function;
    if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
        pMpi2SCSITaskManagementRequest_t task;
        task = (pMpi2SCSITaskManagementRequest_t)request_msg;
        mptsas_setup_bus_reset_delay(mpt);
        rv = mptsas_ioc_task_management(mpt, task->TaskType,
            task->DevHandle, (int)task->LUN[1], reply, reply_size,
            mode);

        if (rv != TRUE) {
            status = EIO;
            mptsas_log(mpt, CE_WARN, "task management failed");
        }
        goto out;
    }

    /*
     * Allocate the data-in DMA buffer.  For a WRITE direction the user
     * payload is copied into it up front (byte-at-a-time copyin, done
     * with the mutex dropped).
     */
    if (data_size != 0) {
        data_dma_state.size = data_size;
        if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
            status = ENOMEM;
            mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
                "resource");
            goto out;
        }
        pt_flags |= MPTSAS_DATA_ALLOCATED;
        if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
            mutex_exit(&mpt->m_mutex);
            for (i = 0; i < data_size; i++) {
                if (ddi_copyin(data + i, (uint8_t *)
                    data_dma_state.memp + i, 1, mode)) {
                    mutex_enter(&mpt->m_mutex);
                    status = EFAULT;
                    mptsas_log(mpt, CE_WARN, "failed to "
                        "copy read data");
                    goto out;
                }
            }
            mutex_enter(&mpt->m_mutex);
        }
    }
    else
        /* No data phase: keep the state zeroed so pt.data_cookie is 0. */
        bzero(&data_dma_state, sizeof (data_dma_state));

    /*
     * Same for the separate data-out buffer used by BOTH-direction
     * requests; its payload is always copied in.
     */
    if (dataout_size != 0) {
        dataout_dma_state.size = dataout_size;
        if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
            status = ENOMEM;
            mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
                "resource");
            goto out;
        }
        pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
        mutex_exit(&mpt->m_mutex);
        for (i = 0; i < dataout_size; i++) {
            if (ddi_copyin(dataout + i, (uint8_t *)
                dataout_dma_state.memp + i, 1, mode)) {
                mutex_enter(&mpt->m_mutex);
                mptsas_log(mpt, CE_WARN, "failed to copy out"
                    " data");
                status = EFAULT;
                goto out;
            }
        }
        mutex_enter(&mpt->m_mutex);
    }
    else
        bzero(&dataout_dma_state, sizeof (dataout_dma_state));

    if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
        status = EAGAIN;
        mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
        goto out;
    }
    pt_flags |= MPTSAS_REQUEST_POOL_CMD;

    bzero((caddr_t)cmd, sizeof (*cmd));
    bzero((caddr_t)pkt, scsi_pkt_size());
    bzero((caddr_t)&pt, sizeof (pt));

    cmd->ioc_cmd_slot = (uint32_t)(rvalue);

    /*
     * Describe the pass-through for mptsas_start_passthru; the SGL
     * offset is derived from the request's Function code.
     */
    pt.request = (uint8_t *)request_msg;
    pt.direction = direction;
    pt.simple = 0;
    pt.request_size = request_size;
    pt.data_size = data_size;
    pt.dataout_size = dataout_size;
    pt.data_cookie = data_dma_state.cookie;
    pt.dataout_cookie = dataout_dma_state.cookie;
    mptsas_prep_sgl_offset(mpt, &pt);

    /*
     * Form a blank cmd/pkt to store the acknowledgement message
     */
    pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
    pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
    pkt->pkt_ha_private = (opaque_t)&pt;
    pkt->pkt_flags = FLAG_HEAD;
    pkt->pkt_time = timeout;
    cmd->cmd_pkt = pkt;
    cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

    /*
     * Save the command in a slot
     */
    if (mptsas_save_cmd(mpt, cmd) == TRUE) {
        /*
         * Once passthru command get slot, set cmd_flags
         * CFLAG_PREPARED.
         */
        cmd->cmd_flags |= CFLAG_PREPARED;
        mptsas_start_passthru(mpt, cmd);
    } else {
        mptsas_waitq_add(mpt, cmd);
    }

    /* Block until the completion path sets CFLAG_FINISHED. */
    while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
        cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
    }

    /*
     * request_hdrp points back into the request frame that was actually
     * sent; only valid when the command made it into a slot.  It is read
     * later only under the same CFLAG_PREPARED guard.
     */
    if (cmd->cmd_flags & CFLAG_PREPARED) {
        memp = mpt->m_req_frame + (mpt->m_req_frame_size *
            cmd->cmd_slot);
        request_hdrp = (pMPI2RequestHeader_t)memp;
    }

    if (cmd->cmd_flags & CFLAG_TIMEOUT) {
        status = ETIMEDOUT;
        mptsas_log(mpt, CE_WARN, "passthrough command timeout");
        pt_flags |= MPTSAS_CMD_TIMEOUT;
        goto out;
    }

    if (cmd->cmd_rfm) {
        /*
         * cmd_rfm is zero means the command reply is a CONTEXT
         * reply and no PCI Write to post the free reply SMFA
         * because no reply message frame is used.
         * cmd_rfm is non-zero means the reply is a ADDRESS
         * reply and reply message frame is used.
         */
        pt_flags |= MPTSAS_ADDRESS_REPLY;
        (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
            DDI_DMA_SYNC_FORCPU);
        reply_msg = (pMPI2DefaultReply_t)
            (mpt->m_reply_frame + (cmd->cmd_rfm -
            mpt->m_reply_frame_dma_addr));
    }

    mptsas_fma_check(mpt, cmd);
    if (pkt->pkt_reason == CMD_TRAN_ERR) {
        status = EAGAIN;
        mptsas_log(mpt, CE_WARN, "passthru fma error");
        goto out;
    }
    if (pkt->pkt_reason == CMD_RESET) {
        status = EAGAIN;
        mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
        goto out;
    }

    if (pkt->pkt_reason == CMD_INCOMPLETE) {
        status = EIO;
        mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
        goto out;
    }

    mutex_exit(&mpt->m_mutex);
    if (cmd->cmd_flags & CFLAG_PREPARED) {
        /*
         * SCSI IO replies carry sense data after the fixed reply; for
         * other functions the whole reply_size is reply data.
         */
        function = request_hdrp->Function;
        if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
            (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
            reply_len = sizeof (MPI2_SCSI_IO_REPLY);
            sense_len = reply_size - reply_len;
        } else {
            reply_len = reply_size;
            sense_len = 0;
        }

        for (i = 0; i < reply_len; i++) {
            if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
                mode)) {
                mutex_enter(&mpt->m_mutex);
                status = EFAULT;
                mptsas_log(mpt, CE_WARN, "failed to copy out "
                    "reply data");
                goto out;
            }
        }
        /*
         * Sense data is read from 64 bytes into the request frame —
         * presumably where mptsas_start_passthru placed the sense
         * buffer; TODO confirm against the request-frame layout.
         */
        for (i = 0; i < sense_len; i++) {
            if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
                reply + reply_len + i, 1, mode)) {
                mutex_enter(&mpt->m_mutex);
                status = EFAULT;
                mptsas_log(mpt, CE_WARN, "failed to copy out "
                    "sense data");
                goto out;
            }
        }
    }

    /*
     * For READ/BOTH directions, sync the data buffer for the CPU and
     * copy the device's data back to the caller.
     */
    if (data_size) {
        if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
            (void) ddi_dma_sync(data_dma_state.handle, 0, 0,
                DDI_DMA_SYNC_FORCPU);
            for (i = 0; i < data_size; i++) {
                if (ddi_copyout((uint8_t *)(
                    data_dma_state.memp + i), data + i, 1,
                    mode)) {
                    mutex_enter(&mpt->m_mutex);
                    status = EFAULT;
                    mptsas_log(mpt, CE_WARN, "failed to "
                        "copy out the reply data");
                    goto out;
                }
            }
        }
    }
    mutex_enter(&mpt->m_mutex);
out:
    /*
     * Put the reply frame back on the free queue, increment the free
     * index, and write the new index to the free index register. But only
     * if this reply is an ADDRESS reply.
     */
    if (pt_flags & MPTSAS_ADDRESS_REPLY) {
        ddi_put32(mpt->m_acc_free_queue_hdl,
            &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
            cmd->cmd_rfm);
        (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
            DDI_DMA_SYNC_FORDEV);
        if (++mpt->m_free_index == mpt->m_free_queue_depth) {
            mpt->m_free_index = 0;
        }
        ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
            mpt->m_free_index);
    }
    /* A slotted cmd is removed here; the pool return below then skips it. */
    if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
        mptsas_remove_cmd(mpt, cmd);
        pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
    }
    if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
        mptsas_return_to_pool(mpt, cmd);
    if (pt_flags & MPTSAS_DATA_ALLOCATED) {
        if (mptsas_check_dma_handle(data_dma_state.handle) !=
            DDI_SUCCESS) {
            ddi_fm_service_impact(mpt->m_dip,
                DDI_SERVICE_UNAFFECTED);
            status = EFAULT;
        }
        mptsas_dma_free(&data_dma_state);
    }
    if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
        if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
            DDI_SUCCESS) {
            ddi_fm_service_impact(mpt->m_dip,
                DDI_SERVICE_UNAFFECTED);
            status = EFAULT;
        }
        mptsas_dma_free(&dataout_dma_state);
    }
    /* A timed-out command leaves the IOC in an unknown state: reset it. */
    if (pt_flags & MPTSAS_CMD_TIMEOUT) {
        if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
            mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
        }
    }
    if (request_msg)
        kmem_free(request_msg, request_size);

    return (status);
}
10117 10869
10118 10870 static int
10119 10871 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10120 10872 {
10121 10873 /*
10122 10874 * If timeout is 0, set timeout to default of 60 seconds.
10123 10875 */
10124 10876 if (data->Timeout == 0) {
10125 10877 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10126 10878 }
10127 10879
10128 10880 if (((data->DataSize == 0) &&
10129 10881 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10130 10882 ((data->DataSize != 0) &&
10131 10883 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10132 10884 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10133 10885 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10134 10886 (data->DataOutSize != 0))))) {
10135 10887 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10136 10888 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10137 10889 } else {
|
↓ open down ↓ |
192 lines elided |
↑ open up ↑ |
10138 10890 data->DataOutSize = 0;
10139 10891 }
10140 10892 /*
10141 10893 * Send passthru request messages
10142 10894 */
10143 10895 return (mptsas_do_passthru(mpt,
10144 10896 (uint8_t *)((uintptr_t)data->PtrRequest),
10145 10897 (uint8_t *)((uintptr_t)data->PtrReply),
10146 10898 (uint8_t *)((uintptr_t)data->PtrData),
10147 10899 data->RequestSize, data->ReplySize,
10148 - data->DataSize, data->DataDirection,
10900 + data->DataSize, (uint8_t)data->DataDirection,
10149 10901 (uint8_t *)((uintptr_t)data->PtrDataOut),
10150 10902 data->DataOutSize, data->Timeout, mode));
10151 10903 } else {
10152 10904 return (EINVAL);
10153 10905 }
10154 10906 }
10155 10907
10156 10908 static uint8_t
10157 10909 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10158 10910 {
10159 10911 uint8_t index;
10160 10912
10161 10913 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10162 10914 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10163 10915 return (index);
10164 10916 }
10165 10917 }
10166 10918
10167 10919 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10168 10920 }
10169 10921
/*
 * Build and issue a FW diagnostic request (buffer POST or RELEASE) from the
 * mptsas_diag_request_t stashed in the command's pkt_ha_private.  The request
 * frame is filled via ddi_put* through the request-frame access handle, then
 * handed to the IOC with MPTSAS_START_CMD.  Called with mpt->m_mutex held;
 * completion is reported asynchronously through the command's slot.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
    pMpi2DiagBufferPostRequest_t pDiag_post_msg;
    pMpi2DiagReleaseRequest_t pDiag_release_msg;
    struct scsi_pkt *pkt = cmd->cmd_pkt;
    mptsas_diag_request_t *diag = pkt->pkt_ha_private;
    uint32_t request_desc_low, i;

    ASSERT(mutex_owned(&mpt->m_mutex));

    /*
     * Form the diag message depending on the post or release function.
     */
    if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
        /* POST: describe the buffer (type, length, DMA address). */
        pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
            (mpt->m_req_frame + (mpt->m_req_frame_size *
            cmd->cmd_slot));
        bzero(pDiag_post_msg, mpt->m_req_frame_size);
        ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
            diag->function);
        ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
            diag->pBuffer->buffer_type);
        ddi_put8(mpt->m_acc_req_frame_hdl,
            &pDiag_post_msg->ExtendedType,
            diag->pBuffer->extended_type);
        ddi_put32(mpt->m_acc_req_frame_hdl,
            &pDiag_post_msg->BufferLength,
            diag->pBuffer->buffer_data.size);
        /* ProductSpecific is copied 32 bits at a time. */
        for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
            i++) {
            ddi_put32(mpt->m_acc_req_frame_hdl,
                &pDiag_post_msg->ProductSpecific[i],
                diag->pBuffer->product_specific[i]);
        }
        /* 64-bit DMA address of the buffer, split into low/high words. */
        ddi_put32(mpt->m_acc_req_frame_hdl,
            &pDiag_post_msg->BufferAddress.Low,
            (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
            & 0xffffffffull));
        ddi_put32(mpt->m_acc_req_frame_hdl,
            &pDiag_post_msg->BufferAddress.High,
            (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
            >> 32));
    } else {
        /* RELEASE: only the function and buffer type are needed. */
        pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
            (mpt->m_req_frame + (mpt->m_req_frame_size *
            cmd->cmd_slot));
        bzero(pDiag_release_msg, mpt->m_req_frame_size);
        ddi_put8(mpt->m_acc_req_frame_hdl,
            &pDiag_release_msg->Function, diag->function);
        ddi_put8(mpt->m_acc_req_frame_hdl,
            &pDiag_release_msg->BufferType,
            diag->pBuffer->buffer_type);
    }

    /*
     * Send the message
     */
    (void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
        DDI_DMA_SYNC_FORDEV);
    request_desc_low = (cmd->cmd_slot << 16) +
        MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    /* No reply frame yet; completion fills cmd_rfm for ADDRESS replies. */
    cmd->cmd_rfm = NULL;
    MPTSAS_START_CMD(mpt, request_desc_low, 0);
    if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
        DDI_SUCCESS) ||
        (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
        DDI_SUCCESS)) {
        ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
    }
}
10241 10993
10242 10994 static int
10243 10995 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10244 10996 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10245 10997 {
10246 10998 mptsas_diag_request_t diag;
10247 10999 int status, slot_num, post_flags = 0;
10248 11000 mptsas_cmd_t *cmd = NULL;
10249 11001 struct scsi_pkt *pkt;
10250 11002 pMpi2DiagBufferPostReply_t reply;
10251 11003 uint16_t iocstatus;
10252 11004 uint32_t iocloginfo, transfer_length;
10253 11005
10254 11006 /*
10255 11007 * If buffer is not enabled, just leave.
10256 11008 */
10257 11009 *return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
10258 11010 if (!pBuffer->enabled) {
10259 11011 status = DDI_FAILURE;
10260 11012 goto out;
10261 11013 }
10262 11014
10263 11015 /*
10264 11016 * Clear some flags initially.
10265 11017 */
10266 11018 pBuffer->force_release = FALSE;
10267 11019 pBuffer->valid_data = FALSE;
10268 11020 pBuffer->owned_by_firmware = FALSE;
10269 11021
10270 11022 /*
10271 11023 * Get a cmd buffer from the cmd buffer pool
10272 11024 */
10273 11025 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10274 11026 status = DDI_FAILURE;
10275 11027 mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
10276 11028 goto out;
10277 11029 }
10278 11030 post_flags |= MPTSAS_REQUEST_POOL_CMD;
10279 11031
10280 11032 bzero((caddr_t)cmd, sizeof (*cmd));
10281 11033 bzero((caddr_t)pkt, scsi_pkt_size());
10282 11034
10283 11035 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10284 11036
10285 11037 diag.pBuffer = pBuffer;
10286 11038 diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
10287 11039
10288 11040 /*
10289 11041 * Form a blank cmd/pkt to store the acknowledgement message
10290 11042 */
10291 11043 pkt->pkt_ha_private = (opaque_t)&diag;
10292 11044 pkt->pkt_flags = FLAG_HEAD;
10293 11045 pkt->pkt_time = 60;
10294 11046 cmd->cmd_pkt = pkt;
10295 11047 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10296 11048
10297 11049 /*
10298 11050 * Save the command in a slot
10299 11051 */
10300 11052 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10301 11053 /*
10302 11054 * Once passthru command get slot, set cmd_flags
10303 11055 * CFLAG_PREPARED.
10304 11056 */
10305 11057 cmd->cmd_flags |= CFLAG_PREPARED;
10306 11058 mptsas_start_diag(mpt, cmd);
10307 11059 } else {
10308 11060 mptsas_waitq_add(mpt, cmd);
10309 11061 }
10310 11062
10311 11063 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10312 11064 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10313 11065 }
10314 11066
10315 11067 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10316 11068 status = DDI_FAILURE;
10317 11069 mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
10318 11070 goto out;
10319 11071 }
10320 11072
10321 11073 /*
10322 11074 * cmd_rfm points to the reply message if a reply was given. Check the
10323 11075 * IOCStatus to make sure everything went OK with the FW diag request
10324 11076 * and set buffer flags.
10325 11077 */
10326 11078 if (cmd->cmd_rfm) {
10327 11079 post_flags |= MPTSAS_ADDRESS_REPLY;
10328 11080 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10329 11081 DDI_DMA_SYNC_FORCPU);
10330 11082 reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
10331 11083 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10332 11084
10333 11085 /*
10334 11086 * Get the reply message data
10335 11087 */
10336 11088 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10337 11089 &reply->IOCStatus);
10338 11090 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10339 11091 &reply->IOCLogInfo);
10340 11092 transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
10341 11093 &reply->TransferLength);
10342 11094
10343 11095 /*
10344 11096 * If post failed quit.
10345 11097 */
10346 11098 if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
10347 11099 status = DDI_FAILURE;
10348 11100 NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
10349 11101 "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
10350 11102 iocloginfo, transfer_length));
10351 11103 goto out;
10352 11104 }
10353 11105
10354 11106 /*
10355 11107 * Post was successful.
10356 11108 */
10357 11109 pBuffer->valid_data = TRUE;
10358 11110 pBuffer->owned_by_firmware = TRUE;
10359 11111 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10360 11112 status = DDI_SUCCESS;
10361 11113 }
10362 11114
10363 11115 out:
10364 11116 /*
10365 11117 * Put the reply frame back on the free queue, increment the free
10366 11118 * index, and write the new index to the free index register. But only
10367 11119 * if this reply is an ADDRESS reply.
10368 11120 */
10369 11121 if (post_flags & MPTSAS_ADDRESS_REPLY) {
10370 11122 ddi_put32(mpt->m_acc_free_queue_hdl,
10371 11123 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10372 11124 cmd->cmd_rfm);
10373 11125 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10374 11126 DDI_DMA_SYNC_FORDEV);
10375 11127 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10376 11128 mpt->m_free_index = 0;
10377 11129 }
10378 11130 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10379 11131 mpt->m_free_index);
10380 11132 }
10381 11133 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10382 11134 mptsas_remove_cmd(mpt, cmd);
10383 11135 post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10384 11136 }
10385 11137 if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
10386 11138 mptsas_return_to_pool(mpt, cmd);
10387 11139 }
10388 11140
10389 11141 return (status);
10390 11142 }
10391 11143
10392 11144 static int
10393 11145 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
10394 11146 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
10395 11147 uint32_t diag_type)
10396 11148 {
10397 11149 mptsas_diag_request_t diag;
10398 11150 int status, slot_num, rel_flags = 0;
10399 11151 mptsas_cmd_t *cmd = NULL;
10400 11152 struct scsi_pkt *pkt;
10401 11153 pMpi2DiagReleaseReply_t reply;
10402 11154 uint16_t iocstatus;
10403 11155 uint32_t iocloginfo;
10404 11156
10405 11157 /*
10406 11158 * If buffer is not enabled, just leave.
10407 11159 */
10408 11160 *return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
10409 11161 if (!pBuffer->enabled) {
10410 11162 mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
10411 11163 "by the IOC");
10412 11164 status = DDI_FAILURE;
10413 11165 goto out;
10414 11166 }
10415 11167
10416 11168 /*
10417 11169 * Clear some flags initially.
10418 11170 */
10419 11171 pBuffer->force_release = FALSE;
10420 11172 pBuffer->valid_data = FALSE;
10421 11173 pBuffer->owned_by_firmware = FALSE;
10422 11174
10423 11175 /*
10424 11176 * Get a cmd buffer from the cmd buffer pool
10425 11177 */
10426 11178 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10427 11179 status = DDI_FAILURE;
10428 11180 mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
10429 11181 "Diag");
10430 11182 goto out;
10431 11183 }
10432 11184 rel_flags |= MPTSAS_REQUEST_POOL_CMD;
10433 11185
10434 11186 bzero((caddr_t)cmd, sizeof (*cmd));
10435 11187 bzero((caddr_t)pkt, scsi_pkt_size());
10436 11188
10437 11189 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10438 11190
10439 11191 diag.pBuffer = pBuffer;
10440 11192 diag.function = MPI2_FUNCTION_DIAG_RELEASE;
10441 11193
10442 11194 /*
10443 11195 * Form a blank cmd/pkt to store the acknowledgement message
10444 11196 */
10445 11197 pkt->pkt_ha_private = (opaque_t)&diag;
10446 11198 pkt->pkt_flags = FLAG_HEAD;
10447 11199 pkt->pkt_time = 60;
10448 11200 cmd->cmd_pkt = pkt;
10449 11201 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10450 11202
10451 11203 /*
10452 11204 * Save the command in a slot
10453 11205 */
10454 11206 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10455 11207 /*
10456 11208 * Once passthru command get slot, set cmd_flags
10457 11209 * CFLAG_PREPARED.
10458 11210 */
10459 11211 cmd->cmd_flags |= CFLAG_PREPARED;
10460 11212 mptsas_start_diag(mpt, cmd);
10461 11213 } else {
10462 11214 mptsas_waitq_add(mpt, cmd);
10463 11215 }
10464 11216
10465 11217 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10466 11218 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10467 11219 }
10468 11220
10469 11221 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10470 11222 status = DDI_FAILURE;
10471 11223 mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
10472 11224 goto out;
10473 11225 }
10474 11226
10475 11227 /*
10476 11228 * cmd_rfm points to the reply message if a reply was given. Check the
10477 11229 * IOCStatus to make sure everything went OK with the FW diag request
10478 11230 * and set buffer flags.
10479 11231 */
10480 11232 if (cmd->cmd_rfm) {
10481 11233 rel_flags |= MPTSAS_ADDRESS_REPLY;
10482 11234 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10483 11235 DDI_DMA_SYNC_FORCPU);
10484 11236 reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
10485 11237 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10486 11238
10487 11239 /*
10488 11240 * Get the reply message data
10489 11241 */
10490 11242 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10491 11243 &reply->IOCStatus);
10492 11244 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10493 11245 &reply->IOCLogInfo);
10494 11246
10495 11247 /*
10496 11248 * If release failed quit.
10497 11249 */
10498 11250 if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
10499 11251 pBuffer->owned_by_firmware) {
10500 11252 status = DDI_FAILURE;
10501 11253 NDBG13(("release FW Diag Buffer failed: "
10502 11254 "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
10503 11255 iocloginfo));
10504 11256 goto out;
10505 11257 }
10506 11258
10507 11259 /*
10508 11260 * Release was successful.
10509 11261 */
10510 11262 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10511 11263 status = DDI_SUCCESS;
10512 11264
10513 11265 /*
10514 11266 * If this was for an UNREGISTER diag type command, clear the
10515 11267 * unique ID.
10516 11268 */
10517 11269 if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
10518 11270 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10519 11271 }
10520 11272 }
10521 11273
10522 11274 out:
10523 11275 /*
10524 11276 * Put the reply frame back on the free queue, increment the free
10525 11277 * index, and write the new index to the free index register. But only
10526 11278 * if this reply is an ADDRESS reply.
10527 11279 */
10528 11280 if (rel_flags & MPTSAS_ADDRESS_REPLY) {
10529 11281 ddi_put32(mpt->m_acc_free_queue_hdl,
10530 11282 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10531 11283 cmd->cmd_rfm);
10532 11284 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10533 11285 DDI_DMA_SYNC_FORDEV);
10534 11286 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10535 11287 mpt->m_free_index = 0;
10536 11288 }
10537 11289 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10538 11290 mpt->m_free_index);
10539 11291 }
10540 11292 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10541 11293 mptsas_remove_cmd(mpt, cmd);
10542 11294 rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10543 11295 }
10544 11296 if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
10545 11297 mptsas_return_to_pool(mpt, cmd);
10546 11298 }
10547 11299
10548 11300 return (status);
10549 11301 }
10550 11302
/*
 * Register a FW diagnostic buffer on behalf of a user request: validate the
 * buffer type and unique ID, allocate a contiguous DMA buffer of the
 * requested size, and post it to the firmware.  On failure *return_code is
 * set to a MPTSAS_FW_DIAG_ERROR_* value and the DMA buffer is freed.
 * Returns DDI_SUCCESS or DDI_FAILURE.  Called with mpt->m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
    mptsas_fw_diagnostic_buffer_t *pBuffer;
    uint8_t extended_type, buffer_type, i;
    uint32_t buffer_size;
    uint32_t unique_id;
    int status;

    ASSERT(mutex_owned(&mpt->m_mutex));

    extended_type = diag_register->ExtendedType;
    buffer_type = diag_register->BufferType;
    buffer_size = diag_register->RequestedBufferSize;
    unique_id = diag_register->UniqueId;

    /*
     * Check for valid buffer type
     */
    if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
        *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
        return (DDI_FAILURE);
    }

    /*
     * Get the current buffer and look up the unique ID. The unique ID
     * should not be found. If it is, the ID is already in use.
     */
    /* Note: the buffer slot is selected by type; one buffer per type. */
    i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
    pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
    if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
        *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
        return (DDI_FAILURE);
    }

    /*
     * The buffer's unique ID should not be registered yet, and the given
     * unique ID cannot be 0.
     */
    if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
        (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
        *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
        return (DDI_FAILURE);
    }

    /*
     * If this buffer is already posted as immediate, just change owner.
     */
    if (pBuffer->immediate && pBuffer->owned_by_firmware &&
        (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
        pBuffer->immediate = FALSE;
        pBuffer->unique_id = unique_id;
        return (DDI_SUCCESS);
    }

    /*
     * Post a new buffer after checking if it's enabled. The DMA buffer
     * that is allocated will be contiguous (sgl_len = 1).
     */
    if (!pBuffer->enabled) {
        *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
        return (DDI_FAILURE);
    }
    bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
    pBuffer->buffer_data.size = buffer_size;
    if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
        mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
            "diag buffer: size = %d bytes", buffer_size);
        *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
        return (DDI_FAILURE);
    }

    /*
     * Copy the given info to the diag buffer and post the buffer.
     */
    pBuffer->buffer_type = buffer_type;
    pBuffer->immediate = FALSE;
    /* ProductSpecific words only apply to TRACE-type buffers. */
    if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
        for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
            i++) {
            pBuffer->product_specific[i] =
                diag_register->ProductSpecific[i];
        }
    }
    pBuffer->extended_type = extended_type;
    pBuffer->unique_id = unique_id;
    status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

    if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
        DDI_SUCCESS) {
        mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
            "mptsas_diag_register.");
        ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
        status = DDI_FAILURE;
    }

    /*
     * In case there was a failure, free the DMA buffer.
     */
    if (status == DDI_FAILURE) {
        mptsas_dma_free(&pBuffer->buffer_data);
    }

    return (status);
}
10657 11409
10658 11410 static int
10659 11411 mptsas_diag_unregister(mptsas_t *mpt,
10660 11412 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10661 11413 {
10662 11414 mptsas_fw_diagnostic_buffer_t *pBuffer;
10663 11415 uint8_t i;
10664 11416 uint32_t unique_id;
10665 11417 int status;
10666 11418
10667 11419 ASSERT(mutex_owned(&mpt->m_mutex));
10668 11420
10669 11421 unique_id = diag_unregister->UniqueId;
10670 11422
10671 11423 /*
10672 11424 * Get the current buffer and look up the unique ID. The unique ID
10673 11425 * should be there.
10674 11426 */
10675 11427 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10676 11428 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10677 11429 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10678 11430 return (DDI_FAILURE);
10679 11431 }
10680 11432
10681 11433 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10682 11434
10683 11435 /*
10684 11436 * Try to release the buffer from FW before freeing it. If release
10685 11437 * fails, don't free the DMA buffer in case FW tries to access it
10686 11438 * later. If buffer is not owned by firmware, can't release it.
10687 11439 */
10688 11440 if (!pBuffer->owned_by_firmware) {
10689 11441 status = DDI_SUCCESS;
10690 11442 } else {
10691 11443 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10692 11444 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10693 11445 }
10694 11446
10695 11447 /*
10696 11448 * At this point, return the current status no matter what happens with
10697 11449 * the DMA buffer.
10698 11450 */
10699 11451 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10700 11452 if (status == DDI_SUCCESS) {
10701 11453 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10702 11454 DDI_SUCCESS) {
10703 11455 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10704 11456 "in mptsas_diag_unregister.");
10705 11457 ddi_fm_service_impact(mpt->m_dip,
10706 11458 DDI_SERVICE_UNAFFECTED);
10707 11459 }
10708 11460 mptsas_dma_free(&pBuffer->buffer_data);
10709 11461 }
10710 11462
10711 11463 return (status);
10712 11464 }
10713 11465
10714 11466 static int
10715 11467 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10716 11468 uint32_t *return_code)
10717 11469 {
10718 11470 mptsas_fw_diagnostic_buffer_t *pBuffer;
10719 11471 uint8_t i;
10720 11472 uint32_t unique_id;
10721 11473
10722 11474 ASSERT(mutex_owned(&mpt->m_mutex));
10723 11475
10724 11476 unique_id = diag_query->UniqueId;
10725 11477
10726 11478 /*
10727 11479 * If ID is valid, query on ID.
10728 11480 * If ID is invalid, query on buffer type.
10729 11481 */
10730 11482 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10731 11483 i = diag_query->BufferType;
10732 11484 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10733 11485 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10734 11486 return (DDI_FAILURE);
10735 11487 }
10736 11488 } else {
10737 11489 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10738 11490 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10739 11491 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10740 11492 return (DDI_FAILURE);
10741 11493 }
10742 11494 }
10743 11495
10744 11496 /*
10745 11497 * Fill query structure with the diag buffer info.
10746 11498 */
10747 11499 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10748 11500 diag_query->BufferType = pBuffer->buffer_type;
10749 11501 diag_query->ExtendedType = pBuffer->extended_type;
10750 11502 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10751 11503 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10752 11504 i++) {
10753 11505 diag_query->ProductSpecific[i] =
10754 11506 pBuffer->product_specific[i];
10755 11507 }
10756 11508 }
10757 11509 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10758 11510 diag_query->DriverAddedBufferSize = 0;
10759 11511 diag_query->UniqueId = pBuffer->unique_id;
10760 11512 diag_query->ApplicationFlags = 0;
10761 11513 diag_query->DiagnosticFlags = 0;
10762 11514
10763 11515 /*
10764 11516 * Set/Clear application flags
10765 11517 */
10766 11518 if (pBuffer->immediate) {
10767 11519 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10768 11520 } else {
10769 11521 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10770 11522 }
10771 11523 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10772 11524 diag_query->ApplicationFlags |=
10773 11525 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10774 11526 } else {
10775 11527 diag_query->ApplicationFlags &=
10776 11528 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10777 11529 }
10778 11530 if (pBuffer->owned_by_firmware) {
10779 11531 diag_query->ApplicationFlags |=
10780 11532 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10781 11533 } else {
10782 11534 diag_query->ApplicationFlags &=
10783 11535 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10784 11536 }
10785 11537
10786 11538 return (DDI_SUCCESS);
10787 11539 }
10788 11540
10789 11541 static int
10790 11542 mptsas_diag_read_buffer(mptsas_t *mpt,
10791 11543 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10792 11544 uint32_t *return_code, int ioctl_mode)
10793 11545 {
10794 11546 mptsas_fw_diagnostic_buffer_t *pBuffer;
10795 11547 uint8_t i, *pData;
10796 11548 uint32_t unique_id, byte;
10797 11549 int status;
10798 11550
10799 11551 ASSERT(mutex_owned(&mpt->m_mutex));
10800 11552
10801 11553 unique_id = diag_read_buffer->UniqueId;
10802 11554
10803 11555 /*
10804 11556 * Get the current buffer and look up the unique ID. The unique ID
10805 11557 * should be there.
10806 11558 */
10807 11559 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10808 11560 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10809 11561 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10810 11562 return (DDI_FAILURE);
10811 11563 }
10812 11564
10813 11565 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10814 11566
10815 11567 /*
10816 11568 * Make sure requested read is within limits
10817 11569 */
10818 11570 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10819 11571 pBuffer->buffer_data.size) {
10820 11572 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10821 11573 return (DDI_FAILURE);
10822 11574 }
10823 11575
10824 11576 /*
10825 11577 * Copy the requested data from DMA to the diag_read_buffer. The DMA
10826 11578 * buffer that was allocated is one contiguous buffer.
10827 11579 */
10828 11580 pData = (uint8_t *)(pBuffer->buffer_data.memp +
10829 11581 diag_read_buffer->StartingOffset);
10830 11582 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10831 11583 DDI_DMA_SYNC_FORCPU);
10832 11584 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10833 11585 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10834 11586 != 0) {
10835 11587 return (DDI_FAILURE);
10836 11588 }
10837 11589 }
10838 11590 diag_read_buffer->Status = 0;
10839 11591
10840 11592 /*
10841 11593 * Set or clear the Force Release flag.
10842 11594 */
10843 11595 if (pBuffer->force_release) {
10844 11596 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10845 11597 } else {
10846 11598 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10847 11599 }
10848 11600
10849 11601 /*
10850 11602 * If buffer is to be reregistered, make sure it's not already owned by
10851 11603 * firmware first.
10852 11604 */
10853 11605 status = DDI_SUCCESS;
10854 11606 if (!pBuffer->owned_by_firmware) {
10855 11607 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10856 11608 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10857 11609 return_code);
10858 11610 }
10859 11611 }
10860 11612
10861 11613 return (status);
10862 11614 }
10863 11615
10864 11616 static int
10865 11617 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10866 11618 uint32_t *return_code)
10867 11619 {
10868 11620 mptsas_fw_diagnostic_buffer_t *pBuffer;
10869 11621 uint8_t i;
10870 11622 uint32_t unique_id;
10871 11623 int status;
10872 11624
10873 11625 ASSERT(mutex_owned(&mpt->m_mutex));
10874 11626
10875 11627 unique_id = diag_release->UniqueId;
10876 11628
10877 11629 /*
10878 11630 * Get the current buffer and look up the unique ID. The unique ID
10879 11631 * should be there.
10880 11632 */
10881 11633 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10882 11634 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10883 11635 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10884 11636 return (DDI_FAILURE);
10885 11637 }
10886 11638
10887 11639 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10888 11640
10889 11641 /*
10890 11642 * If buffer is not owned by firmware, it's already been released.
10891 11643 */
10892 11644 if (!pBuffer->owned_by_firmware) {
10893 11645 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10894 11646 return (DDI_FAILURE);
10895 11647 }
10896 11648
10897 11649 /*
10898 11650 * Release the buffer.
10899 11651 */
10900 11652 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10901 11653 MPTSAS_FW_DIAG_TYPE_RELEASE);
10902 11654 return (status);
10903 11655 }
10904 11656
10905 11657 static int
10906 11658 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10907 11659 uint32_t length, uint32_t *return_code, int ioctl_mode)
10908 11660 {
10909 11661 mptsas_fw_diag_register_t diag_register;
10910 11662 mptsas_fw_diag_unregister_t diag_unregister;
10911 11663 mptsas_fw_diag_query_t diag_query;
10912 11664 mptsas_diag_read_buffer_t diag_read_buffer;
10913 11665 mptsas_fw_diag_release_t diag_release;
10914 11666 int status = DDI_SUCCESS;
10915 11667 uint32_t original_return_code, read_buf_len;
10916 11668
10917 11669 ASSERT(mutex_owned(&mpt->m_mutex));
10918 11670
10919 11671 original_return_code = *return_code;
10920 11672 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10921 11673
10922 11674 switch (action) {
10923 11675 case MPTSAS_FW_DIAG_TYPE_REGISTER:
10924 11676 if (!length) {
10925 11677 *return_code =
10926 11678 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10927 11679 status = DDI_FAILURE;
10928 11680 break;
10929 11681 }
10930 11682 if (ddi_copyin(diag_action, &diag_register,
10931 11683 sizeof (diag_register), ioctl_mode) != 0) {
10932 11684 return (DDI_FAILURE);
10933 11685 }
10934 11686 status = mptsas_diag_register(mpt, &diag_register,
10935 11687 return_code);
10936 11688 break;
10937 11689
10938 11690 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10939 11691 if (length < sizeof (diag_unregister)) {
10940 11692 *return_code =
10941 11693 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10942 11694 status = DDI_FAILURE;
10943 11695 break;
10944 11696 }
10945 11697 if (ddi_copyin(diag_action, &diag_unregister,
10946 11698 sizeof (diag_unregister), ioctl_mode) != 0) {
10947 11699 return (DDI_FAILURE);
10948 11700 }
10949 11701 status = mptsas_diag_unregister(mpt, &diag_unregister,
10950 11702 return_code);
10951 11703 break;
10952 11704
10953 11705 case MPTSAS_FW_DIAG_TYPE_QUERY:
10954 11706 if (length < sizeof (diag_query)) {
10955 11707 *return_code =
10956 11708 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10957 11709 status = DDI_FAILURE;
10958 11710 break;
10959 11711 }
10960 11712 if (ddi_copyin(diag_action, &diag_query,
10961 11713 sizeof (diag_query), ioctl_mode) != 0) {
10962 11714 return (DDI_FAILURE);
10963 11715 }
10964 11716 status = mptsas_diag_query(mpt, &diag_query,
10965 11717 return_code);
10966 11718 if (status == DDI_SUCCESS) {
10967 11719 if (ddi_copyout(&diag_query, diag_action,
10968 11720 sizeof (diag_query), ioctl_mode) != 0) {
10969 11721 return (DDI_FAILURE);
10970 11722 }
10971 11723 }
10972 11724 break;
10973 11725
10974 11726 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10975 11727 if (ddi_copyin(diag_action, &diag_read_buffer,
10976 11728 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10977 11729 return (DDI_FAILURE);
10978 11730 }
10979 11731 read_buf_len = sizeof (diag_read_buffer) -
10980 11732 sizeof (diag_read_buffer.DataBuffer) +
10981 11733 diag_read_buffer.BytesToRead;
10982 11734 if (length < read_buf_len) {
10983 11735 *return_code =
10984 11736 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10985 11737 status = DDI_FAILURE;
10986 11738 break;
10987 11739 }
10988 11740 status = mptsas_diag_read_buffer(mpt,
10989 11741 &diag_read_buffer, diag_action +
10990 11742 sizeof (diag_read_buffer) - 4, return_code,
10991 11743 ioctl_mode);
10992 11744 if (status == DDI_SUCCESS) {
10993 11745 if (ddi_copyout(&diag_read_buffer, diag_action,
10994 11746 sizeof (diag_read_buffer) - 4, ioctl_mode)
10995 11747 != 0) {
10996 11748 return (DDI_FAILURE);
10997 11749 }
10998 11750 }
10999 11751 break;
11000 11752
11001 11753 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11002 11754 if (length < sizeof (diag_release)) {
11003 11755 *return_code =
11004 11756 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11005 11757 status = DDI_FAILURE;
11006 11758 break;
11007 11759 }
11008 11760 if (ddi_copyin(diag_action, &diag_release,
11009 11761 sizeof (diag_release), ioctl_mode) != 0) {
11010 11762 return (DDI_FAILURE);
11011 11763 }
11012 11764 status = mptsas_diag_release(mpt, &diag_release,
11013 11765 return_code);
11014 11766 break;
11015 11767
11016 11768 default:
11017 11769 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11018 11770 status = DDI_FAILURE;
11019 11771 break;
11020 11772 }
11021 11773
11022 11774 if ((status == DDI_FAILURE) &&
11023 11775 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11024 11776 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11025 11777 status = DDI_SUCCESS;
11026 11778 }
11027 11779
11028 11780 return (status);
11029 11781 }
11030 11782
11031 11783 static int
11032 11784 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11033 11785 {
11034 11786 int status;
11035 11787 mptsas_diag_action_t driver_data;
11036 11788
11037 11789 ASSERT(mutex_owned(&mpt->m_mutex));
11038 11790
11039 11791 /*
11040 11792 * Copy the user data to a driver data buffer.
11041 11793 */
11042 11794 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11043 11795 mode) == 0) {
11044 11796 /*
11045 11797 * Send diag action request if Action is valid
11046 11798 */
11047 11799 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11048 11800 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11049 11801 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11050 11802 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11051 11803 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11052 11804 status = mptsas_do_diag_action(mpt, driver_data.Action,
11053 11805 (void *)(uintptr_t)driver_data.PtrDiagAction,
11054 11806 driver_data.Length, &driver_data.ReturnCode,
11055 11807 mode);
11056 11808 if (status == DDI_SUCCESS) {
11057 11809 if (ddi_copyout(&driver_data.ReturnCode,
11058 11810 &user_data->ReturnCode,
11059 11811 sizeof (user_data->ReturnCode), mode)
11060 11812 != 0) {
11061 11813 status = EFAULT;
11062 11814 } else {
11063 11815 status = 0;
11064 11816 }
11065 11817 } else {
11066 11818 status = EIO;
11067 11819 }
11068 11820 } else {
11069 11821 status = EINVAL;
11070 11822 }
11071 11823 } else {
11072 11824 status = EFAULT;
11073 11825 }
11074 11826
11075 11827 return (status);
11076 11828 }
11077 11829
11078 11830 /*
11079 11831 * This routine handles the "event query" ioctl.
11080 11832 */
11081 11833 static int
11082 11834 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11083 11835 int *rval)
11084 11836 {
11085 11837 int status;
11086 11838 mptsas_event_query_t driverdata;
11087 11839 uint8_t i;
11088 11840
11089 11841 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11090 11842
11091 11843 mutex_enter(&mpt->m_mutex);
11092 11844 for (i = 0; i < 4; i++) {
11093 11845 driverdata.Types[i] = mpt->m_event_mask[i];
11094 11846 }
11095 11847 mutex_exit(&mpt->m_mutex);
11096 11848
11097 11849 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11098 11850 status = EFAULT;
11099 11851 } else {
11100 11852 *rval = MPTIOCTL_STATUS_GOOD;
11101 11853 status = 0;
11102 11854 }
11103 11855
11104 11856 return (status);
11105 11857 }
11106 11858
11107 11859 /*
11108 11860 * This routine handles the "event enable" ioctl.
11109 11861 */
11110 11862 static int
11111 11863 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11112 11864 int *rval)
11113 11865 {
11114 11866 int status;
11115 11867 mptsas_event_enable_t driverdata;
11116 11868 uint8_t i;
11117 11869
11118 11870 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11119 11871 mutex_enter(&mpt->m_mutex);
11120 11872 for (i = 0; i < 4; i++) {
11121 11873 mpt->m_event_mask[i] = driverdata.Types[i];
11122 11874 }
11123 11875 mutex_exit(&mpt->m_mutex);
11124 11876
11125 11877 *rval = MPTIOCTL_STATUS_GOOD;
11126 11878 status = 0;
11127 11879 } else {
11128 11880 status = EFAULT;
11129 11881 }
11130 11882 return (status);
11131 11883 }
11132 11884
11133 11885 /*
11134 11886 * This routine handles the "event report" ioctl.
11135 11887 */
11136 11888 static int
11137 11889 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11138 11890 int *rval)
11139 11891 {
11140 11892 int status;
11141 11893 mptsas_event_report_t driverdata;
11142 11894
11143 11895 mutex_enter(&mpt->m_mutex);
11144 11896
11145 11897 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11146 11898 mode) == 0) {
11147 11899 if (driverdata.Size >= sizeof (mpt->m_events)) {
11148 11900 if (ddi_copyout(mpt->m_events, data->Events,
11149 11901 sizeof (mpt->m_events), mode) != 0) {
11150 11902 status = EFAULT;
11151 11903 } else {
11152 11904 if (driverdata.Size > sizeof (mpt->m_events)) {
11153 11905 driverdata.Size =
11154 11906 sizeof (mpt->m_events);
11155 11907 if (ddi_copyout(&driverdata.Size,
11156 11908 &data->Size,
11157 11909 sizeof (driverdata.Size),
11158 11910 mode) != 0) {
11159 11911 status = EFAULT;
11160 11912 } else {
11161 11913 *rval = MPTIOCTL_STATUS_GOOD;
11162 11914 status = 0;
11163 11915 }
11164 11916 } else {
11165 11917 *rval = MPTIOCTL_STATUS_GOOD;
11166 11918 status = 0;
11167 11919 }
11168 11920 }
11169 11921 } else {
11170 11922 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11171 11923 status = 0;
11172 11924 }
11173 11925 } else {
11174 11926 status = EFAULT;
11175 11927 }
11176 11928
11177 11929 mutex_exit(&mpt->m_mutex);
11178 11930 return (status);
11179 11931 }
11180 11932
11181 11933 static void
11182 11934 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11183 11935 {
11184 11936 int *reg_data;
11185 11937 uint_t reglen;
11186 11938
11187 11939 /*
11188 11940 * Lookup the 'reg' property and extract the other data
11189 11941 */
11190 11942 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11191 11943 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11192 11944 DDI_PROP_SUCCESS) {
11193 11945 /*
11194 11946 * Extract the PCI data from the 'reg' property first DWORD.
11195 11947 * The entry looks like the following:
11196 11948 * First DWORD:
11197 11949 * Bits 0 - 7 8-bit Register number
11198 11950 * Bits 8 - 10 3-bit Function number
11199 11951 * Bits 11 - 15 5-bit Device number
11200 11952 * Bits 16 - 23 8-bit Bus number
11201 11953 * Bits 24 - 25 2-bit Address Space type identifier
11202 11954 *
11203 11955 */
11204 11956 adapter_data->PciInformation.u.bits.BusNumber =
11205 11957 (reg_data[0] & 0x00FF0000) >> 16;
11206 11958 adapter_data->PciInformation.u.bits.DeviceNumber =
11207 11959 (reg_data[0] & 0x0000F800) >> 11;
11208 11960 adapter_data->PciInformation.u.bits.FunctionNumber =
11209 11961 (reg_data[0] & 0x00000700) >> 8;
11210 11962 ddi_prop_free((void *)reg_data);
11211 11963 } else {
11212 11964 /*
11213 11965 * If we can't determine the PCI data then we fill in FF's for
11214 11966 * the data to indicate this.
11215 11967 */
11216 11968 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11217 11969 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11218 11970 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11219 11971 }
11220 11972
11221 11973 /*
11222 11974 * Saved in the mpt->m_fwversion
|
↓ open down ↓ |
1064 lines elided |
↑ open up ↑ |
11223 11975 */
11224 11976 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11225 11977 }
11226 11978
11227 11979 static void
11228 11980 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11229 11981 {
11230 11982 char *driver_verstr = MPTSAS_MOD_STRING;
11231 11983
11232 11984 mptsas_lookup_pci_data(mpt, adapter_data);
11233 - adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11985 + adapter_data->AdapterType = mpt->m_MPI25 ?
11986 + MPTIOCTL_ADAPTER_TYPE_SAS3 :
11987 + MPTIOCTL_ADAPTER_TYPE_SAS2;
11234 11988 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11235 11989 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11236 11990 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11237 11991 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11238 11992 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11239 11993 adapter_data->BiosVersion = 0;
11240 11994 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11241 11995 }
11242 11996
11243 11997 static void
11244 11998 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11245 11999 {
11246 12000 int *reg_data, i;
11247 12001 uint_t reglen;
11248 12002
11249 12003 /*
11250 12004 * Lookup the 'reg' property and extract the other data
11251 12005 */
11252 12006 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11253 12007 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11254 12008 DDI_PROP_SUCCESS) {
11255 12009 /*
11256 12010 * Extract the PCI data from the 'reg' property first DWORD.
11257 12011 * The entry looks like the following:
11258 12012 * First DWORD:
11259 12013 * Bits 8 - 10 3-bit Function number
11260 12014 * Bits 11 - 15 5-bit Device number
11261 12015 * Bits 16 - 23 8-bit Bus number
11262 12016 */
11263 12017 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11264 12018 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11265 12019 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11266 12020 ddi_prop_free((void *)reg_data);
11267 12021 } else {
11268 12022 /*
11269 12023 * If we can't determine the PCI info then we fill in FF's for
11270 12024 * the data to indicate this.
11271 12025 */
11272 12026 pci_info->BusNumber = 0xFFFFFFFF;
11273 12027 pci_info->DeviceNumber = 0xFF;
11274 12028 pci_info->FunctionNumber = 0xFF;
11275 12029 }
11276 12030
11277 12031 /*
11278 12032 * Now get the interrupt vector and the pci header. The vector can
11279 12033 * only be 0 right now. The header is the first 256 bytes of config
11280 12034 * space.
11281 12035 */
11282 12036 pci_info->InterruptVector = 0;
11283 12037 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11284 12038 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11285 12039 i);
11286 12040 }
11287 12041 }
11288 12042
11289 12043 static int
11290 12044 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
11291 12045 {
11292 12046 int status = 0;
11293 12047 mptsas_reg_access_t driverdata;
11294 12048
11295 12049 mutex_enter(&mpt->m_mutex);
11296 12050 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11297 12051 switch (driverdata.Command) {
11298 12052 /*
11299 12053 * IO access is not supported.
11300 12054 */
11301 12055 case REG_IO_READ:
11302 12056 case REG_IO_WRITE:
11303 12057 mptsas_log(mpt, CE_WARN, "IO access is not "
11304 12058 "supported. Use memory access.");
11305 12059 status = EINVAL;
11306 12060 break;
11307 12061
11308 12062 case REG_MEM_READ:
11309 12063 driverdata.RegData = ddi_get32(mpt->m_datap,
11310 12064 (uint32_t *)(void *)mpt->m_reg +
11311 12065 driverdata.RegOffset);
11312 12066 if (ddi_copyout(&driverdata.RegData,
11313 12067 &data->RegData,
11314 12068 sizeof (driverdata.RegData), mode) != 0) {
11315 12069 mptsas_log(mpt, CE_WARN, "Register "
11316 12070 "Read Failed");
11317 12071 status = EFAULT;
11318 12072 }
11319 12073 break;
11320 12074
11321 12075 case REG_MEM_WRITE:
11322 12076 ddi_put32(mpt->m_datap,
11323 12077 (uint32_t *)(void *)mpt->m_reg +
11324 12078 driverdata.RegOffset,
11325 12079 driverdata.RegData);
11326 12080 break;
11327 12081
11328 12082 default:
11329 12083 status = EINVAL;
11330 12084 break;
11331 12085 }
11332 12086 } else {
11333 12087 status = EFAULT;
11334 12088 }
11335 12089
11336 12090 mutex_exit(&mpt->m_mutex);
11337 12091 return (status);
11338 12092 }
11339 12093
11340 12094 static int
11341 12095 led_control(mptsas_t *mpt, intptr_t data, int mode)
11342 12096 {
11343 12097 int ret = 0;
11344 12098 mptsas_led_control_t lc;
11345 12099 mptsas_target_t *ptgt;
11346 12100
11347 12101 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
11348 12102 return (EFAULT);
11349 12103 }
11350 12104
11351 12105 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
11352 12106 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
11353 12107 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
11354 12108 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
11355 12109 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
11356 12110 lc.LedStatus != 1)) {
11357 12111 return (EINVAL);
11358 12112 }
11359 12113
11360 12114 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
11361 12115 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
11362 12116 return (EACCES);
11363 12117
11364 12118 /* Locate the target we're interrogating... */
11365 12119 mutex_enter(&mpt->m_mutex);
11366 12120 ptgt = refhash_linear_search(mpt->m_targets,
11367 12121 mptsas_target_eval_slot, &lc);
11368 12122 if (ptgt == NULL) {
11369 12123 /* We could not find a target for that enclosure/slot. */
11370 12124 mutex_exit(&mpt->m_mutex);
11371 12125 return (ENOENT);
11372 12126 }
11373 12127
11374 12128 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
11375 12129 /* Update our internal LED state. */
11376 12130 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
11377 12131 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
11378 12132
11379 12133 /* Flush it to the controller. */
11380 12134 ret = mptsas_flush_led_status(mpt, ptgt);
11381 12135 mutex_exit(&mpt->m_mutex);
11382 12136 return (ret);
11383 12137 }
11384 12138
11385 12139 /* Return our internal LED state. */
11386 12140 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
11387 12141 mutex_exit(&mpt->m_mutex);
11388 12142
11389 12143 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
11390 12144 return (EFAULT);
11391 12145 }
11392 12146
11393 12147 return (0);
11394 12148 }
11395 12149
11396 12150 static int
11397 12151 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
11398 12152 {
11399 12153 uint16_t i = 0;
11400 12154 uint16_t count = 0;
11401 12155 int ret = 0;
11402 12156 mptsas_target_t *ptgt;
11403 12157 mptsas_disk_info_t *di;
11404 12158 STRUCT_DECL(mptsas_get_disk_info, gdi);
11405 12159
11406 12160 if ((mode & FREAD) == 0)
11407 12161 return (EACCES);
11408 12162
11409 12163 STRUCT_INIT(gdi, get_udatamodel());
11410 12164
11411 12165 if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
11412 12166 mode) != 0) {
11413 12167 return (EFAULT);
11414 12168 }
11415 12169
11416 12170 /* Find out how many targets there are. */
11417 12171 mutex_enter(&mpt->m_mutex);
11418 12172 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
11419 12173 ptgt = refhash_next(mpt->m_targets, ptgt)) {
11420 12174 count++;
11421 12175 }
11422 12176 mutex_exit(&mpt->m_mutex);
11423 12177
11424 12178 /*
11425 12179 * If we haven't been asked to copy out information on each target,
11426 12180 * then just return the count.
11427 12181 */
11428 12182 STRUCT_FSET(gdi, DiskCount, count);
11429 12183 if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
11430 12184 goto copy_out;
11431 12185
11432 12186 /*
11433 12187 * If we haven't been given a large enough buffer to copy out into,
11434 12188 * let the caller know.
11435 12189 */
11436 12190 if (STRUCT_FGET(gdi, DiskInfoArraySize) <
11437 12191 count * sizeof (mptsas_disk_info_t)) {
11438 12192 ret = ENOSPC;
11439 12193 goto copy_out;
11440 12194 }
11441 12195
11442 12196 di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);
11443 12197
11444 12198 mutex_enter(&mpt->m_mutex);
11445 12199 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
11446 12200 ptgt = refhash_next(mpt->m_targets, ptgt)) {
11447 12201 if (i >= count) {
11448 12202 /*
11449 12203 * The number of targets changed while we weren't
11450 12204 * looking, so give up.
11451 12205 */
11452 12206 refhash_rele(mpt->m_targets, ptgt);
11453 12207 mutex_exit(&mpt->m_mutex);
11454 12208 kmem_free(di, count * sizeof (mptsas_disk_info_t));
11455 12209 return (EAGAIN);
11456 12210 }
11457 12211 di[i].Instance = mpt->m_instance;
11458 12212 di[i].Enclosure = ptgt->m_enclosure;
11459 12213 di[i].Slot = ptgt->m_slot_num;
11460 12214 di[i].SasAddress = ptgt->m_addr.mta_wwn;
11461 12215 i++;
11462 12216 }
11463 12217 mutex_exit(&mpt->m_mutex);
11464 12218 STRUCT_FSET(gdi, DiskCount, i);
11465 12219
11466 12220 /* Copy out the disk information to the caller. */
11467 12221 if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
11468 12222 i * sizeof (mptsas_disk_info_t), mode) != 0) {
11469 12223 ret = EFAULT;
11470 12224 }
11471 12225
11472 12226 kmem_free(di, count * sizeof (mptsas_disk_info_t));
11473 12227
11474 12228 copy_out:
11475 12229 if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
11476 12230 mode) != 0) {
11477 12231 ret = EFAULT;
11478 12232 }
11479 12233
11480 12234 return (ret);
11481 12235 }
11482 12236
11483 12237 static int
11484 12238 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11485 12239 int *rval)
11486 12240 {
11487 12241 int status = 0;
11488 12242 mptsas_t *mpt;
11489 12243 mptsas_update_flash_t flashdata;
11490 12244 mptsas_pass_thru_t passthru_data;
11491 12245 mptsas_adapter_data_t adapter_data;
11492 12246 mptsas_pci_info_t pci_info;
11493 12247 int copylen;
11494 12248
11495 12249 int iport_flag = 0;
11496 12250 dev_info_t *dip = NULL;
11497 12251 mptsas_phymask_t phymask = 0;
11498 12252 struct devctl_iocdata *dcp = NULL;
11499 12253 char *addr = NULL;
11500 12254 mptsas_target_t *ptgt = NULL;
11501 12255
11502 12256 *rval = MPTIOCTL_STATUS_GOOD;
11503 12257 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11504 12258 return (EPERM);
11505 12259 }
11506 12260
11507 12261 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11508 12262 if (mpt == NULL) {
11509 12263 /*
11510 12264 * Called from iport node, get the states
11511 12265 */
11512 12266 iport_flag = 1;
11513 12267 dip = mptsas_get_dip_from_dev(dev, &phymask);
11514 12268 if (dip == NULL) {
11515 12269 return (ENXIO);
11516 12270 }
11517 12271 mpt = DIP2MPT(dip);
11518 12272 }
11519 12273 /* Make sure power level is D0 before accessing registers */
11520 12274 mutex_enter(&mpt->m_mutex);
11521 12275 if (mpt->m_options & MPTSAS_OPT_PM) {
11522 12276 (void) pm_busy_component(mpt->m_dip, 0);
11523 12277 if (mpt->m_power_level != PM_LEVEL_D0) {
11524 12278 mutex_exit(&mpt->m_mutex);
11525 12279 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11526 12280 DDI_SUCCESS) {
11527 12281 mptsas_log(mpt, CE_WARN,
11528 12282 "mptsas%d: mptsas_ioctl: Raise power "
11529 12283 "request failed.", mpt->m_instance);
11530 12284 (void) pm_idle_component(mpt->m_dip, 0);
11531 12285 return (ENXIO);
11532 12286 }
11533 12287 } else {
11534 12288 mutex_exit(&mpt->m_mutex);
11535 12289 }
11536 12290 } else {
11537 12291 mutex_exit(&mpt->m_mutex);
11538 12292 }
11539 12293
11540 12294 if (iport_flag) {
11541 12295 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
11542 12296 if (status != 0) {
11543 12297 goto out;
11544 12298 }
11545 12299 /*
11546 12300 * The following code control the OK2RM LED, it doesn't affect
11547 12301 * the ioctl return status.
11548 12302 */
11549 12303 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
11550 12304 (cmd == DEVCTL_DEVICE_OFFLINE)) {
11551 12305 if (ndi_dc_allochdl((void *)data, &dcp) !=
11552 12306 NDI_SUCCESS) {
11553 12307 goto out;
11554 12308 }
11555 12309 addr = ndi_dc_getaddr(dcp);
11556 12310 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
11557 12311 if (ptgt == NULL) {
11558 12312 NDBG14(("mptsas_ioctl led control: tgt %s not "
11559 12313 "found", addr));
11560 12314 ndi_dc_freehdl(dcp);
11561 12315 goto out;
11562 12316 }
11563 12317 mutex_enter(&mpt->m_mutex);
11564 12318 if (cmd == DEVCTL_DEVICE_ONLINE) {
11565 12319 ptgt->m_tgt_unconfigured = 0;
11566 12320 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
11567 12321 ptgt->m_tgt_unconfigured = 1;
11568 12322 }
11569 12323 if (cmd == DEVCTL_DEVICE_OFFLINE) {
11570 12324 ptgt->m_led_status |=
11571 12325 (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
11572 12326 } else {
11573 12327 ptgt->m_led_status &=
11574 12328 ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
11575 12329 }
11576 12330 (void) mptsas_flush_led_status(mpt, ptgt);
11577 12331 mutex_exit(&mpt->m_mutex);
11578 12332 ndi_dc_freehdl(dcp);
11579 12333 }
11580 12334 goto out;
11581 12335 }
11582 12336 switch (cmd) {
11583 12337 case MPTIOCTL_GET_DISK_INFO:
11584 12338 status = get_disk_info(mpt, data, mode);
11585 12339 break;
11586 12340 case MPTIOCTL_LED_CONTROL:
11587 12341 status = led_control(mpt, data, mode);
11588 12342 break;
11589 12343 case MPTIOCTL_UPDATE_FLASH:
11590 12344 if (ddi_copyin((void *)data, &flashdata,
11591 12345 sizeof (struct mptsas_update_flash), mode)) {
11592 12346 status = EFAULT;
11593 12347 break;
11594 12348 }
11595 12349
11596 12350 mutex_enter(&mpt->m_mutex);
11597 12351 if (mptsas_update_flash(mpt,
11598 12352 (caddr_t)(long)flashdata.PtrBuffer,
11599 12353 flashdata.ImageSize, flashdata.ImageType, mode)) {
11600 12354 status = EFAULT;
11601 12355 }
11602 12356
11603 12357 /*
11604 12358 * Reset the chip to start using the new
11605 12359 * firmware. Reset if failed also.
11606 12360 */
11607 12361 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11608 12362 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
11609 12363 status = EFAULT;
11610 12364 }
11611 12365 mutex_exit(&mpt->m_mutex);
11612 12366 break;
11613 12367 case MPTIOCTL_PASS_THRU:
11614 12368 /*
11615 12369 * The user has requested to pass through a command to
11616 12370 * be executed by the MPT firmware. Call our routine
11617 12371 * which does this. Only allow one passthru IOCTL at
11618 12372 * one time. Other threads will block on
11619 12373 * m_passthru_mutex, which is of adaptive variant.
11620 12374 */
11621 12375 if (ddi_copyin((void *)data, &passthru_data,
11622 12376 sizeof (mptsas_pass_thru_t), mode)) {
11623 12377 status = EFAULT;
11624 12378 break;
11625 12379 }
11626 12380 mutex_enter(&mpt->m_passthru_mutex);
11627 12381 mutex_enter(&mpt->m_mutex);
11628 12382 status = mptsas_pass_thru(mpt, &passthru_data, mode);
11629 12383 mutex_exit(&mpt->m_mutex);
11630 12384 mutex_exit(&mpt->m_passthru_mutex);
11631 12385
11632 12386 break;
11633 12387 case MPTIOCTL_GET_ADAPTER_DATA:
11634 12388 /*
11635 12389 * The user has requested to read adapter data. Call
11636 12390 * our routine which does this.
11637 12391 */
11638 12392 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
11639 12393 if (ddi_copyin((void *)data, (void *)&adapter_data,
11640 12394 sizeof (mptsas_adapter_data_t), mode)) {
11641 12395 status = EFAULT;
11642 12396 break;
11643 12397 }
11644 12398 if (adapter_data.StructureLength >=
11645 12399 sizeof (mptsas_adapter_data_t)) {
11646 12400 adapter_data.StructureLength = (uint32_t)
11647 12401 sizeof (mptsas_adapter_data_t);
11648 12402 copylen = sizeof (mptsas_adapter_data_t);
11649 12403 mutex_enter(&mpt->m_mutex);
11650 12404 mptsas_read_adapter_data(mpt, &adapter_data);
11651 12405 mutex_exit(&mpt->m_mutex);
11652 12406 } else {
11653 12407 adapter_data.StructureLength = (uint32_t)
11654 12408 sizeof (mptsas_adapter_data_t);
11655 12409 copylen = sizeof (adapter_data.StructureLength);
11656 12410 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11657 12411 }
11658 12412 if (ddi_copyout((void *)(&adapter_data), (void *)data,
11659 12413 copylen, mode) != 0) {
11660 12414 status = EFAULT;
11661 12415 }
11662 12416 break;
11663 12417 case MPTIOCTL_GET_PCI_INFO:
11664 12418 /*
11665 12419 * The user has requested to read pci info. Call
11666 12420 * our routine which does this.
11667 12421 */
11668 12422 bzero(&pci_info, sizeof (mptsas_pci_info_t));
11669 12423 mutex_enter(&mpt->m_mutex);
11670 12424 mptsas_read_pci_info(mpt, &pci_info);
11671 12425 mutex_exit(&mpt->m_mutex);
11672 12426 if (ddi_copyout((void *)(&pci_info), (void *)data,
11673 12427 sizeof (mptsas_pci_info_t), mode) != 0) {
11674 12428 status = EFAULT;
11675 12429 }
11676 12430 break;
11677 12431 case MPTIOCTL_RESET_ADAPTER:
11678 12432 mutex_enter(&mpt->m_mutex);
11679 12433 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11680 12434 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11681 12435 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
11682 12436 "failed");
11683 12437 status = EFAULT;
11684 12438 }
11685 12439 mutex_exit(&mpt->m_mutex);
11686 12440 break;
11687 12441 case MPTIOCTL_DIAG_ACTION:
11688 12442 /*
11689 12443 * The user has done a diag buffer action. Call our
11690 12444 * routine which does this. Only allow one diag action
11691 12445 * at one time.
11692 12446 */
11693 12447 mutex_enter(&mpt->m_mutex);
11694 12448 if (mpt->m_diag_action_in_progress) {
11695 12449 mutex_exit(&mpt->m_mutex);
11696 12450 return (EBUSY);
11697 12451 }
11698 12452 mpt->m_diag_action_in_progress = 1;
11699 12453 status = mptsas_diag_action(mpt,
11700 12454 (mptsas_diag_action_t *)data, mode);
11701 12455 mpt->m_diag_action_in_progress = 0;
11702 12456 mutex_exit(&mpt->m_mutex);
11703 12457 break;
11704 12458 case MPTIOCTL_EVENT_QUERY:
11705 12459 /*
11706 12460 * The user has done an event query. Call our routine
11707 12461 * which does this.
11708 12462 */
11709 12463 status = mptsas_event_query(mpt,
11710 12464 (mptsas_event_query_t *)data, mode, rval);
11711 12465 break;
11712 12466 case MPTIOCTL_EVENT_ENABLE:
11713 12467 /*
11714 12468 * The user has done an event enable. Call our routine
11715 12469 * which does this.
11716 12470 */
11717 12471 status = mptsas_event_enable(mpt,
11718 12472 (mptsas_event_enable_t *)data, mode, rval);
11719 12473 break;
11720 12474 case MPTIOCTL_EVENT_REPORT:
11721 12475 /*
11722 12476 * The user has done an event report. Call our routine
11723 12477 * which does this.
11724 12478 */
11725 12479 status = mptsas_event_report(mpt,
11726 12480 (mptsas_event_report_t *)data, mode, rval);
11727 12481 break;
11728 12482 case MPTIOCTL_REG_ACCESS:
11729 12483 /*
11730 12484 * The user has requested register access. Call our
11731 12485 * routine which does this.
11732 12486 */
11733 12487 status = mptsas_reg_access(mpt,
11734 12488 (mptsas_reg_access_t *)data, mode);
11735 12489 break;
11736 12490 default:
11737 12491 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
11738 12492 rval);
11739 12493 break;
11740 12494 }
11741 12495
11742 12496 out:
11743 12497 return (status);
11744 12498 }
11745 12499
/*
 * mptsas_restart_ioc: quiesce I/O, reset and re-initialize the IOC, then
 * resume I/O.
 *
 * Caller must hold m_mutex.  Used after a firmware flash, for the
 * RESET_ADAPTER ioctl, and for internal error recovery.  Returns
 * DDI_SUCCESS when the chip re-initialized; DDI_FAILURE otherwise, in
 * which case a NO_RESPONSE ereport is posted and the FM service impact
 * is marked LOST.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int		rval = DDI_SUCCESS;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD so no new commands are started on any
	 * target while the reset is in progress.
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.  FALSE: not the attach-time call, so a
	 * cheaper message unit reset may be attempted first.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles back to MAX so held I/O can flow again.
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		/* Tell FMA the device stopped responding. */
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
11824 12578
/*
 * mptsas_init_chip: bring the IOC from reset to fully operational.
 *
 * first_time is TRUE only for the attach-time call.  On later calls a
 * successful message unit reset (MPTSAS_SUCCESS_MUR) jumps straight to
 * the "mur:" label, since IOC facts -- and thus every buffer sized from
 * them -- are unchanged.  A full (diag) reset instead re-reads the IOC
 * facts and re-allocates all facts-derived resources first.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	/*
	 * NOTE(review): on every non-MUR pass this replaces any existing
	 * m_targets hash without an explicit destroy here -- confirm the
	 * prior hash (if any) is released elsewhere on the reset path.
	 */
	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link),
	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's
	 * (the "unused" marker pattern for reply descriptors).
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* FM sanity pass over every DMA handle touched above. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
12008 12762
12009 12763 static int
12010 12764 mptsas_get_pci_cap(mptsas_t *mpt)
12011 12765 {
12012 12766 ushort_t caps_ptr, cap, cap_count;
12013 12767
12014 12768 if (mpt->m_config_handle == NULL)
12015 12769 return (FALSE);
12016 12770 /*
12017 12771 * Check if capabilities list is supported and if so,
12018 12772 * get initial capabilities pointer and clear bits 0,1.
12019 12773 */
12020 12774 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
12021 12775 & PCI_STAT_CAP) {
12022 12776 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12023 12777 PCI_CONF_CAP_PTR), 4);
12024 12778 } else {
12025 12779 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
12026 12780 }
12027 12781
12028 12782 /*
12029 12783 * Walk capabilities if supported.
12030 12784 */
12031 12785 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
12032 12786
12033 12787 /*
12034 12788 * Check that we haven't exceeded the maximum number of
12035 12789 * capabilities and that the pointer is in a valid range.
12036 12790 */
12037 12791 if (++cap_count > 48) {
12038 12792 mptsas_log(mpt, CE_WARN,
12039 12793 "too many device capabilities.\n");
12040 12794 break;
12041 12795 }
12042 12796 if (caps_ptr < 64) {
12043 12797 mptsas_log(mpt, CE_WARN,
12044 12798 "capabilities pointer 0x%x out of range.\n",
12045 12799 caps_ptr);
12046 12800 break;
12047 12801 }
12048 12802
12049 12803 /*
12050 12804 * Get next capability and check that it is valid.
12051 12805 * For now, we only support power management.
12052 12806 */
12053 12807 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
12054 12808 switch (cap) {
12055 12809 case PCI_CAP_ID_PM:
12056 12810 mptsas_log(mpt, CE_NOTE,
12057 12811 "?mptsas%d supports power management.\n",
12058 12812 mpt->m_instance);
12059 12813 mpt->m_options |= MPTSAS_OPT_PM;
12060 12814
12061 12815 /* Save PMCSR offset */
12062 12816 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
12063 12817 break;
12064 12818 /*
12065 12819 * The following capabilities are valid. Any others
12066 12820 * will cause a message to be logged.
12067 12821 */
12068 12822 case PCI_CAP_ID_VPD:
12069 12823 case PCI_CAP_ID_MSI:
12070 12824 case PCI_CAP_ID_PCIX:
12071 12825 case PCI_CAP_ID_PCI_E:
12072 12826 case PCI_CAP_ID_MSI_X:
12073 12827 break;
12074 12828 default:
12075 12829 mptsas_log(mpt, CE_NOTE,
12076 12830 "?mptsas%d unrecognized capability "
12077 12831 "0x%x.\n", mpt->m_instance, cap);
12078 12832 break;
12079 12833 }
12080 12834
12081 12835 /*
12082 12836 * Get next capabilities pointer and clear bits 0,1.
12083 12837 */
12084 12838 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12085 12839 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
12086 12840 }
12087 12841 return (TRUE);
12088 12842 }
12089 12843
12090 12844 static int
12091 12845 mptsas_init_pm(mptsas_t *mpt)
12092 12846 {
12093 12847 char pmc_name[16];
12094 12848 char *pmc[] = {
12095 12849 NULL,
12096 12850 "0=Off (PCI D3 State)",
12097 12851 "3=On (PCI D0 State)",
12098 12852 NULL
12099 12853 };
12100 12854 uint16_t pmcsr_stat;
12101 12855
12102 12856 if (mptsas_get_pci_cap(mpt) == FALSE) {
12103 12857 return (DDI_FAILURE);
12104 12858 }
12105 12859 /*
12106 12860 * If PCI's capability does not support PM, then don't need
12107 12861 * to registe the pm-components
12108 12862 */
12109 12863 if (!(mpt->m_options & MPTSAS_OPT_PM))
12110 12864 return (DDI_SUCCESS);
12111 12865 /*
12112 12866 * If power management is supported by this chip, create
12113 12867 * pm-components property for the power management framework
12114 12868 */
12115 12869 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12116 12870 pmc[0] = pmc_name;
12117 12871 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12118 12872 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12119 12873 mpt->m_options &= ~MPTSAS_OPT_PM;
12120 12874 mptsas_log(mpt, CE_WARN,
12121 12875 "mptsas%d: pm-component property creation failed.",
12122 12876 mpt->m_instance);
12123 12877 return (DDI_FAILURE);
12124 12878 }
12125 12879
12126 12880 /*
12127 12881 * Power on device.
12128 12882 */
12129 12883 (void) pm_busy_component(mpt->m_dip, 0);
12130 12884 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12131 12885 mpt->m_pmcsr_offset);
12132 12886 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12133 12887 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12134 12888 mpt->m_instance);
12135 12889 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12136 12890 PCI_PMCSR_D0);
12137 12891 }
12138 12892 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12139 12893 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12140 12894 return (DDI_FAILURE);
12141 12895 }
12142 12896 mpt->m_power_level = PM_LEVEL_D0;
12143 12897 /*
12144 12898 * Set pm idle delay.
12145 12899 */
12146 12900 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12147 12901 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12148 12902
12149 12903 return (DDI_SUCCESS);
12150 12904 }
12151 12905
12152 12906 static int
12153 12907 mptsas_register_intrs(mptsas_t *mpt)
12154 12908 {
12155 12909 dev_info_t *dip;
12156 12910 int intr_types;
12157 12911
12158 12912 dip = mpt->m_dip;
12159 12913
12160 12914 /* Get supported interrupt types */
12161 12915 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12162 12916 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
12163 12917 "failed\n");
12164 12918 return (FALSE);
12165 12919 }
12166 12920
12167 12921 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12168 12922
12169 12923 /*
12170 12924 * Try MSI, but fall back to FIXED
12171 12925 */
12172 12926 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12173 12927 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12174 12928 NDBG0(("Using MSI interrupt type"));
12175 12929 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12176 12930 return (TRUE);
12177 12931 }
12178 12932 }
12179 12933 if (intr_types & DDI_INTR_TYPE_FIXED) {
12180 12934 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12181 12935 NDBG0(("Using FIXED interrupt type"));
12182 12936 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12183 12937 return (TRUE);
12184 12938 } else {
12185 12939 NDBG0(("FIXED interrupt registration failed"));
12186 12940 return (FALSE);
12187 12941 }
12188 12942 }
12189 12943
12190 12944 return (FALSE);
12191 12945 }
12192 12946
/*
 * mptsas_unregister_intrs:
 *
 * Tear down whatever interrupts mptsas_register_intrs() set up.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12198 12952
12199 12953 /*
12200 12954 * mptsas_add_intrs:
12201 12955 *
12202 12956 * Register FIXED or MSI interrupts.
12203 12957 */
12204 12958 static int
12205 12959 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12206 12960 {
12207 12961 dev_info_t *dip = mpt->m_dip;
12208 12962 int avail, actual, count = 0;
12209 12963 int i, flag, ret;
12210 12964
12211 12965 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12212 12966
12213 12967 /* Get number of interrupts */
12214 12968 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12215 12969 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12216 12970 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12217 12971 "ret %d count %d\n", ret, count);
12218 12972
12219 12973 return (DDI_FAILURE);
12220 12974 }
12221 12975
12222 12976 /* Get number of available interrupts */
12223 12977 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12224 12978 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12225 12979 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12226 12980 "ret %d avail %d\n", ret, avail);
12227 12981
12228 12982 return (DDI_FAILURE);
12229 12983 }
12230 12984
12231 12985 if (avail < count) {
12232 12986 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
12233 12987 "navail() returned %d", count, avail);
12234 12988 }
12235 12989
12236 12990 /* Mpt only have one interrupt routine */
12237 12991 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12238 12992 count = 1;
12239 12993 }
12240 12994
12241 12995 /* Allocate an array of interrupt handles */
12242 12996 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12243 12997 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12244 12998
12245 12999 flag = DDI_INTR_ALLOC_NORMAL;
12246 13000
12247 13001 /* call ddi_intr_alloc() */
12248 13002 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12249 13003 count, &actual, flag);
12250 13004
12251 13005 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12252 13006 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12253 13007 ret);
12254 13008 kmem_free(mpt->m_htable, mpt->m_intr_size);
12255 13009 return (DDI_FAILURE);
12256 13010 }
12257 13011
12258 13012 /* use interrupt count returned or abort? */
12259 13013 if (actual < count) {
12260 13014 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12261 13015 count, actual);
12262 13016 }
12263 13017
12264 13018 mpt->m_intr_cnt = actual;
12265 13019
12266 13020 /*
12267 13021 * Get priority for first msi, assume remaining are all the same
12268 13022 */
12269 13023 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12270 13024 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12271 13025 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12272 13026
12273 13027 /* Free already allocated intr */
12274 13028 for (i = 0; i < actual; i++) {
12275 13029 (void) ddi_intr_free(mpt->m_htable[i]);
12276 13030 }
12277 13031
12278 13032 kmem_free(mpt->m_htable, mpt->m_intr_size);
12279 13033 return (DDI_FAILURE);
12280 13034 }
12281 13035
12282 13036 /* Test for high level mutex */
12283 13037 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12284 13038 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12285 13039 "Hi level interrupt not supported\n");
12286 13040
12287 13041 /* Free already allocated intr */
12288 13042 for (i = 0; i < actual; i++) {
12289 13043 (void) ddi_intr_free(mpt->m_htable[i]);
12290 13044 }
12291 13045
12292 13046 kmem_free(mpt->m_htable, mpt->m_intr_size);
12293 13047 return (DDI_FAILURE);
12294 13048 }
12295 13049
12296 13050 /* Call ddi_intr_add_handler() */
12297 13051 for (i = 0; i < actual; i++) {
12298 13052 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12299 13053 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12300 13054 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12301 13055 "failed %d\n", ret);
12302 13056
12303 13057 /* Free already allocated intr */
12304 13058 for (i = 0; i < actual; i++) {
12305 13059 (void) ddi_intr_free(mpt->m_htable[i]);
12306 13060 }
12307 13061
12308 13062 kmem_free(mpt->m_htable, mpt->m_intr_size);
12309 13063 return (DDI_FAILURE);
12310 13064 }
12311 13065 }
12312 13066
12313 13067 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12314 13068 != DDI_SUCCESS) {
12315 13069 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12316 13070
12317 13071 /* Free already allocated intr */
12318 13072 for (i = 0; i < actual; i++) {
12319 13073 (void) ddi_intr_free(mpt->m_htable[i]);
12320 13074 }
12321 13075
12322 13076 kmem_free(mpt->m_htable, mpt->m_intr_size);
12323 13077 return (DDI_FAILURE);
12324 13078 }
12325 13079
12326 13080 /*
12327 13081 * Enable interrupts
12328 13082 */
12329 13083 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12330 13084 /* Call ddi_intr_block_enable() for MSI interrupts */
12331 13085 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12332 13086 } else {
12333 13087 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12334 13088 for (i = 0; i < mpt->m_intr_cnt; i++) {
12335 13089 (void) ddi_intr_enable(mpt->m_htable[i]);
12336 13090 }
12337 13091 }
12338 13092 return (DDI_SUCCESS);
12339 13093 }
12340 13094
12341 13095 /*
12342 13096 * mptsas_rem_intrs:
12343 13097 *
12344 13098 * Unregister FIXED or MSI interrupts
12345 13099 */
12346 13100 static void
12347 13101 mptsas_rem_intrs(mptsas_t *mpt)
12348 13102 {
12349 13103 int i;
12350 13104
12351 13105 NDBG6(("mptsas_rem_intrs"));
12352 13106
12353 13107 /* Disable all interrupts */
12354 13108 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12355 13109 /* Call ddi_intr_block_disable() */
12356 13110 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12357 13111 } else {
12358 13112 for (i = 0; i < mpt->m_intr_cnt; i++) {
12359 13113 (void) ddi_intr_disable(mpt->m_htable[i]);
12360 13114 }
12361 13115 }
12362 13116
12363 13117 /* Call ddi_intr_remove_handler() */
12364 13118 for (i = 0; i < mpt->m_intr_cnt; i++) {
12365 13119 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12366 13120 (void) ddi_intr_free(mpt->m_htable[i]);
12367 13121 }
12368 13122
12369 13123 kmem_free(mpt->m_htable, mpt->m_intr_size);
12370 13124 }
12371 13125
/*
 * The IO fault service error handling callback function, registered via
 * ddi_fm_handler_register() in mptsas_fm_init().
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
12386 13140
12387 13141 /*
12388 13142 * mptsas_fm_init - initialize fma capabilities and register with IO
12389 13143 * fault services.
12390 13144 */
12391 13145 static void
12392 13146 mptsas_fm_init(mptsas_t *mpt)
12393 13147 {
12394 13148 /*
12395 13149 * Need to change iblock to priority for new MSI intr
12396 13150 */
12397 13151 ddi_iblock_cookie_t fm_ibc;
12398 13152
12399 13153 /* Only register with IO Fault Services if we have some capability */
12400 13154 if (mpt->m_fm_capabilities) {
12401 13155 /* Adjust access and dma attributes for FMA */
12402 13156 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12403 13157 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12404 13158 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12405 13159
12406 13160 /*
12407 13161 * Register capabilities with IO Fault Services.
12408 13162 * mpt->m_fm_capabilities will be updated to indicate
12409 13163 * capabilities actually supported (not requested.)
12410 13164 */
12411 13165 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12412 13166
12413 13167 /*
12414 13168 * Initialize pci ereport capabilities if ereport
12415 13169 * capable (should always be.)
12416 13170 */
12417 13171 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12418 13172 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12419 13173 pci_ereport_setup(mpt->m_dip);
12420 13174 }
12421 13175
12422 13176 /*
12423 13177 * Register error callback if error callback capable.
12424 13178 */
12425 13179 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12426 13180 ddi_fm_handler_register(mpt->m_dip,
12427 13181 mptsas_fm_error_cb, (void *) mpt);
12428 13182 }
12429 13183 }
12430 13184 }
12431 13185
12432 13186 /*
12433 13187 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12434 13188 * fault services.
12435 13189 *
12436 13190 */
12437 13191 static void
12438 13192 mptsas_fm_fini(mptsas_t *mpt)
12439 13193 {
12440 13194 /* Only unregister FMA capabilities if registered */
12441 13195 if (mpt->m_fm_capabilities) {
12442 13196
12443 13197 /*
12444 13198 * Un-register error callback if error callback capable.
12445 13199 */
12446 13200
12447 13201 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12448 13202 ddi_fm_handler_unregister(mpt->m_dip);
12449 13203 }
12450 13204
12451 13205 /*
12452 13206 * Release any resources allocated by pci_ereport_setup()
12453 13207 */
12454 13208
12455 13209 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12456 13210 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12457 13211 pci_ereport_teardown(mpt->m_dip);
12458 13212 }
12459 13213
12460 13214 /* Unregister from IO Fault Services */
12461 13215 ddi_fm_fini(mpt->m_dip);
12462 13216
12463 13217 /* Adjust access and dma attributes for FMA */
12464 13218 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12465 13219 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12466 13220 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12467 13221
12468 13222 }
12469 13223 }
12470 13224
12471 13225 int
12472 13226 mptsas_check_acc_handle(ddi_acc_handle_t handle)
12473 13227 {
12474 13228 ddi_fm_error_t de;
12475 13229
12476 13230 if (handle == NULL)
12477 13231 return (DDI_FAILURE);
12478 13232 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
12479 13233 return (de.fme_status);
12480 13234 }
12481 13235
12482 13236 int
12483 13237 mptsas_check_dma_handle(ddi_dma_handle_t handle)
12484 13238 {
12485 13239 ddi_fm_error_t de;
12486 13240
12487 13241 if (handle == NULL)
12488 13242 return (DDI_FAILURE);
12489 13243 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
12490 13244 return (de.fme_status);
12491 13245 }
12492 13246
12493 13247 void
12494 13248 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12495 13249 {
12496 13250 uint64_t ena;
12497 13251 char buf[FM_MAX_CLASS];
12498 13252
12499 13253 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12500 13254 ena = fm_ena_generate(0, FM_ENA_FMT1);
12501 13255 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12502 13256 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12503 13257 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12504 13258 }
12505 13259 }
12506 13260
12507 13261 static int
12508 13262 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12509 13263 uint16_t *dev_handle, mptsas_target_t **pptgt)
12510 13264 {
12511 13265 int rval;
12512 13266 uint32_t dev_info;
12513 13267 uint64_t sas_wwn;
12514 13268 mptsas_phymask_t phymask;
12515 13269 uint8_t physport, phynum, config, disk;
12516 13270 uint64_t devicename;
12517 13271 uint16_t pdev_hdl;
12518 13272 mptsas_target_t *tmp_tgt = NULL;
12519 13273 uint16_t bay_num, enclosure;
12520 13274
12521 13275 ASSERT(*pptgt == NULL);
12522 13276
12523 13277 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12524 13278 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12525 13279 &bay_num, &enclosure);
12526 13280 if (rval != DDI_SUCCESS) {
12527 13281 rval = DEV_INFO_FAIL_PAGE0;
12528 13282 return (rval);
12529 13283 }
12530 13284
12531 13285 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12532 13286 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12533 13287 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12534 13288 rval = DEV_INFO_WRONG_DEVICE_TYPE;
12535 13289 return (rval);
12536 13290 }
12537 13291
12538 13292 /*
12539 13293 * Check if the dev handle is for a Phys Disk. If so, set return value
12540 13294 * and exit. Don't add Phys Disks to hash.
12541 13295 */
12542 13296 for (config = 0; config < mpt->m_num_raid_configs; config++) {
12543 13297 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12544 13298 if (*dev_handle == mpt->m_raidconfig[config].
12545 13299 m_physdisk_devhdl[disk]) {
12546 13300 rval = DEV_INFO_PHYS_DISK;
12547 13301 return (rval);
12548 13302 }
12549 13303 }
12550 13304 }
12551 13305
12552 13306 /*
12553 13307 * Get SATA Device Name from SAS device page0 for
12554 13308 * sata device, if device name doesn't exist, set mta_wwn to
12555 13309 * 0 for direct attached SATA. For the device behind the expander
12556 13310 * we still can use STP address assigned by expander.
12557 13311 */
12558 13312 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12559 13313 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12560 13314 mutex_exit(&mpt->m_mutex);
12561 13315 /* alloc a tmp_tgt to send the cmd */
12562 13316 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12563 13317 KM_SLEEP);
12564 13318 tmp_tgt->m_devhdl = *dev_handle;
12565 13319 tmp_tgt->m_deviceinfo = dev_info;
12566 13320 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12567 13321 tmp_tgt->m_qfull_retry_interval =
12568 13322 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12569 13323 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12570 13324 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12571 13325 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12572 13326 mutex_enter(&mpt->m_mutex);
12573 13327 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12574 13328 sas_wwn = devicename;
12575 13329 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12576 13330 sas_wwn = 0;
12577 13331 }
12578 13332 }
12579 13333
12580 13334 phymask = mptsas_physport_to_phymask(mpt, physport);
12581 13335 *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
12582 13336 dev_info, phymask, phynum);
12583 13337 if (*pptgt == NULL) {
12584 13338 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
12585 13339 "structure!");
12586 13340 rval = DEV_INFO_FAIL_ALLOC;
12587 13341 return (rval);
12588 13342 }
12589 13343 (*pptgt)->m_enclosure = enclosure;
12590 13344 (*pptgt)->m_slot_num = bay_num;
12591 13345 return (DEV_INFO_SUCCESS);
12592 13346 }
12593 13347
/*
 * mptsas_get_sata_guid
 *
 * Obtain the worldwide-unique GUID of a SATA device by issuing an
 * INQUIRY for VPD page 0x83 (device identification) and decoding the
 * first designation descriptor.  If the page data is not ready yet the
 * request is retried up to three times, one second apart.
 *
 * Returns the 64-bit NAA type 5 GUID, or 0 if no such GUID could be
 * obtained (inquiry failure, non-LUN-associated descriptor, or a drive
 * that has no NAA-format identifier).
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t sata_guid = 0, *pwwn = NULL;
	int target = ptgt->m_devhdl;
	uchar_t *inq83 = NULL;
	int inq83_len = 0xFF;
	uchar_t *dblk = NULL;
	int inq83_retry = 3;
	int rval = DDI_FAILURE;

	inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* Association field (bits 4-5 of byte 1) must be 0 (LUN). */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	/* Descriptor payload: 8-byte big-endian identifier. */
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	if ((dblk[4] & 0xf0) == 0x50) {
		/* NAA type 5 identifier - this is the GUID we want. */
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		/* Wait one second before the next attempt. */
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
12642 13396
12643 13397 static int
12644 13398 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12645 13399 unsigned char *buf, int len, int *reallen, uchar_t evpd)
12646 13400 {
12647 13401 uchar_t cdb[CDB_GROUP0];
12648 13402 struct scsi_address ap;
12649 13403 struct buf *data_bp = NULL;
12650 13404 int resid = 0;
12651 13405 int ret = DDI_FAILURE;
12652 13406
12653 13407 ASSERT(len <= 0xffff);
12654 13408
12655 13409 ap.a_target = MPTSAS_INVALID_DEVHDL;
12656 13410 ap.a_lun = (uchar_t)(lun);
12657 13411 ap.a_hba_tran = mpt->m_tran;
12658 13412
12659 13413 data_bp = scsi_alloc_consistent_buf(&ap,
12660 13414 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12661 13415 if (data_bp == NULL) {
12662 13416 return (ret);
12663 13417 }
12664 13418 bzero(cdb, CDB_GROUP0);
12665 13419 cdb[0] = SCMD_INQUIRY;
12666 13420 cdb[1] = evpd;
12667 13421 cdb[2] = page;
12668 13422 cdb[3] = (len & 0xff00) >> 8;
12669 13423 cdb[4] = (len & 0x00ff);
12670 13424 cdb[5] = 0;
12671 13425
12672 13426 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12673 13427 &resid);
12674 13428 if (ret == DDI_SUCCESS) {
12675 13429 if (reallen) {
12676 13430 *reallen = len - resid;
12677 13431 }
12678 13432 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12679 13433 }
12680 13434 if (data_bp) {
12681 13435 scsi_free_consistent_buf(data_bp);
12682 13436 }
12683 13437 return (ret);
12684 13438 }
12685 13439
/*
 * Build and synchronously execute a SCSI command against the given
 * target.  A private clone of the HBA's scsi_hba_tran_t is used so the
 * packet carries the target/lun information that tran_init_pkt expects
 * from a normal SCSA target driver (e.g. sd).
 *
 * ap		- scsi_address used for packet allocation
 * ptgt		- destination target
 * cdb/cdblen	- command descriptor block and its length
 * data_bp	- data buffer for the transfer
 * resid	- if non-NULL, set to the residual byte count on success
 *
 * Returns DDI_SUCCESS if the command completed without a check
 * condition, DDI_FAILURE otherwise.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt *pktp = NULL;
	scsi_hba_tran_t *tran_clone = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	int ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	/* Defensive: KM_SLEEP allocations do not return NULL. */
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	/* Hang the lun/target off the cloned tran for tran_init_pkt. */
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Execute synchronously (polled). */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A check condition counts as failure. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Common cleanup for all exit paths. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
/*
 * Parse a target unit-address of the form "w<WWID>,<LUN>" (SAS address)
 * or "p<PHYID>,<LUN>" (direct-attached phy), both fields in hex.
 *
 * On success *wwid or *phy is set (depending on the leading character),
 * *lun is set, and DDI_SUCCESS is returned; DDI_FAILURE otherwise.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char *cp = NULL;
	char *ptr = NULL;
	size_t s = 0;
	char *wwid_str = NULL;
	char *lun_str = NULL;
	long lunnum;
	long phyid = -1;
	int rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	/* The address part must be terminated by a ','. */
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	/* Copy out the WWID (or phy id) substring. */
	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/* Everything after the ',' up to the terminator is the LUN. */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	if (name[0] == 'p') {
		/* "p" form: leading field is a hex phy number. */
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		/* "w" form: leading field is a hex WWN string. */
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	/* LUN is also encoded in hex. */
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
12811 13565
12812 13566 /*
12813 13567 * mptsas_parse_smp_name() is to parse sas wwn string
12814 13568 * which format is "wWWN"
12815 13569 */
12816 13570 static int
12817 13571 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12818 13572 {
12819 13573 char *ptr = name;
12820 13574
12821 13575 if (*ptr != 'w') {
12822 13576 return (DDI_FAILURE);
12823 13577 }
12824 13578
12825 13579 ptr++;
12826 13580 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12827 13581 return (DDI_FAILURE);
12828 13582 }
12829 13583 return (DDI_SUCCESS);
12830 13584 }
12831 13585
/*
 * Nexus bus_config(9E) entry point for mptsas iports.
 *
 * Handles BUS_CONFIG_ONE (configure a single child named by "smp@w...",
 * "...@w<WWID>,<LUN>" or "...@p<PHY>,<LUN>") as well as
 * BUS_CONFIG_DRIVER/BUS_CONFIG_ALL (configure everything).  The nexus
 * is held across the operation via ndi_devi_enter on both the vHCI and
 * the iport.
 *
 * Returns NDI_SUCCESS/NDI_FAILURE per the bus_config contract.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	int bconfig = TRUE;

	/* Only iport nodes are configured here, not the HBA node. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the generic framework attach the configured node(s). */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
12949 13703
12950 13704 static int
12951 13705 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12952 13706 mptsas_target_t *ptgt)
12953 13707 {
12954 13708 int rval = DDI_FAILURE;
12955 13709 struct scsi_inquiry *sd_inq = NULL;
12956 13710 mptsas_t *mpt = DIP2MPT(pdip);
12957 13711
12958 13712 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12959 13713
12960 13714 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12961 13715 SUN_INQSIZE, 0, (uchar_t)0);
12962 13716
12963 13717 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12964 13718 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12965 13719 } else {
12966 13720 rval = DDI_FAILURE;
12967 13721 }
12968 13722
12969 13723 kmem_free(sd_inq, SUN_INQSIZE);
12970 13724 return (rval);
12971 13725 }
12972 13726
/*
 * Configure a single LUN addressed by SAS address (wWWID form) on the
 * given iport.  If a matching child node already exists it is returned
 * directly; a phymask of 0 denotes the virtual iport, in which case the
 * target is configured as an IR (RAID) volume.
 *
 * Returns DDI_SUCCESS with *lundip set on success, DDI_FAILURE
 * otherwise.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int rval;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask;
	mptsas_target_t *ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
13033 13787
13034 13788 static int
13035 13789 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13036 13790 dev_info_t **lundip)
13037 13791 {
13038 13792 int rval;
13039 13793 mptsas_t *mpt = DIP2MPT(pdip);
13040 13794 mptsas_phymask_t phymask;
13041 13795 mptsas_target_t *ptgt = NULL;
13042 13796
13043 13797 /*
13044 13798 * Get the physical port associated to the iport
13045 13799 */
13046 13800 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13047 13801 "phymask", 0);
13048 13802
13049 13803 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13050 13804 if (ptgt == NULL) {
13051 13805 /*
13052 13806 * didn't match any device by searching
13053 13807 */
13054 13808 return (DDI_FAILURE);
13055 13809 }
13056 13810
13057 13811 /*
13058 13812 * If the LUN already exists and the status is online,
13059 13813 * we just return the pointer to dev_info_t directly.
13060 13814 * For the mdi_pathinfo node, we'll handle it in
13061 13815 * mptsas_create_virt_lun().
13062 13816 */
13063 13817
13064 13818 *lundip = mptsas_find_child_phy(pdip, phy);
13065 13819 if (*lundip != NULL) {
13066 13820 return (DDI_SUCCESS);
13067 13821 }
13068 13822
13069 13823 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13070 13824
13071 13825 return (rval);
13072 13826 }
13073 13827
13074 13828 static int
13075 13829 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13076 13830 uint8_t *lun_addr_type)
13077 13831 {
13078 13832 uint32_t lun_idx = 0;
13079 13833
13080 13834 ASSERT(lun_num != NULL);
13081 13835 ASSERT(lun_addr_type != NULL);
13082 13836
13083 13837 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13084 13838 /* determine report luns addressing type */
13085 13839 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13086 13840 /*
13087 13841 * Vendors in the field have been found to be concatenating
13088 13842 * bus/target/lun to equal the complete lun value instead
13089 13843 * of switching to flat space addressing
13090 13844 */
13091 13845 /* 00b - peripheral device addressing method */
13092 13846 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13093 13847 /* FALLTHRU */
13094 13848 /* 10b - logical unit addressing method */
13095 13849 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13096 13850 /* FALLTHRU */
13097 13851 /* 01b - flat space addressing method */
13098 13852 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13099 13853 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13100 13854 *lun_addr_type = (buf[lun_idx] &
13101 13855 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13102 13856 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13103 13857 *lun_num |= buf[lun_idx + 1];
13104 13858 return (DDI_SUCCESS);
13105 13859 default:
13106 13860 return (DDI_FAILURE);
13107 13861 }
13108 13862 }
13109 13863
13110 13864 static int
13111 13865 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13112 13866 {
13113 13867 struct buf *repluns_bp = NULL;
13114 13868 struct scsi_address ap;
13115 13869 uchar_t cdb[CDB_GROUP5];
13116 13870 int ret = DDI_FAILURE;
13117 13871 int retry = 0;
13118 13872 int lun_list_len = 0;
13119 13873 uint16_t lun_num = 0;
13120 13874 uint8_t lun_addr_type = 0;
13121 13875 uint32_t lun_cnt = 0;
13122 13876 uint32_t lun_total = 0;
13123 13877 dev_info_t *cdip = NULL;
13124 13878 uint16_t *saved_repluns = NULL;
13125 13879 char *buffer = NULL;
13126 13880 int buf_len = 128;
13127 13881 mptsas_t *mpt = DIP2MPT(pdip);
13128 13882 uint64_t sas_wwn = 0;
13129 13883 uint8_t phy = 0xFF;
13130 13884 uint32_t dev_info = 0;
13131 13885
13132 13886 mutex_enter(&mpt->m_mutex);
13133 13887 sas_wwn = ptgt->m_addr.mta_wwn;
13134 13888 phy = ptgt->m_phynum;
13135 13889 dev_info = ptgt->m_deviceinfo;
13136 13890 mutex_exit(&mpt->m_mutex);
13137 13891
13138 13892 if (sas_wwn == 0) {
13139 13893 /*
13140 13894 * It's a SATA without Device Name
13141 13895 * So don't try multi-LUNs
13142 13896 */
13143 13897 if (mptsas_find_child_phy(pdip, phy)) {
13144 13898 return (DDI_SUCCESS);
13145 13899 } else {
13146 13900 /*
13147 13901 * need configure and create node
13148 13902 */
13149 13903 return (DDI_FAILURE);
13150 13904 }
13151 13905 }
13152 13906
13153 13907 /*
13154 13908 * WWN (SAS address or Device Name exist)
13155 13909 */
13156 13910 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13157 13911 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13158 13912 /*
13159 13913 * SATA device with Device Name
13160 13914 * So don't try multi-LUNs
13161 13915 */
13162 13916 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
13163 13917 return (DDI_SUCCESS);
13164 13918 } else {
13165 13919 return (DDI_FAILURE);
13166 13920 }
13167 13921 }
13168 13922
13169 13923 do {
13170 13924 ap.a_target = MPTSAS_INVALID_DEVHDL;
13171 13925 ap.a_lun = 0;
13172 13926 ap.a_hba_tran = mpt->m_tran;
13173 13927 repluns_bp = scsi_alloc_consistent_buf(&ap,
13174 13928 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13175 13929 if (repluns_bp == NULL) {
13176 13930 retry++;
13177 13931 continue;
13178 13932 }
13179 13933 bzero(cdb, CDB_GROUP5);
13180 13934 cdb[0] = SCMD_REPORT_LUNS;
13181 13935 cdb[6] = (buf_len & 0xff000000) >> 24;
13182 13936 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13183 13937 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13184 13938 cdb[9] = (buf_len & 0x000000ff);
13185 13939
13186 13940 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13187 13941 repluns_bp, NULL);
13188 13942 if (ret != DDI_SUCCESS) {
13189 13943 scsi_free_consistent_buf(repluns_bp);
13190 13944 retry++;
13191 13945 continue;
13192 13946 }
13193 13947 lun_list_len = BE_32(*(int *)((void *)(
13194 13948 repluns_bp->b_un.b_addr)));
13195 13949 if (buf_len >= lun_list_len + 8) {
13196 13950 ret = DDI_SUCCESS;
13197 13951 break;
13198 13952 }
13199 13953 scsi_free_consistent_buf(repluns_bp);
13200 13954 buf_len = lun_list_len + 8;
13201 13955
13202 13956 } while (retry < 3);
13203 13957
13204 13958 if (ret != DDI_SUCCESS)
13205 13959 return (ret);
13206 13960 buffer = (char *)repluns_bp->b_un.b_addr;
13207 13961 /*
13208 13962 * find out the number of luns returned by the SCSI ReportLun call
13209 13963 * and allocate buffer space
13210 13964 */
13211 13965 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13212 13966 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13213 13967 if (saved_repluns == NULL) {
13214 13968 scsi_free_consistent_buf(repluns_bp);
13215 13969 return (DDI_FAILURE);
13216 13970 }
13217 13971 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13218 13972 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13219 13973 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13220 13974 continue;
13221 13975 }
13222 13976 saved_repluns[lun_cnt] = lun_num;
13223 13977 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13224 13978 ret = DDI_SUCCESS;
13225 13979 else
13226 13980 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13227 13981 ptgt);
13228 13982 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13229 13983 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13230 13984 MPTSAS_DEV_GONE);
13231 13985 }
13232 13986 }
13233 13987 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13234 13988 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13235 13989 scsi_free_consistent_buf(repluns_bp);
13236 13990 return (DDI_SUCCESS);
13237 13991 }
13238 13992
13239 13993 static int
13240 13994 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13241 13995 {
13242 13996 int rval = DDI_FAILURE;
13243 13997 struct scsi_inquiry *sd_inq = NULL;
13244 13998 mptsas_t *mpt = DIP2MPT(pdip);
13245 13999 mptsas_target_t *ptgt = NULL;
13246 14000
13247 14001 mutex_enter(&mpt->m_mutex);
13248 14002 ptgt = refhash_linear_search(mpt->m_targets,
13249 14003 mptsas_target_eval_devhdl, &target);
13250 14004 mutex_exit(&mpt->m_mutex);
13251 14005 if (ptgt == NULL) {
13252 14006 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13253 14007 "not found.", target);
13254 14008 return (rval);
13255 14009 }
13256 14010
13257 14011 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13258 14012 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13259 14013 SUN_INQSIZE, 0, (uchar_t)0);
13260 14014
13261 14015 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13262 14016 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13263 14017 0);
13264 14018 } else {
13265 14019 rval = DDI_FAILURE;
13266 14020 }
13267 14021
13268 14022 kmem_free(sd_inq, SUN_INQSIZE);
13269 14023 return (rval);
13270 14024 }
13271 14025
13272 14026 /*
13273 14027 * configure all RAID volumes for virtual iport
13274 14028 */
13275 14029 static void
13276 14030 mptsas_config_all_viport(dev_info_t *pdip)
13277 14031 {
13278 14032 mptsas_t *mpt = DIP2MPT(pdip);
13279 14033 int config, vol;
13280 14034 int target;
13281 14035 dev_info_t *lundip = NULL;
13282 14036
13283 14037 /*
13284 14038 * Get latest RAID info and search for any Volume DevHandles. If any
13285 14039 * are found, configure the volume.
13286 14040 */
13287 14041 mutex_enter(&mpt->m_mutex);
13288 14042 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13289 14043 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13290 14044 if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
13291 14045 == 1) {
13292 14046 target = mpt->m_raidconfig[config].
13293 14047 m_raidvol[vol].m_raidhandle;
13294 14048 mutex_exit(&mpt->m_mutex);
13295 14049 (void) mptsas_config_raid(pdip, target,
13296 14050 &lundip);
13297 14051 mutex_enter(&mpt->m_mutex);
13298 14052 }
13299 14053 }
13300 14054 }
13301 14055 mutex_exit(&mpt->m_mutex);
13302 14056 }
13303 14057
/*
 * Offline any child nodes (both devinfo children and mdi_pathinfo
 * clients) of the iport that belong to target 'ptgt' but whose LUN is
 * not present in 'repluns' (the LUN list just returned by REPORT LUNS,
 * 'lun_cnt' entries long).
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t *child = NULL, *savechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	uint64_t sas_wwn, wwid;
	uint8_t phy;
	int lun;
	int i;
	int find;
	char *addr;
	char *nodename;
	mptsas_t *mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain devinfo children.  Grab the next sibling before
	 * examining the current node so the walk survives an offline.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP nodes are not LUNs; skip them. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only children of this target are candidates. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi_pathinfo (multipath) clients, same logic as above.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
13400 14154
/*
 * Rebuild the driver's knowledge of attached devices: refresh the RAID
 * info, then walk the IOC's expander (SMP) and SAS device pages with
 * GET_NEXT_HANDLE, adding each discovered expander and target.  The
 * walk resumes from mpt->m_smp_devhdl / mpt->m_dev_handle and runs
 * until the respective m_done_traverse_* flag is set (or a page fetch
 * fails).
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t page_address;
	int rval = 0;
	uint16_t dev_handle;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk the expanders first, continuing from the saved handle. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(mpt, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		/* Page fetch or allocation failure ends the walk. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		/* Remember where to resume on the next invocation. */
		mpt->m_dev_handle = dev_handle;
	}

}
13454 14208
13455 14209 void
13456 14210 mptsas_update_driver_data(struct mptsas *mpt)
13457 14211 {
13458 14212 mptsas_target_t *tp;
13459 14213 mptsas_smp_t *sp;
13460 14214
13461 14215 ASSERT(MUTEX_HELD(&mpt->m_mutex));
13462 14216
13463 14217 /*
13464 14218 * TODO after hard reset, update the driver data structures
13465 14219 * 1. update port/phymask mapping table mpt->m_phy_info
13466 14220 * 2. invalid all the entries in hash table
13467 14221 * m_devhdl = 0xffff and m_deviceinfo = 0
13468 14222 * 3. call sas_device_page/expander_page to update hash table
13469 14223 */
13470 14224 mptsas_update_phymask(mpt);
13471 14225 /*
13472 14226 * Invalid the existing entries
13473 14227 *
13474 14228 * XXX - It seems like we should just delete everything here. We are
13475 14229 * holding the lock and are about to refresh all the targets in both
13476 14230 * hashes anyway. Given the path we're in, what outstanding async
13477 14231 * event could possibly be trying to reference one of these things
13478 14232 * without taking the lock, and how would that be useful anyway?
13479 14233 */
13480 14234 for (tp = refhash_first(mpt->m_targets); tp != NULL;
13481 14235 tp = refhash_next(mpt->m_targets, tp)) {
13482 14236 tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
13483 14237 tp->m_deviceinfo = 0;
13484 14238 tp->m_dr_flag = MPTSAS_DR_INACTIVE;
13485 14239 }
13486 14240 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
13487 14241 sp = refhash_next(mpt->m_smp_targets, sp)) {
13488 14242 sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
13489 14243 sp->m_deviceinfo = 0;
13490 14244 }
13491 14245 mpt->m_done_traverse_dev = 0;
13492 14246 mpt->m_done_traverse_smp = 0;
13493 14247 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
13494 14248 mptsas_update_hashtab(mpt);
13495 14249 }
13496 14250
13497 14251 static void
13498 14252 mptsas_config_all(dev_info_t *pdip)
13499 14253 {
13500 14254 dev_info_t *smpdip = NULL;
13501 14255 mptsas_t *mpt = DIP2MPT(pdip);
13502 14256 int phymask = 0;
13503 14257 mptsas_phymask_t phy_mask;
13504 14258 mptsas_target_t *ptgt = NULL;
13505 14259 mptsas_smp_t *psmp;
13506 14260
13507 14261 /*
13508 14262 * Get the phymask associated to the iport
13509 14263 */
13510 14264 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13511 14265 "phymask", 0);
13512 14266
13513 14267 /*
13514 14268 * Enumerate RAID volumes here (phymask == 0).
13515 14269 */
13516 14270 if (phymask == 0) {
13517 14271 mptsas_config_all_viport(pdip);
13518 14272 return;
13519 14273 }
13520 14274
13521 14275 mutex_enter(&mpt->m_mutex);
13522 14276
13523 14277 if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
13524 14278 mptsas_update_hashtab(mpt);
13525 14279 }
13526 14280
13527 14281 for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
13528 14282 psmp = refhash_next(mpt->m_smp_targets, psmp)) {
13529 14283 phy_mask = psmp->m_addr.mta_phymask;
13530 14284 if (phy_mask == phymask) {
13531 14285 smpdip = NULL;
13532 14286 mutex_exit(&mpt->m_mutex);
13533 14287 (void) mptsas_online_smp(pdip, psmp, &smpdip);
13534 14288 mutex_enter(&mpt->m_mutex);
13535 14289 }
13536 14290 }
13537 14291
13538 14292 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13539 14293 ptgt = refhash_next(mpt->m_targets, ptgt)) {
13540 14294 phy_mask = ptgt->m_addr.mta_phymask;
13541 14295 if (phy_mask == phymask) {
13542 14296 mutex_exit(&mpt->m_mutex);
13543 14297 (void) mptsas_config_target(pdip, ptgt);
13544 14298 mutex_enter(&mpt->m_mutex);
13545 14299 }
13546 14300 }
13547 14301 mutex_exit(&mpt->m_mutex);
13548 14302 }
13549 14303
13550 14304 static int
13551 14305 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13552 14306 {
13553 14307 int rval = DDI_FAILURE;
13554 14308 dev_info_t *tdip;
13555 14309
13556 14310 rval = mptsas_config_luns(pdip, ptgt);
13557 14311 if (rval != DDI_SUCCESS) {
13558 14312 /*
13559 14313 * The return value means the SCMD_REPORT_LUNS
13560 14314 * did not execute successfully. The target maybe
13561 14315 * doesn't support such command.
13562 14316 */
13563 14317 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13564 14318 }
13565 14319 return (rval);
13566 14320 }
13567 14321
/*
 * Return failure unless all of the children/paths were freed.
 * If there is any path under the HBA, the return value will always be
 * failure, because we do not call mdi_pi_free for paths.
 */
/*
 * Offline every child devinfo node and every MPxIO client path whose
 * unit address begins with "name" (the "w<wwn>"/"p<phy>" portion in
 * front of the ",<lun>" separator).  Returns DDI_SUCCESS only when all
 * matching child nodes were offlined; any surviving child, or any
 * matching path at all (paths are never mdi_pi_free()d here), forces
 * DDI_FAILURE.
 */
static int
mptsas_offline_target(dev_info_t *pdip, char *name)
{
	dev_info_t *child = NULL, *prechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	int tmp_rval, rval = DDI_SUCCESS;
	char *addr, *cp;
	size_t s;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * Pass 1: plain (non-MPxIO) children of the iport.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		addr = ddi_get_name_addr(child);
		/* Advance to the next sibling before offlining `prechild'. */
		prechild = child;
		child = ddi_get_next_sibling(child);

		if (addr == NULL) {
			continue;
		}
		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		/* Length of the target portion (up to the LUN separator). */
		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
		    NDI_DEVI_REMOVE);
		if (tmp_rval != DDI_SUCCESS) {
			rval = DDI_FAILURE;
			/*
			 * Offline failed; flag the node so it can be
			 * reaped later.
			 */
			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
			    prechild, MPTSAS_DEV_GONE) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "unable to create property for "
				    "SAS %s (MPTSAS_DEV_GONE)", addr);
			}
		}
	}

	/*
	 * Pass 2: MPxIO client paths below the iport.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		addr = MDI_PI(pip)->pi_addr;
		/* Advance before offlining, as above. */
		savepip = pip;
		pip = mdi_get_next_client_path(pdip, pip);
		if (addr == NULL) {
			continue;
		}

		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		(void) mptsas_offline_lun(pdip, NULL, savepip,
		    NDI_DEVI_REMOVE);
		/*
		 * driver will not invoke mdi_pi_free, so path will not
		 * be freed forever, return DDI_FAILURE.
		 */
		rval = DDI_FAILURE;
	}
	return (rval);
}
13645 14399
13646 14400 static int
13647 14401 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13648 14402 mdi_pathinfo_t *rpip, uint_t flags)
13649 14403 {
13650 14404 int rval = DDI_FAILURE;
13651 14405 char *devname;
13652 14406 dev_info_t *cdip, *parent;
13653 14407
13654 14408 if (rpip != NULL) {
13655 14409 parent = scsi_vhci_dip;
13656 14410 cdip = mdi_pi_get_client(rpip);
13657 14411 } else if (rdip != NULL) {
13658 14412 parent = pdip;
13659 14413 cdip = rdip;
13660 14414 } else {
13661 14415 return (DDI_FAILURE);
13662 14416 }
13663 14417
13664 14418 /*
13665 14419 * Make sure node is attached otherwise
13666 14420 * it won't have related cache nodes to
13667 14421 * clean up. i_ddi_devi_attached is
13668 14422 * similiar to i_ddi_node_state(cdip) >=
13669 14423 * DS_ATTACHED.
13670 14424 */
13671 14425 if (i_ddi_devi_attached(cdip)) {
13672 14426
13673 14427 /* Get full devname */
13674 14428 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13675 14429 (void) ddi_deviname(cdip, devname);
13676 14430 /* Clean cache */
13677 14431 (void) devfs_clean(parent, devname + 1,
13678 14432 DV_CLEAN_FORCE);
13679 14433 kmem_free(devname, MAXNAMELEN + 1);
13680 14434 }
13681 14435 if (rpip != NULL) {
13682 14436 if (MDI_PI_IS_OFFLINE(rpip)) {
13683 14437 rval = DDI_SUCCESS;
13684 14438 } else {
13685 14439 rval = mdi_pi_offline(rpip, 0);
13686 14440 }
13687 14441 } else {
13688 14442 rval = ndi_devi_offline(cdip, flags);
13689 14443 }
13690 14444
13691 14445 return (rval);
13692 14446 }
13693 14447
13694 14448 static dev_info_t *
13695 14449 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13696 14450 {
13697 14451 dev_info_t *child = NULL;
13698 14452 char *smp_wwn = NULL;
13699 14453
13700 14454 child = ddi_get_child(parent);
13701 14455 while (child) {
13702 14456 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13703 14457 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13704 14458 != DDI_SUCCESS) {
13705 14459 child = ddi_get_next_sibling(child);
13706 14460 continue;
13707 14461 }
13708 14462
13709 14463 if (strcmp(smp_wwn, str_wwn) == 0) {
13710 14464 ddi_prop_free(smp_wwn);
13711 14465 break;
13712 14466 }
13713 14467 child = ddi_get_next_sibling(child);
13714 14468 ddi_prop_free(smp_wwn);
13715 14469 }
13716 14470 return (child);
13717 14471 }
13718 14472
13719 14473 static int
13720 14474 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13721 14475 {
13722 14476 int rval = DDI_FAILURE;
13723 14477 char *devname;
13724 14478 char wwn_str[MPTSAS_WWN_STRLEN];
13725 14479 dev_info_t *cdip;
13726 14480
13727 14481 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
13728 14482
13729 14483 cdip = mptsas_find_smp_child(pdip, wwn_str);
13730 14484
13731 14485 if (cdip == NULL)
13732 14486 return (DDI_SUCCESS);
13733 14487
13734 14488 /*
13735 14489 * Make sure node is attached otherwise
13736 14490 * it won't have related cache nodes to
13737 14491 * clean up. i_ddi_devi_attached is
13738 14492 * similiar to i_ddi_node_state(cdip) >=
13739 14493 * DS_ATTACHED.
13740 14494 */
13741 14495 if (i_ddi_devi_attached(cdip)) {
13742 14496
13743 14497 /* Get full devname */
13744 14498 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13745 14499 (void) ddi_deviname(cdip, devname);
13746 14500 /* Clean cache */
13747 14501 (void) devfs_clean(pdip, devname + 1,
13748 14502 DV_CLEAN_FORCE);
13749 14503 kmem_free(devname, MAXNAMELEN + 1);
13750 14504 }
13751 14505
13752 14506 rval = ndi_devi_offline(cdip, flags);
13753 14507
13754 14508 return (rval);
13755 14509 }
13756 14510
13757 14511 static dev_info_t *
13758 14512 mptsas_find_child(dev_info_t *pdip, char *name)
13759 14513 {
13760 14514 dev_info_t *child = NULL;
13761 14515 char *rname = NULL;
13762 14516 int rval = DDI_FAILURE;
13763 14517
13764 14518 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13765 14519
13766 14520 child = ddi_get_child(pdip);
13767 14521 while (child) {
13768 14522 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13769 14523 if (rval != DDI_SUCCESS) {
13770 14524 child = ddi_get_next_sibling(child);
13771 14525 bzero(rname, SCSI_MAXNAMELEN);
13772 14526 continue;
13773 14527 }
13774 14528
13775 14529 if (strcmp(rname, name) == 0) {
13776 14530 break;
13777 14531 }
13778 14532 child = ddi_get_next_sibling(child);
13779 14533 bzero(rname, SCSI_MAXNAMELEN);
13780 14534 }
13781 14535
13782 14536 kmem_free(rname, SCSI_MAXNAMELEN);
13783 14537
13784 14538 return (child);
13785 14539 }
13786 14540
13787 14541
13788 14542 static dev_info_t *
13789 14543 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13790 14544 {
13791 14545 dev_info_t *child = NULL;
13792 14546 char *name = NULL;
13793 14547 char *addr = NULL;
13794 14548
13795 14549 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13796 14550 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13797 14551 (void) sprintf(name, "%016"PRIx64, sasaddr);
13798 14552 (void) sprintf(addr, "w%s,%x", name, lun);
13799 14553 child = mptsas_find_child(pdip, addr);
13800 14554 kmem_free(name, SCSI_MAXNAMELEN);
13801 14555 kmem_free(addr, SCSI_MAXNAMELEN);
13802 14556 return (child);
13803 14557 }
13804 14558
13805 14559 static dev_info_t *
13806 14560 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13807 14561 {
13808 14562 dev_info_t *child;
13809 14563 char *addr;
13810 14564
13811 14565 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13812 14566 (void) sprintf(addr, "p%x,0", phy);
13813 14567 child = mptsas_find_child(pdip, addr);
13814 14568 kmem_free(addr, SCSI_MAXNAMELEN);
13815 14569 return (child);
13816 14570 }
13817 14571
13818 14572 static mdi_pathinfo_t *
13819 14573 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13820 14574 {
13821 14575 mdi_pathinfo_t *path;
13822 14576 char *addr = NULL;
13823 14577
13824 14578 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13825 14579 (void) sprintf(addr, "p%x,0", phy);
13826 14580 path = mdi_pi_find(pdip, NULL, addr);
13827 14581 kmem_free(addr, SCSI_MAXNAMELEN);
13828 14582 return (path);
13829 14583 }
13830 14584
13831 14585 static mdi_pathinfo_t *
13832 14586 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13833 14587 {
13834 14588 mdi_pathinfo_t *path;
13835 14589 char *name = NULL;
13836 14590 char *addr = NULL;
13837 14591
13838 14592 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13839 14593 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13840 14594 (void) sprintf(name, "%016"PRIx64, sasaddr);
13841 14595 (void) sprintf(addr, "w%s,%x", name, lun);
13842 14596 path = mdi_pi_find(parent, NULL, addr);
13843 14597 kmem_free(name, SCSI_MAXNAMELEN);
13844 14598 kmem_free(addr, SCSI_MAXNAMELEN);
13845 14599
13846 14600 return (path);
13847 14601 }
13848 14602
/*
 * Create the devinfo/pathinfo node(s) for one LUN of a target.  Tries
 * to obtain a devid GUID from INQUIRY VPD page 0x83 so the LUN can be
 * enumerated under scsi_vhci; falls back to a physical (non-MPxIO)
 * node when no GUID is available or MPxIO is disabled.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int i = 0;
	uchar_t *inq83 = NULL;
	int inq83_len1 = 0xFF;		/* allocation length for page 0x83 */
	int inq83_len = 0;		/* actual returned length */
	int rval = DDI_FAILURE;
	ddi_devid_t devid;
	char *guid = NULL;
	int target = ptgt->m_devhdl;
	mdi_pathinfo_t *pip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	/* Retry loop: one attempt per second until timeout. */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall through to a
			 * physical node even when page 0x83 cannot be read.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* Prefer a virtual (scsi_vhci) LUN when we have a GUID and MPxIO. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
13972 14726
13973 14727 static int
13974 14728 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13975 14729 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13976 14730 {
13977 14731 int target;
13978 14732 char *nodename = NULL;
13979 14733 char **compatible = NULL;
13980 14734 int ncompatible = 0;
13981 14735 int mdi_rtn = MDI_FAILURE;
13982 14736 int rval = DDI_FAILURE;
13983 14737 char *old_guid = NULL;
13984 14738 mptsas_t *mpt = DIP2MPT(pdip);
13985 14739 char *lun_addr = NULL;
13986 14740 char *wwn_str = NULL;
13987 14741 char *attached_wwn_str = NULL;
13988 14742 char *component = NULL;
13989 14743 uint8_t phy = 0xFF;
13990 14744 uint64_t sas_wwn;
13991 14745 int64_t lun64 = 0;
13992 14746 uint32_t devinfo;
13993 14747 uint16_t dev_hdl;
13994 14748 uint16_t pdev_hdl;
13995 14749 uint64_t dev_sas_wwn;
13996 14750 uint64_t pdev_sas_wwn;
13997 14751 uint32_t pdev_info;
13998 14752 uint8_t physport;
13999 14753 uint8_t phy_id;
14000 14754 uint32_t page_address;
14001 14755 uint16_t bay_num, enclosure;
14002 14756 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14003 14757 uint32_t dev_info;
14004 14758
14005 14759 mutex_enter(&mpt->m_mutex);
14006 14760 target = ptgt->m_devhdl;
14007 14761 sas_wwn = ptgt->m_addr.mta_wwn;
14008 14762 devinfo = ptgt->m_deviceinfo;
14009 14763 phy = ptgt->m_phynum;
14010 14764 mutex_exit(&mpt->m_mutex);
14011 14765
14012 14766 if (sas_wwn) {
14013 14767 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14014 14768 } else {
14015 14769 *pip = mptsas_find_path_phy(pdip, phy);
14016 14770 }
14017 14771
14018 14772 if (*pip != NULL) {
14019 14773 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14020 14774 ASSERT(*lun_dip != NULL);
14021 14775 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14022 14776 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14023 14777 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14024 14778 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14025 14779 /*
14026 14780 * Same path back online again.
14027 14781 */
14028 14782 (void) ddi_prop_free(old_guid);
14029 14783 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14030 14784 (!MDI_PI_IS_STANDBY(*pip)) &&
14031 14785 (ptgt->m_tgt_unconfigured == 0)) {
14032 14786 rval = mdi_pi_online(*pip, 0);
14033 14787 mutex_enter(&mpt->m_mutex);
14034 14788 ptgt->m_led_status = 0;
14035 14789 (void) mptsas_flush_led_status(mpt,
14036 14790 ptgt);
14037 14791 mutex_exit(&mpt->m_mutex);
14038 14792 } else {
14039 14793 rval = DDI_SUCCESS;
14040 14794 }
14041 14795 if (rval != DDI_SUCCESS) {
14042 14796 mptsas_log(mpt, CE_WARN, "path:target: "
14043 14797 "%x, lun:%x online failed!", target,
14044 14798 lun);
14045 14799 *pip = NULL;
14046 14800 *lun_dip = NULL;
14047 14801 }
14048 14802 return (rval);
14049 14803 } else {
14050 14804 /*
14051 14805 * The GUID of the LUN has changed which maybe
14052 14806 * because customer mapped another volume to the
14053 14807 * same LUN.
14054 14808 */
14055 14809 mptsas_log(mpt, CE_WARN, "The GUID of the "
14056 14810 "target:%x, lun:%x was changed, maybe "
14057 14811 "because someone mapped another volume "
14058 14812 "to the same LUN", target, lun);
14059 14813 (void) ddi_prop_free(old_guid);
14060 14814 if (!MDI_PI_IS_OFFLINE(*pip)) {
14061 14815 rval = mdi_pi_offline(*pip, 0);
14062 14816 if (rval != MDI_SUCCESS) {
14063 14817 mptsas_log(mpt, CE_WARN, "path:"
14064 14818 "target:%x, lun:%x offline "
14065 14819 "failed!", target, lun);
14066 14820 *pip = NULL;
14067 14821 *lun_dip = NULL;
14068 14822 return (DDI_FAILURE);
14069 14823 }
14070 14824 }
14071 14825 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14072 14826 mptsas_log(mpt, CE_WARN, "path:target:"
14073 14827 "%x, lun:%x free failed!", target,
14074 14828 lun);
14075 14829 *pip = NULL;
14076 14830 *lun_dip = NULL;
14077 14831 return (DDI_FAILURE);
14078 14832 }
14079 14833 }
14080 14834 } else {
14081 14835 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14082 14836 "property for path:target:%x, lun:%x", target, lun);
14083 14837 *pip = NULL;
14084 14838 *lun_dip = NULL;
14085 14839 return (DDI_FAILURE);
14086 14840 }
14087 14841 }
14088 14842 scsi_hba_nodename_compatible_get(inq, NULL,
14089 14843 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14090 14844
14091 14845 /*
14092 14846 * if nodename can't be determined then print a message and skip it
14093 14847 */
14094 14848 if (nodename == NULL) {
14095 14849 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14096 14850 "driver for target%d lun %d dtype:0x%02x", target, lun,
14097 14851 inq->inq_dtype);
14098 14852 return (DDI_FAILURE);
14099 14853 }
14100 14854
14101 14855 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14102 14856 /* The property is needed by MPAPI */
14103 14857 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14104 14858
14105 14859 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14106 14860 if (guid) {
14107 14861 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14108 14862 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14109 14863 } else {
14110 14864 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14111 14865 (void) sprintf(wwn_str, "p%x", phy);
14112 14866 }
14113 14867
14114 14868 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14115 14869 guid, lun_addr, compatible, ncompatible,
14116 14870 0, pip);
14117 14871 if (mdi_rtn == MDI_SUCCESS) {
14118 14872
14119 14873 if (mdi_prop_update_string(*pip, MDI_GUID,
14120 14874 guid) != DDI_SUCCESS) {
14121 14875 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14122 14876 "create prop for target %d lun %d (MDI_GUID)",
14123 14877 target, lun);
14124 14878 mdi_rtn = MDI_FAILURE;
14125 14879 goto virt_create_done;
14126 14880 }
14127 14881
14128 14882 if (mdi_prop_update_int(*pip, LUN_PROP,
14129 14883 lun) != DDI_SUCCESS) {
14130 14884 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14131 14885 "create prop for target %d lun %d (LUN_PROP)",
14132 14886 target, lun);
14133 14887 mdi_rtn = MDI_FAILURE;
14134 14888 goto virt_create_done;
14135 14889 }
14136 14890 lun64 = (int64_t)lun;
14137 14891 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14138 14892 lun64) != DDI_SUCCESS) {
14139 14893 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14140 14894 "create prop for target %d (LUN64_PROP)",
14141 14895 target);
14142 14896 mdi_rtn = MDI_FAILURE;
14143 14897 goto virt_create_done;
14144 14898 }
14145 14899 if (mdi_prop_update_string_array(*pip, "compatible",
14146 14900 compatible, ncompatible) !=
14147 14901 DDI_PROP_SUCCESS) {
14148 14902 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14149 14903 "create prop for target %d lun %d (COMPATIBLE)",
14150 14904 target, lun);
14151 14905 mdi_rtn = MDI_FAILURE;
14152 14906 goto virt_create_done;
14153 14907 }
14154 14908 if (sas_wwn && (mdi_prop_update_string(*pip,
14155 14909 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14156 14910 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14157 14911 "create prop for target %d lun %d "
14158 14912 "(target-port)", target, lun);
14159 14913 mdi_rtn = MDI_FAILURE;
14160 14914 goto virt_create_done;
14161 14915 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14162 14916 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14163 14917 /*
14164 14918 * Direct attached SATA device without DeviceName
14165 14919 */
14166 14920 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14167 14921 "create prop for SAS target %d lun %d "
14168 14922 "(sata-phy)", target, lun);
14169 14923 mdi_rtn = MDI_FAILURE;
14170 14924 goto virt_create_done;
14171 14925 }
14172 14926 mutex_enter(&mpt->m_mutex);
14173 14927
14174 14928 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14175 14929 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14176 14930 (uint32_t)ptgt->m_devhdl;
14177 14931 rval = mptsas_get_sas_device_page0(mpt, page_address,
14178 14932 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14179 14933 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14180 14934 if (rval != DDI_SUCCESS) {
14181 14935 mutex_exit(&mpt->m_mutex);
14182 14936 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14183 14937 "parent device for handle %d", page_address);
14184 14938 mdi_rtn = MDI_FAILURE;
14185 14939 goto virt_create_done;
14186 14940 }
14187 14941
14188 14942 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14189 14943 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14190 14944 rval = mptsas_get_sas_device_page0(mpt, page_address,
14191 14945 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14192 14946 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14193 14947 if (rval != DDI_SUCCESS) {
14194 14948 mutex_exit(&mpt->m_mutex);
14195 14949 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14196 14950 "device info for handle %d", page_address);
14197 14951 mdi_rtn = MDI_FAILURE;
14198 14952 goto virt_create_done;
14199 14953 }
14200 14954
14201 14955 mutex_exit(&mpt->m_mutex);
14202 14956
14203 14957 /*
14204 14958 * If this device direct attached to the controller
14205 14959 * set the attached-port to the base wwid
14206 14960 */
14207 14961 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14208 14962 != DEVINFO_DIRECT_ATTACHED) {
14209 14963 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14210 14964 pdev_sas_wwn);
14211 14965 } else {
14212 14966 /*
14213 14967 * Update the iport's attached-port to guid
14214 14968 */
14215 14969 if (sas_wwn == 0) {
14216 14970 (void) sprintf(wwn_str, "p%x", phy);
14217 14971 } else {
14218 14972 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14219 14973 }
14220 14974 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14221 14975 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14222 14976 DDI_PROP_SUCCESS) {
14223 14977 mptsas_log(mpt, CE_WARN,
14224 14978 "mptsas unable to create "
14225 14979 "property for iport target-port"
14226 14980 " %s (sas_wwn)",
14227 14981 wwn_str);
14228 14982 mdi_rtn = MDI_FAILURE;
14229 14983 goto virt_create_done;
14230 14984 }
14231 14985
14232 14986 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14233 14987 mpt->un.m_base_wwid);
14234 14988 }
14235 14989
14236 14990 if (mdi_prop_update_string(*pip,
14237 14991 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14238 14992 DDI_PROP_SUCCESS) {
14239 14993 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14240 14994 "property for iport attached-port %s (sas_wwn)",
14241 14995 attached_wwn_str);
14242 14996 mdi_rtn = MDI_FAILURE;
14243 14997 goto virt_create_done;
14244 14998 }
14245 14999
14246 15000
14247 15001 if (inq->inq_dtype == 0) {
14248 15002 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14249 15003 /*
14250 15004 * set obp path for pathinfo
14251 15005 */
14252 15006 (void) snprintf(component, MAXPATHLEN,
14253 15007 "disk@%s", lun_addr);
14254 15008
14255 15009 if (mdi_pi_pathname_obp_set(*pip, component) !=
14256 15010 DDI_SUCCESS) {
14257 15011 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14258 15012 "unable to set obp-path for object %s",
14259 15013 component);
14260 15014 mdi_rtn = MDI_FAILURE;
14261 15015 goto virt_create_done;
14262 15016 }
14263 15017 }
14264 15018
14265 15019 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14266 15020 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14267 15021 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14268 15022 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14269 15023 "pm-capable", 1)) !=
14270 15024 DDI_PROP_SUCCESS) {
14271 15025 mptsas_log(mpt, CE_WARN, "mptsas driver"
14272 15026 "failed to create pm-capable "
14273 15027 "property, target %d", target);
14274 15028 mdi_rtn = MDI_FAILURE;
14275 15029 goto virt_create_done;
14276 15030 }
14277 15031 }
14278 15032 /*
14279 15033 * Create the phy-num property
14280 15034 */
14281 15035 if (mdi_prop_update_int(*pip, "phy-num",
14282 15036 ptgt->m_phynum) != DDI_SUCCESS) {
14283 15037 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14284 15038 "create phy-num property for target %d lun %d",
14285 15039 target, lun);
14286 15040 mdi_rtn = MDI_FAILURE;
14287 15041 goto virt_create_done;
14288 15042 }
14289 15043 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14290 15044 mdi_rtn = mdi_pi_online(*pip, 0);
14291 15045 if (mdi_rtn == MDI_SUCCESS) {
14292 15046 mutex_enter(&mpt->m_mutex);
14293 15047 ptgt->m_led_status = 0;
14294 15048 (void) mptsas_flush_led_status(mpt, ptgt);
14295 15049 mutex_exit(&mpt->m_mutex);
14296 15050 }
14297 15051 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14298 15052 mdi_rtn = MDI_FAILURE;
14299 15053 }
14300 15054 virt_create_done:
14301 15055 if (*pip && mdi_rtn != MDI_SUCCESS) {
14302 15056 (void) mdi_pi_free(*pip, 0);
14303 15057 *pip = NULL;
14304 15058 *lun_dip = NULL;
14305 15059 }
14306 15060 }
14307 15061
14308 15062 scsi_hba_nodename_compatible_free(nodename, compatible);
14309 15063 if (lun_addr != NULL) {
14310 15064 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14311 15065 }
14312 15066 if (wwn_str != NULL) {
14313 15067 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14314 15068 }
14315 15069 if (component != NULL) {
14316 15070 kmem_free(component, MAXPATHLEN);
14317 15071 }
14318 15072
14319 15073 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14320 15074 }
14321 15075
/*
 * Allocate, configure, and online a non-MPxIO ("physical") child devinfo
 * node for one LUN of the given target under the iport pdip.
 *
 *   pdip    - iport parent devinfo node
 *   inq     - INQUIRY data used to derive the node name and compatible list
 *   guid    - device GUID, or NULL if none (e.g. direct-attached SATA)
 *   lun_dip - out: the allocated/onlined child node (NULL on failure)
 *   ptgt    - target softstate for this device
 *   lun     - logical unit number being created
 *
 * Returns DDI_SUCCESS when the node was created and onlined, else
 * DDI_FAILURE.  On any property-setup failure the partially-built node is
 * torn down at phys_create_done.
 */
static int
mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
    char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			target;
	int			rval;
	int			ndi_rtn = NDI_FAILURE;
	uint64_t		be_sas_wwn;
	char			*nodename = NULL;
	char			**compatible = NULL;
	int			ncompatible = 0;
	int			instance = 0;
	mptsas_t		*mpt = DIP2MPT(pdip);
	char			*wwn_str = NULL;
	char			*component = NULL;
	char			*attached_wwn_str = NULL;
	uint8_t			phy = 0xFF;
	uint64_t		sas_wwn;
	uint32_t		devinfo;
	uint16_t		dev_hdl;
	uint16_t		pdev_hdl;
	uint64_t		pdev_sas_wwn;
	uint64_t		dev_sas_wwn;
	uint32_t		pdev_info;
	uint8_t			physport;
	uint8_t			phy_id;
	uint32_t		page_address;
	uint16_t		bay_num, enclosure;
	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
	uint32_t		dev_info;
	int64_t			lun64 = 0;

	/* Snapshot the target's identity fields under the softstate lock. */
	mutex_enter(&mpt->m_mutex);
	target = ptgt->m_devhdl;
	sas_wwn = ptgt->m_addr.mta_wwn;
	devinfo = ptgt->m_deviceinfo;
	phy = ptgt->m_phynum;
	mutex_exit(&mpt->m_mutex);

	/*
	 * generate compatible property with binding-set "mpt"
	 */
	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);

	/*
	 * if nodename can't be determined then print a message and skip it
	 */
	if (nodename == NULL) {
		mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
		    "for target %d lun %d", target, lun);
		return (DDI_FAILURE);
	}

	ndi_rtn = ndi_devi_alloc(pdip, nodename,
	    DEVI_SID_NODEID, lun_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {

		if (ndi_prop_update_int(DDI_DEV_T_NONE,
		    *lun_dip, LUN_PROP, lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (LUN_PROP)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		lun64 = (int64_t)lun;
		if (ndi_prop_update_int64(DDI_DEV_T_NONE,
		    *lun_dip, LUN64_PROP, lun64) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun64 %d (LUN64_PROP)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    *lun_dip, "compatible", compatible, ncompatible)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for target %d lun %d (COMPATIBLE)",
			    target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * We need the SAS WWN for non-multipath devices, so
		 * we'll use the same property as that multipathing
		 * devices need to present for MPAPI. If we don't have
		 * a WWN (e.g. parallel SCSI), don't create the prop.
		 */
		wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
		if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
		    != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(target-port)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/* port-wwn is presented big-endian, per convention. */
		be_sas_wwn = BE_64(sas_wwn);
		if (sas_wwn && ndi_prop_update_byte_array(
		    DDI_DEV_T_NONE, *lun_dip, "port-wwn",
		    (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(port-wwn)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		} else if ((sas_wwn == 0) && (ndi_prop_update_int(
		    DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
		    DDI_PROP_SUCCESS)) {
			/*
			 * Direct attached SATA device without DeviceName
			 */
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SAS target %d lun %d "
			    "(sata-phy)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to"
			    "create property for SAS target %d lun %d"
			    " (SAS_PROP)", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}
		if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
			mptsas_log(mpt, CE_WARN, "mptsas unable "
			    "to create guid property for target %d "
			    "lun %d", target, lun);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/*
		 * The following code is to set properties for SM-HBA support,
		 * it doesn't apply to RAID volumes
		 */
		if (ptgt->m_addr.mta_phymask == 0)
			goto phys_raid_lun;

		mutex_enter(&mpt->m_mutex);

		/* Fetch device page0 for this target's own handle. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)ptgt->m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &dev_sas_wwn, &dev_info,
		    &physport, &phy_id, &pdev_hdl,
		    &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get"
			    "parent device for handle %d.", page_address);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/* Then fetch page0 for the parent (attached) device. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &pdev_sas_wwn, &pdev_info,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "device for handle %d.", page_address);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		mutex_exit(&mpt->m_mutex);

		/*
		 * If this device direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
			    pdev_sas_wwn);
		} else {
			/*
			 * Update the iport's attached-port to guid
			 */
			if (sas_wwn == 0) {
				(void) sprintf(wwn_str, "p%x", phy);
			} else {
				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
			}
			if (ddi_prop_update_string(DDI_DEV_T_NONE,
			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create "
				    "property for iport target-port"
				    " %s (sas_wwn)",
				    wwn_str);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}

			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		/*
		 * NOTE(review): attached_wwn_str is still NULL here, so this
		 * warning passes NULL for %s; pdev_wwn_str was probably
		 * intended -- confirm before changing.
		 */
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to create "
			    "property for iport attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto phys_create_done;
		}

		/* Tag SATA/ATAPI devices with a "variant" property for sd. */
		if (IS_SATA_DEVICE(dev_info)) {
			if (ndi_prop_update_string(DDI_DEV_T_NONE,
			    *lun_dip, MPTSAS_VARIANT, "sata") !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create "
				    "property for device variant ");
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}

		if (IS_ATAPI_DEVICE(dev_info)) {
			if (ndi_prop_update_string(DDI_DEV_T_NONE,
			    *lun_dip, MPTSAS_VARIANT, "atapi") !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create "
				    "property for device variant ");
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}

phys_raid_lun:
		/*
		 * if this is a SAS controller, and the target is a SATA
		 * drive, set the 'pm-capable' property for sd and if on
		 * an OPL platform, also check if this is an ATAPI
		 * device.
		 */
		instance = ddi_get_instance(mpt->m_dip);
		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
			NDBG2(("mptsas%d: creating pm-capable property, "
			    "target %d", instance, target));

			if ((ndi_prop_update_int(DDI_DEV_T_NONE,
			    *lun_dip, "pm-capable", 1)) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas "
				    "failed to create pm-capable "
				    "property, target %d", target);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}

		}

		if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
			/*
			 * add 'obp-path' properties for devinfo
			 */
			/*
			 * NOTE(review): wwn_str is a char * here, so this
			 * bzero clears only sizeof (char *) bytes, not the
			 * full MPTSAS_WWN_STRLEN buffer; the sprintf below
			 * masks it, but confirm intent.
			 */
			bzero(wwn_str, sizeof (wwn_str));
			(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			if (guid) {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@w%s,%x", wwn_str, lun);
			} else {
				(void) snprintf(component, MAXPATHLEN,
				    "disk@p%x,%x", phy, lun);
			}
			if (ddi_pathname_obp_set(*lun_dip, component)
			    != DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
				    "unable to set obp-path for SAS "
				    "object %s", component);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}
		/*
		 * Create the phy-num property for non-raid disk
		 */
		if (ptgt->m_addr.mta_phymask != 0) {
			if (ndi_prop_update_int(DDI_DEV_T_NONE,
			    *lun_dip, "phy-num", ptgt->m_phynum) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "failed to create phy-num property for "
				    "target %d", target);
				ndi_rtn = NDI_FAILURE;
				goto phys_create_done;
			}
		}
phys_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
		}
		if (ndi_rtn == NDI_SUCCESS) {
			/* Reset the locator/fault LED state for the target. */
			mutex_enter(&mpt->m_mutex);
			ptgt->m_led_status = 0;
			(void) mptsas_flush_led_status(mpt, ptgt);
			mutex_exit(&mpt->m_mutex);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas driver unable to online "
			    "target %d lun %d", target, lun));
			ndi_prop_remove_all(*lun_dip);
			(void) ndi_devi_free(*lun_dip);
			*lun_dip = NULL;
		}
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);

	if (wwn_str != NULL) {
		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
	}
	if (component != NULL) {
		kmem_free(component, MAXPATHLEN);
	}


	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
14681 15435
14682 15436 static int
14683 15437 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14684 15438 {
14685 15439 mptsas_t *mpt = DIP2MPT(pdip);
14686 15440 struct smp_device smp_sd;
14687 15441
14688 15442 /* XXX An HBA driver should not be allocating an smp_device. */
14689 15443 bzero(&smp_sd, sizeof (struct smp_device));
14690 15444 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14691 15445 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14692 15446
14693 15447 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14694 15448 return (NDI_FAILURE);
14695 15449 return (NDI_SUCCESS);
14696 15450 }
14697 15451
14698 15452 static int
14699 15453 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14700 15454 {
14701 15455 mptsas_t *mpt = DIP2MPT(pdip);
14702 15456 mptsas_smp_t *psmp = NULL;
14703 15457 int rval;
14704 15458 int phymask;
14705 15459
14706 15460 /*
14707 15461 * Get the physical port associated to the iport
14708 15462 * PHYMASK TODO
14709 15463 */
14710 15464 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14711 15465 "phymask", 0);
14712 15466 /*
14713 15467 * Find the smp node in hash table with specified sas address and
14714 15468 * physical port
14715 15469 */
14716 15470 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14717 15471 if (psmp == NULL) {
14718 15472 return (DDI_FAILURE);
14719 15473 }
14720 15474
14721 15475 rval = mptsas_online_smp(pdip, psmp, smp_dip);
14722 15476
14723 15477 return (rval);
14724 15478 }
14725 15479
/*
 * Allocate, configure, and online a child "smp" devinfo node for the
 * expander described by smp_node under the iport pdip, and (for
 * direct-attached expanders on legacy iports) refresh the iport's
 * SM-HBA phy properties.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE; on success *smp_dip points at the
 * (possibly pre-existing) child node.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char		wwn_str[MPTSAS_WWN_STRLEN];
	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
	int		ndi_rtn = NDI_FAILURE;
	int		rval = 0;
	mptsas_smp_t	dev_info;
	uint32_t	page_address;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint16_t	dev_hdl;
	uint64_t	sas_wwn;
	uint64_t	smp_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint16_t	pdev_hdl;
	uint8_t		numphys = 0;
	uint16_t	i = 0;
	char		phymask[MPTSAS_MAX_PHYS];
	char		*iport = NULL;
	mptsas_phymask_t	phy_mask = 0;
	uint16_t	attached_devhdl;
	uint16_t	bay_num, enclosure;

	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured successfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured?  Return the existing child. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page0 gives us the parent device handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 of the parent: yields its SAS WWN. */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 of the expander itself. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Recover this iport's phymask by matching its unit
		 * address against the per-phy masks.
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		/* Count the phys belonging to this port. */
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
		    &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
14978 15732
/*
 * smp transport routine: SCSA SMP entry point.  Translates the generic
 * smp_pkt into an MPI2 SMP Passthrough request, executes it synchronously
 * via mptsas_do_passthru(), and maps IOC/SAS status back into
 * smp_pkt_reason errno values.  Returns DDI_SUCCESS/DDI_FAILURE.
 *
 * NOTE(review): the -4 adjustments to reqsize/rspsize drop the trailing
 * SMP CRC, which the IOC generates/strips itself; callers are assumed to
 * supply sizes >= 4.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint8_t				direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15063 15817
/*
 * Map a (phymask, phy number) pair to its target softstate.  If we didn't
 * get a match in the target hash, we need to get sas page0 for each device
 * until we get a match.  If that also fails, return NULL.
 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
{
	int i, j = 0;
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	/* More than one phy in the mask means a wide port: no match. */
	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* Fast path: linear search of the cached target hash by phy. */
	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
	    &phy);
	if (ptgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (ptgt);
	}

	/* Full traversal already done and nothing found: give up. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the incremental traversal has gotten. */
		mpt->m_dev_handle = cur_handle;

		/* A phy-addressed device has no WWN; match on phy number. */
		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
15139 15893
/*
 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
 * If we didn't get a match, we need to get sas page0 for each device
 * until we get a match.
 * If failed, return NULL
 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_target_t	*tmp_tgt = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);

	/* Fast path: hash lookup of targets already discovered. */
	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* Full traversal already done and nothing found: give up. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (;;) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the incremental traversal has gotten. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_addr.mta_wwn) &&
		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
15210 15964
/*
 * Look up (or, if discovery is incomplete, discover) the SMP target
 * (expander) whose WWN is `wwid' on the phy(s) in `phymask'.
 * Returns NULL if no such expander exists.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_smp_t	smp_node, *psmp = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the expander may already be in the SMP target hash. */
	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/*
	 * If expander discovery has already completed, the hash is
	 * authoritative and the miss above is final.
	 */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (;;) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			/* No more expanders; psmp is NULL. */
			break;
		}
		/* Record progress and cache the node in the SMP hash. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(mpt, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
		    (psmp->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15257 16011
15258 16012 mptsas_target_t *
15259 16013 mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
15260 16014 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15261 16015 {
15262 16016 mptsas_target_t *tmp_tgt = NULL;
15263 16017 mptsas_target_addr_t addr;
15264 16018
15265 16019 addr.mta_wwn = wwid;
15266 16020 addr.mta_phymask = phymask;
15267 16021 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
15268 16022 if (tmp_tgt != NULL) {
15269 16023 NDBG20(("Hash item already exist"));
15270 16024 tmp_tgt->m_deviceinfo = devinfo;
15271 16025 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
15272 16026 return (tmp_tgt);
15273 16027 }
15274 16028 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15275 16029 if (tmp_tgt == NULL) {
15276 16030 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15277 16031 return (NULL);
15278 16032 }
15279 16033 tmp_tgt->m_devhdl = devhdl;
15280 16034 tmp_tgt->m_addr.mta_wwn = wwid;
15281 16035 tmp_tgt->m_deviceinfo = devinfo;
15282 16036 tmp_tgt->m_addr.mta_phymask = phymask;
15283 16037 tmp_tgt->m_phynum = phynum;
15284 16038 /* Initialized the tgt structure */
15285 16039 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15286 16040 tmp_tgt->m_qfull_retry_interval =
15287 16041 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15288 16042 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15289 16043 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
15290 16044
15291 16045 refhash_insert(mpt->m_targets, tmp_tgt);
15292 16046
15293 16047 return (tmp_tgt);
15294 16048 }
15295 16049
15296 16050 static void
15297 16051 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
15298 16052 {
15299 16053 dst->m_devhdl = src->m_devhdl;
15300 16054 dst->m_deviceinfo = src->m_deviceinfo;
15301 16055 dst->m_pdevhdl = src->m_pdevhdl;
15302 16056 dst->m_pdevinfo = src->m_pdevinfo;
15303 16057 }
15304 16058
15305 16059 static mptsas_smp_t *
15306 16060 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
15307 16061 {
15308 16062 mptsas_target_addr_t addr;
15309 16063 mptsas_smp_t *ret_data;
15310 16064
15311 16065 addr.mta_wwn = data->m_addr.mta_wwn;
15312 16066 addr.mta_phymask = data->m_addr.mta_phymask;
15313 16067 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
15314 16068 /*
15315 16069 * If there's already a matching SMP target, update its fields
15316 16070 * in place. Since the address is not changing, it's safe to do
15317 16071 * this. We cannot just bcopy() here because the structure we've
15318 16072 * been given has invalid hash links.
15319 16073 */
15320 16074 if (ret_data != NULL) {
15321 16075 mptsas_smp_target_copy(data, ret_data);
15322 16076 return (ret_data);
15323 16077 }
15324 16078
15325 16079 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15326 16080 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15327 16081 refhash_insert(mpt->m_smp_targets, ret_data);
15328 16082 return (ret_data);
15329 16083 }
15330 16084
15331 16085 /*
15332 16086 * Functions for SGPIO LED support
15333 16087 */
15334 16088 static dev_info_t *
15335 16089 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
15336 16090 {
15337 16091 dev_info_t *dip;
15338 16092 int prop;
15339 16093 dip = e_ddi_hold_devi_by_dev(dev, 0);
15340 16094 if (dip == NULL)
15341 16095 return (dip);
15342 16096 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
15343 16097 "phymask", 0);
15344 16098 *phymask = (mptsas_phymask_t)prop;
15345 16099 ddi_release_devi(dip);
15346 16100 return (dip);
15347 16101 }
15348 16102 static mptsas_target_t *
15349 16103 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
15350 16104 {
15351 16105 uint8_t phynum;
15352 16106 uint64_t wwn;
15353 16107 int lun;
15354 16108 mptsas_target_t *ptgt = NULL;
15355 16109
15356 16110 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
15357 16111 return (NULL);
15358 16112 }
15359 16113 if (addr[0] == 'w') {
15360 16114 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
15361 16115 } else {
15362 16116 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
15363 16117 }
15364 16118 return (ptgt);
15365 16119 }
15366 16120
15367 16121 static int
15368 16122 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
15369 16123 {
15370 16124 uint32_t slotstatus = 0;
15371 16125
15372 16126 /* Build an MPI2 Slot Status based on our view of the world */
15373 16127 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
15374 16128 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
15375 16129 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
15376 16130 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
15377 16131 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
15378 16132 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
15379 16133
15380 16134 /* Write it to the controller */
15381 16135 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
15382 16136 slotstatus, ptgt->m_slot_num));
15383 16137 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
15384 16138 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
15385 16139 }
15386 16140
15387 16141 /*
15388 16142 * send sep request, use enclosure/slot addressing
15389 16143 */
static int
mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t	req;
	Mpi2SepReply_t		rep;
	int			ret;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * We only support SEP control of directly-attached targets, in which
	 * case the "SEP" we're talking to is a virtual one contained within
	 * the HBA itself. This is necessary because DA targets typically have
	 * no other mechanism for LED control. Targets for which a separate
	 * enclosure service processor exists should be controlled via ses(7d)
	 * or sgen(7d). Furthermore, since such requests can time out, they
	 * should be made in user context rather than in response to
	 * asynchronous fabric changes.
	 *
	 * In addition, we do not support this operation for RAID volumes,
	 * since there is no slot associated with them.
	 */
	if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
	    ptgt->m_addr.mta_phymask == 0) {
		return (ENOTTY);
	}

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	/* Build the request; multi-byte fields are little-endian on the wire. */
	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	/* 60-second timeout; FKIOCTL marks the buffers as kernel addresses. */
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (ret);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
		    LE_32(rep.IOCLogInfo));
		/* Map the MPI2 IOC status to the closest errno. */
		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_INVALID_FUNCTION:
		case MPI2_IOCSTATUS_INVALID_VPID:
		case MPI2_IOCSTATUS_INVALID_FIELD:
		case MPI2_IOCSTATUS_INVALID_STATE:
		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
			return (EINVAL);
		case MPI2_IOCSTATUS_BUSY:
			return (EBUSY);
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
			return (EAGAIN);
		case MPI2_IOCSTATUS_INVALID_SGL:
		case MPI2_IOCSTATUS_INTERNAL_ERROR:
		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		default:
			return (EIO);
		}
	}
	/* For reads, hand the slot status back to the caller. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (0);
}
15470 16224
15471 16225 int
15472 16226 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15473 16227 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15474 16228 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15475 16229 {
15476 16230 ddi_dma_cookie_t new_cookie;
15477 16231 size_t alloc_len;
15478 16232 uint_t ncookie;
15479 16233
15480 16234 if (cookiep == NULL)
15481 16235 cookiep = &new_cookie;
15482 16236
15483 16237 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15484 16238 NULL, dma_hdp) != DDI_SUCCESS) {
15485 16239 return (FALSE);
15486 16240 }
15487 16241
15488 16242 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15489 16243 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
15490 16244 acc_hdp) != DDI_SUCCESS) {
15491 16245 ddi_dma_free_handle(dma_hdp);
15492 16246 return (FALSE);
15493 16247 }
15494 16248
15495 16249 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
15496 16250 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
15497 16251 cookiep, &ncookie) != DDI_DMA_MAPPED) {
15498 16252 (void) ddi_dma_mem_free(acc_hdp);
15499 16253 ddi_dma_free_handle(dma_hdp);
15500 16254 return (FALSE);
15501 16255 }
15502 16256
15503 16257 return (TRUE);
15504 16258 }
15505 16259
15506 16260 void
15507 16261 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15508 16262 {
15509 16263 if (*dma_hdp == NULL)
15510 16264 return;
15511 16265
15512 16266 (void) ddi_dma_unbind_handle(*dma_hdp);
15513 16267 (void) ddi_dma_mem_free(acc_hdp);
15514 16268 ddi_dma_free_handle(dma_hdp);
15515 16269 }
|
↓ open down ↓ |
520 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX