4682 panic in mptsas refhash
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Hans Rosenfeld <hans.rosenfeld@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Approved by: Albert Lee <trisk@nexenta.com>
4500 mptsas_hash_traverse() is unsafe, leads to missing devices
Reviewed by: Hans Rosenfeld <hans.rosenfeld@nexenta.com>
Approved by: Albert Lee <trisk@nexenta.com>
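This change retires the driver-private mptsas_hash_* table (whose traversal bug 4500 describes as unsafe) in favor of the generic refhash facility; the new eval, hash, compare, and destructor callbacks appear in the hunk below. As a minimal sketch of how those callbacks are meant to plug in -- assuming the refhash interface from <sys/refhash.h>; the refhash_create() call and refhash_linear_search() are not shown in this hunk, so treat their exact signatures here as assumptions, and the device handle value is purely hypothetical:

	/*
	 * Sketch only, not part of the diff.  Assumes the illumos refhash
	 * API: a linear search using one of the eval callbacks defined in
	 * this change, and a removal-during-iteration loop mirroring the
	 * new mptsas_destroy_hashes().
	 */
	mptsas_target_t *tp;
	uint16_t devhdl = 0x000a;	/* hypothetical device handle */

	/* Keyed lookups use (wwn, phymask); other lookups scan linearly. */
	tp = refhash_linear_search(mpt->m_targets,
	    mptsas_target_eval_devhdl, &devhdl);

	/*
	 * The iterator is expected to hold a reference on the object it
	 * returns, so removing the current entry inside the loop does not
	 * invalidate the walk.
	 */
	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}

The old mptsas_hash_traverse()/mptsas_hash_rem() pair offered no such hold semantics while walking the table, which is the unsafety that motivated this conversion.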
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 - * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 + * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * Copyright (c) 2000 to 2010, LSI Corporation.
31 31 * All rights reserved.
32 32 *
33 33 * Redistribution and use in source and binary forms of all code within
34 34 * this file that is exclusively owned by LSI, with or without
35 35 * modification, is permitted provided that, in addition to the CDDL 1.0
36 36 * License requirements, the following conditions are met:
37 37 *
38 38 * Neither the name of the author nor the names of its contributors may be
39 39 * used to endorse or promote products derived from this software without
40 40 * specific prior written permission.
41 41 *
42 42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
45 45 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
46 46 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
47 47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
48 48 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
49 49 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
50 50 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
51 51 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
52 52 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
53 53 * DAMAGE.
54 54 */
55 55
56 56 /*
57 57 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
58 58 *
59 59 */
60 60
61 61 #if defined(lint) || defined(DEBUG)
62 62 #define MPTSAS_DEBUG
63 63 #endif
64 64
65 65 /*
66 66 * standard header files.
67 67 */
68 68 #include <sys/note.h>
69 69 #include <sys/scsi/scsi.h>
70 70 #include <sys/pci.h>
71 71 #include <sys/file.h>
72 72 #include <sys/policy.h>
73 73 #include <sys/model.h>
74 74 #include <sys/sysevent.h>
75 75 #include <sys/sysevent/eventdefs.h>
76 76 #include <sys/sysevent/dr.h>
77 77 #include <sys/sata/sata_defs.h>
78 78 #include <sys/scsi/generic/sas.h>
79 79 #include <sys/scsi/impl/scsi_sas.h>
80 80
81 81 #pragma pack(1)
82 82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
83 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
84 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
85 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
86 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
87 87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
88 88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
89 89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
90 90 #pragma pack()
91 91
92 92 /*
93 93 * private header files.
94 94 *
95 95 */
96 96 #include <sys/scsi/impl/scsi_reset_notify.h>
97 97 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
98 98 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
99 99 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
100 +#include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
100 101 #include <sys/raidioctl.h>
101 102
102 103 #include <sys/fs/dv_node.h> /* devfs_clean */
103 104
104 105 /*
105 106 * FMA header files
106 107 */
107 108 #include <sys/ddifm.h>
108 109 #include <sys/fm/protocol.h>
109 110 #include <sys/fm/util.h>
110 111 #include <sys/fm/io/ddi.h>
111 112
112 113 /*
113 114 * autoconfiguration data and routines.
114 115 */
115 116 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
116 117 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
117 118 static int mptsas_power(dev_info_t *dip, int component, int level);
118 119
119 120 /*
120 121 * cb_ops function
121 122 */
122 123 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
123 124 cred_t *credp, int *rval);
124 125 #ifdef __sparc
125 126 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
126 127 #else /* __sparc */
127 128 static int mptsas_quiesce(dev_info_t *devi);
128 129 #endif /* __sparc */
129 130
130 131 /*
131 132 * Resource initilaization for hardware
132 133 */
133 134 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
134 135 static void mptsas_disable_bus_master(mptsas_t *mpt);
135 136 static void mptsas_hba_fini(mptsas_t *mpt);
136 137 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
137 138 static int mptsas_hba_setup(mptsas_t *mpt);
138 139 static void mptsas_hba_teardown(mptsas_t *mpt);
139 140 static int mptsas_config_space_init(mptsas_t *mpt);
140 141 static void mptsas_config_space_fini(mptsas_t *mpt);
141 142 static void mptsas_iport_register(mptsas_t *mpt);
142 143 static int mptsas_smp_setup(mptsas_t *mpt);
143 144 static void mptsas_smp_teardown(mptsas_t *mpt);
144 145 static int mptsas_cache_create(mptsas_t *mpt);
145 146 static void mptsas_cache_destroy(mptsas_t *mpt);
146 147 static int mptsas_alloc_request_frames(mptsas_t *mpt);
147 148 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
148 149 static int mptsas_alloc_free_queue(mptsas_t *mpt);
149 150 static int mptsas_alloc_post_queue(mptsas_t *mpt);
150 151 static void mptsas_alloc_reply_args(mptsas_t *mpt);
151 152 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
152 153 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
153 154 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
154 155
155 156 /*
156 157 * SCSA function prototypes
157 158 */
158 159 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
159 160 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
160 161 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
161 162 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
162 163 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
163 164 int tgtonly);
164 165 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
165 166 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
166 167 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
167 168 int tgtlen, int flags, int (*callback)(), caddr_t arg);
168 169 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
169 170 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
170 171 struct scsi_pkt *pkt);
171 172 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
172 173 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
173 174 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
174 175 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
175 176 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
176 177 void (*callback)(caddr_t), caddr_t arg);
177 178 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
178 179 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
179 180 static int mptsas_scsi_quiesce(dev_info_t *dip);
180 181 static int mptsas_scsi_unquiesce(dev_info_t *dip);
181 182 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
182 183 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
183 184
184 185 /*
185 186 * SMP functions
186 187 */
187 188 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
188 189
189 190 /*
190 191 * internal function prototypes.
191 192 */
192 193 static void mptsas_list_add(mptsas_t *mpt);
193 194 static void mptsas_list_del(mptsas_t *mpt);
194 195
195 196 static int mptsas_quiesce_bus(mptsas_t *mpt);
196 197 static int mptsas_unquiesce_bus(mptsas_t *mpt);
197 198
198 199 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
199 200 static void mptsas_free_handshake_msg(mptsas_t *mpt);
200 201
201 202 static void mptsas_ncmds_checkdrain(void *arg);
202 203
203 204 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
204 205 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
205 206 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
206 207 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
207 208
208 209 static int mptsas_do_detach(dev_info_t *dev);
209 210 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
210 211 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
211 212 struct scsi_pkt *pkt);
212 213 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
213 214
214 215 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
215 216 static void mptsas_handle_event(void *args);
216 217 static int mptsas_handle_event_sync(void *args);
217 218 static void mptsas_handle_dr(void *args);
218 219 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
219 220 dev_info_t *pdip);
220 221
221 222 static void mptsas_restart_cmd(void *);
222 223
223 224 static void mptsas_flush_hba(mptsas_t *mpt);
224 225 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
225 226 uint8_t tasktype);
226 227 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
227 228 uchar_t reason, uint_t stat);
228 229
229 230 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
230 231 static void mptsas_process_intr(mptsas_t *mpt,
231 232 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
232 233 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
233 234 pMpi2ReplyDescriptorsUnion_t reply_desc);
234 235 static void mptsas_handle_address_reply(mptsas_t *mpt,
235 236 pMpi2ReplyDescriptorsUnion_t reply_desc);
236 237 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
237 238 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
238 239 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
239 240
240 241 static void mptsas_watch(void *arg);
241 242 static void mptsas_watchsubr(mptsas_t *mpt);
242 243 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
243 244
244 245 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
245 246 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
246 247 uint8_t *data, uint32_t request_size, uint32_t reply_size,
247 248 uint32_t data_size, uint32_t direction, uint8_t *dataout,
248 249 uint32_t dataout_size, short timeout, int mode);
249 250 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
250 251
251 252 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
252 253 uint32_t unique_id);
253 254 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
254 255 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
255 256 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
256 257 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
257 258 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
258 259 uint32_t diag_type);
259 260 static int mptsas_diag_register(mptsas_t *mpt,
260 261 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
261 262 static int mptsas_diag_unregister(mptsas_t *mpt,
262 263 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
263 264 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
264 265 uint32_t *return_code);
265 266 static int mptsas_diag_read_buffer(mptsas_t *mpt,
266 267 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
267 268 uint32_t *return_code, int ioctl_mode);
268 269 static int mptsas_diag_release(mptsas_t *mpt,
269 270 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
270 271 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
271 272 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
272 273 int ioctl_mode);
273 274 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
274 275 int mode);
275 276
276 277 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
277 278 int cmdlen, int tgtlen, int statuslen, int kf);
278 279 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
279 280
280 281 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
281 282 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
282 283
283 284 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
284 285 int kmflags);
285 286 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
286 287
287 288 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
288 289 mptsas_cmd_t *cmd);
289 290 static void mptsas_check_task_mgt(mptsas_t *mpt,
290 291 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
291 292 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
292 293 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
293 294 int *resid);
294 295
295 296 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
296 297 static void mptsas_free_active_slots(mptsas_t *mpt);
297 298 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
298 299
299 300 static void mptsas_restart_hba(mptsas_t *mpt);
300 301 static void mptsas_restart_waitq(mptsas_t *mpt);
301 302
302 303 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
303 304 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
304 305 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
305 306
306 307 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
307 308 static void mptsas_doneq_empty(mptsas_t *mpt);
308 309 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
309 310
310 311 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
311 312 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
312 313 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
313 314 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
314 315
315 316
316 317 static void mptsas_start_watch_reset_delay();
317 318 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
318 319 static void mptsas_watch_reset_delay(void *arg);
319 320 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
320 321
321 322 /*
322 323 * helper functions
323 324 */
324 325 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
325 326
326 327 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
327 328 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
328 329 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
329 330 int lun);
330 331 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
331 332 int lun);
332 333 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
333 334 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
334 335
335 336 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
336 337 int *lun);
337 338 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
338 339
339 -static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
340 - uint8_t phy);
341 -static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
342 - uint64_t wwid);
343 -static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
344 - uint64_t wwid);
340 +static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
341 + mptsas_phymask_t phymask, uint8_t phy);
342 +static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
343 + mptsas_phymask_t phymask, uint64_t wwid);
344 +static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
345 + mptsas_phymask_t phymask, uint64_t wwid);
345 346
346 347 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
347 348 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
348 349
349 350 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
350 351 uint16_t *handle, mptsas_target_t **pptgt);
351 352 static void mptsas_update_phymask(mptsas_t *mpt);
352 353
353 354 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
354 355 uint32_t *status, uint8_t cmd);
355 356 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
356 357 mptsas_phymask_t *phymask);
357 358 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
358 359 mptsas_phymask_t phymask);
359 360 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
360 361
361 362
362 363 /*
363 364 * Enumeration / DR functions
364 365 */
365 366 static void mptsas_config_all(dev_info_t *pdip);
366 367 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
367 368 dev_info_t **lundip);
368 369 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
369 370 dev_info_t **lundip);
370 371
371 372 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
372 373 static int mptsas_offline_target(dev_info_t *pdip, char *name);
373 374
374 375 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
375 376 dev_info_t **dip);
376 377
377 378 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
378 379 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
379 380 dev_info_t **dip, mptsas_target_t *ptgt);
380 381
381 382 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
382 383 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
383 384
384 385 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
385 386 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
386 387 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
387 388 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
388 389 int lun);
389 390
390 391 static void mptsas_offline_missed_luns(dev_info_t *pdip,
391 392 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
392 393 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
393 394 mdi_pathinfo_t *rpip, uint_t flags);
394 395
395 396 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
396 397 dev_info_t **smp_dip);
397 398 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
398 399 uint_t flags);
399 400
400 401 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
401 402 int mode, int *rval);
402 403 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
403 404 int mode, int *rval);
404 405 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
405 406 int mode, int *rval);
406 407 static void mptsas_record_event(void *args);
407 408 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
408 409 int mode);
409 410
410 -static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
411 -static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
412 -static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
413 -static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
414 - mptsas_phymask_t key2);
415 -static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
416 - mptsas_phymask_t key2);
417 -static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
418 -
419 -mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
411 +mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
420 412 uint32_t, mptsas_phymask_t, uint8_t);
421 -static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
422 - mptsas_smp_t *data);
423 -static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
424 - mptsas_phymask_t phymask);
425 -static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
426 -static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
413 +static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
427 414 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
428 415 dev_info_t **smp_dip);
429 416
430 417 /*
431 418 * Power management functions
432 419 */
433 420 static int mptsas_get_pci_cap(mptsas_t *mpt);
434 421 static int mptsas_init_pm(mptsas_t *mpt);
435 422
436 423 /*
437 424 * MPT MSI tunable:
438 425 *
439 426 * By default MSI is enabled on all supported platforms.
440 427 */
441 428 boolean_t mptsas_enable_msi = B_TRUE;
442 429 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
443 430
444 431 static int mptsas_register_intrs(mptsas_t *);
445 432 static void mptsas_unregister_intrs(mptsas_t *);
446 433 static int mptsas_add_intrs(mptsas_t *, int);
447 434 static void mptsas_rem_intrs(mptsas_t *);
448 435
449 436 /*
450 437 * FMA Prototypes
451 438 */
452 439 static void mptsas_fm_init(mptsas_t *mpt);
453 440 static void mptsas_fm_fini(mptsas_t *mpt);
454 441 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
455 442
456 443 extern pri_t minclsyspri, maxclsyspri;
457 444
458 445 /*
459 446 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
460 447 * under this device that the paths to a physical device are created when
461 448 * MPxIO is used.
462 449 */
463 450 extern dev_info_t *scsi_vhci_dip;
464 451
465 452 /*
466 453 * Tunable timeout value for Inquiry VPD page 0x83
467 454 * By default the value is 30 seconds.
468 455 */
469 456 int mptsas_inq83_retry_timeout = 30;
470 457
471 458 /*
472 459 * This is used to allocate memory for message frame storage, not for
473 460 * data I/O DMA. All message frames must be stored in the first 4G of
474 461 * physical memory.
475 462 */
476 463 ddi_dma_attr_t mptsas_dma_attrs = {
477 464 DMA_ATTR_V0, /* attribute layout version */
478 465 0x0ull, /* address low - should be 0 (longlong) */
479 466 0xffffffffull, /* address high - 32-bit max range */
480 467 0x00ffffffull, /* count max - max DMA object size */
481 468 4, /* allocation alignment requirements */
482 469 0x78, /* burstsizes - binary encoded values */
483 470 1, /* minxfer - gran. of DMA engine */
484 471 0x00ffffffull, /* maxxfer - gran. of DMA engine */
485 472 0xffffffffull, /* max segment size (DMA boundary) */
486 473 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
487 474 512, /* granularity - device transfer size */
488 475 0 /* flags, set to 0 */
489 476 };
490 477
491 478 /*
492 479 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
493 480 * physical addresses are supported.)
494 481 */
495 482 ddi_dma_attr_t mptsas_dma_attrs64 = {
496 483 DMA_ATTR_V0, /* attribute layout version */
497 484 0x0ull, /* address low - should be 0 (longlong) */
498 485 0xffffffffffffffffull, /* address high - 64-bit max */
499 486 0x00ffffffull, /* count max - max DMA object size */
500 487 4, /* allocation alignment requirements */
501 488 0x78, /* burstsizes - binary encoded values */
502 489 1, /* minxfer - gran. of DMA engine */
503 490 0x00ffffffull, /* maxxfer - gran. of DMA engine */
504 491 0xffffffffull, /* max segment size (DMA boundary) */
505 492 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
506 493 512, /* granularity - device transfer size */
507 494 DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */
508 495 };
509 496
510 497 ddi_device_acc_attr_t mptsas_dev_attr = {
511 498 DDI_DEVICE_ATTR_V1,
512 499 DDI_STRUCTURE_LE_ACC,
513 500 DDI_STRICTORDER_ACC,
514 501 DDI_DEFAULT_ACC
515 502 };
516 503
517 504 static struct cb_ops mptsas_cb_ops = {
518 505 scsi_hba_open, /* open */
519 506 scsi_hba_close, /* close */
520 507 nodev, /* strategy */
521 508 nodev, /* print */
522 509 nodev, /* dump */
523 510 nodev, /* read */
524 511 nodev, /* write */
525 512 mptsas_ioctl, /* ioctl */
526 513 nodev, /* devmap */
527 514 nodev, /* mmap */
528 515 nodev, /* segmap */
529 516 nochpoll, /* chpoll */
530 517 ddi_prop_op, /* cb_prop_op */
531 518 NULL, /* streamtab */
532 519 D_MP, /* cb_flag */
533 520 CB_REV, /* rev */
534 521 nodev, /* aread */
535 522 nodev /* awrite */
536 523 };
537 524
538 525 static struct dev_ops mptsas_ops = {
539 526 DEVO_REV, /* devo_rev, */
540 527 0, /* refcnt */
541 528 ddi_no_info, /* info */
542 529 nulldev, /* identify */
543 530 nulldev, /* probe */
544 531 mptsas_attach, /* attach */
545 532 mptsas_detach, /* detach */
546 533 #ifdef __sparc
547 534 mptsas_reset,
548 535 #else
549 536 nodev, /* reset */
550 537 #endif /* __sparc */
551 538 &mptsas_cb_ops, /* driver operations */
552 539 NULL, /* bus operations */
553 540 mptsas_power, /* power management */
554 541 #ifdef __sparc
555 542 ddi_quiesce_not_needed
556 543 #else
557 544 mptsas_quiesce /* quiesce */
558 545 #endif /* __sparc */
559 546 };
560 547
561 548
562 549 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
563 550
564 551 static struct modldrv modldrv = {
565 552 &mod_driverops, /* Type of module. This one is a driver */
566 553 MPTSAS_MOD_STRING, /* Name of the module. */
567 554 &mptsas_ops, /* driver ops */
568 555 };
569 556
570 557 static struct modlinkage modlinkage = {
571 558 MODREV_1, &modldrv, NULL
572 559 };
573 560 #define TARGET_PROP "target"
574 561 #define LUN_PROP "lun"
575 562 #define LUN64_PROP "lun64"
576 563 #define SAS_PROP "sas-mpt"
577 564 #define MDI_GUID "wwn"
578 565 #define NDI_GUID "guid"
579 566 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
580 567
581 568 /*
582 569 * Local static data
583 570 */
584 571 #if defined(MPTSAS_DEBUG)
585 572 uint32_t mptsas_debug_flags = 0;
586 573 #endif /* defined(MPTSAS_DEBUG) */
587 574 uint32_t mptsas_debug_resets = 0;
588 575
589 576 static kmutex_t mptsas_global_mutex;
590 577 static void *mptsas_state; /* soft state ptr */
591 578 static krwlock_t mptsas_global_rwlock;
592 579
593 580 static kmutex_t mptsas_log_mutex;
594 581 static char mptsas_log_buf[256];
595 582 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
596 583
597 584 static mptsas_t *mptsas_head, *mptsas_tail;
598 585 static clock_t mptsas_scsi_watchdog_tick;
599 586 static clock_t mptsas_tick;
600 587 static timeout_id_t mptsas_reset_watch;
601 588 static timeout_id_t mptsas_timeout_id;
602 589 static int mptsas_timeouts_enabled = 0;
603 590 /*
604 591 * warlock directives
605 592 */
606 593 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
607 594 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
608 595 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
609 596 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
610 597 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
611 598 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
612 599
613 600 /*
614 601 * SM - HBA statics
615 602 */
616 603 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
617 604
618 605 #ifdef MPTSAS_DEBUG
619 606 void debug_enter(char *);
620 607 #endif
621 608
622 609 /*
623 610 * Notes:
624 611 * - scsi_hba_init(9F) initializes SCSI HBA modules
625 612 * - must call scsi_hba_fini(9F) if modload() fails
626 613 */
627 614 int
628 615 _init(void)
629 616 {
630 617 int status;
631 618 /* CONSTCOND */
632 619 ASSERT(NO_COMPETING_THREADS);
633 620
634 621 NDBG0(("_init"));
635 622
636 623 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
637 624 MPTSAS_INITIAL_SOFT_SPACE);
638 625 if (status != 0) {
639 626 return (status);
640 627 }
641 628
642 629 if ((status = scsi_hba_init(&modlinkage)) != 0) {
643 630 ddi_soft_state_fini(&mptsas_state);
644 631 return (status);
645 632 }
646 633
647 634 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
648 635 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
649 636 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
650 637
651 638 if ((status = mod_install(&modlinkage)) != 0) {
652 639 mutex_destroy(&mptsas_log_mutex);
653 640 rw_destroy(&mptsas_global_rwlock);
654 641 mutex_destroy(&mptsas_global_mutex);
655 642 ddi_soft_state_fini(&mptsas_state);
656 643 scsi_hba_fini(&modlinkage);
657 644 }
658 645
659 646 return (status);
660 647 }
661 648
662 649 /*
663 650 * Notes:
664 651 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
665 652 */
666 653 int
667 654 _fini(void)
668 655 {
669 656 int status;
670 657 /* CONSTCOND */
671 658 ASSERT(NO_COMPETING_THREADS);
672 659
673 660 NDBG0(("_fini"));
674 661
675 662 if ((status = mod_remove(&modlinkage)) == 0) {
676 663 ddi_soft_state_fini(&mptsas_state);
677 664 scsi_hba_fini(&modlinkage);
678 665 mutex_destroy(&mptsas_global_mutex);
679 666 rw_destroy(&mptsas_global_rwlock);
680 667 mutex_destroy(&mptsas_log_mutex);
681 668 }
682 669 return (status);
683 670 }
684 671
685 672 /*
686 673 * The loadable-module _info(9E) entry point
687 674 */
688 675 int
689 676 _info(struct modinfo *modinfop)
690 677 {
691 678 /* CONSTCOND */
692 679 ASSERT(NO_COMPETING_THREADS);
693 680 NDBG0(("mptsas _info"));
694 681
695 682 return (mod_info(&modlinkage, modinfop));
696 683 }
697 684
685 +static int
686 +mptsas_target_eval_devhdl(const void *op, void *arg)
687 +{
688 + uint16_t dh = *(uint16_t *)arg;
689 + const mptsas_target_t *tp = op;
698 690
691 + return ((int)tp->m_devhdl - (int)dh);
692 +}
693 +
699 694 static int
695 +mptsas_target_eval_slot(const void *op, void *arg)
696 +{
697 + mptsas_led_control_t *lcp = arg;
698 + const mptsas_target_t *tp = op;
699 +
700 + if (tp->m_enclosure != lcp->Enclosure)
701 + return ((int)tp->m_enclosure - (int)lcp->Enclosure);
702 +
703 + return ((int)tp->m_slot_num - (int)lcp->Slot);
704 +}
705 +
706 +static int
707 +mptsas_target_eval_nowwn(const void *op, void *arg)
708 +{
709 + uint8_t phy = *(uint8_t *)arg;
710 + const mptsas_target_t *tp = op;
711 +
712 + if (tp->m_addr.mta_wwn != 0)
713 + return (-1);
714 +
715 + return ((int)tp->m_phynum - (int)phy);
716 +}
717 +
718 +static int
719 +mptsas_smp_eval_devhdl(const void *op, void *arg)
720 +{
721 + uint16_t dh = *(uint16_t *)arg;
722 + const mptsas_smp_t *sp = op;
723 +
724 + return ((int)sp->m_devhdl - (int)dh);
725 +}
726 +
727 +static uint64_t
728 +mptsas_target_addr_hash(const void *tp)
729 +{
730 + const mptsas_target_addr_t *tap = tp;
731 +
732 + return ((tap->mta_wwn & 0xffffffffffffULL) |
733 + ((uint64_t)tap->mta_phymask << 48));
734 +}
735 +
736 +static int
737 +mptsas_target_addr_cmp(const void *a, const void *b)
738 +{
739 + const mptsas_target_addr_t *aap = a;
740 + const mptsas_target_addr_t *bap = b;
741 +
742 + if (aap->mta_wwn < bap->mta_wwn)
743 + return (-1);
744 + if (aap->mta_wwn > bap->mta_wwn)
745 + return (1);
746 + return ((int)bap->mta_phymask - (int)aap->mta_phymask);
747 +}
748 +
749 +static void
750 +mptsas_target_free(void *op)
751 +{
752 + kmem_free(op, sizeof (mptsas_target_t));
753 +}
754 +
755 +static void
756 +mptsas_smp_free(void *op)
757 +{
758 + kmem_free(op, sizeof (mptsas_smp_t));
759 +}
760 +
761 +static void
762 +mptsas_destroy_hashes(mptsas_t *mpt)
763 +{
764 + mptsas_target_t *tp;
765 + mptsas_smp_t *sp;
766 +
767 + for (tp = refhash_first(mpt->m_targets); tp != NULL;
768 + tp = refhash_next(mpt->m_targets, tp)) {
769 + refhash_remove(mpt->m_targets, tp);
770 + }
771 + for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
772 + sp = refhash_next(mpt->m_smp_targets, sp)) {
773 + refhash_remove(mpt->m_smp_targets, sp);
774 + }
775 + refhash_destroy(mpt->m_targets);
776 + refhash_destroy(mpt->m_smp_targets);
777 + mpt->m_targets = NULL;
778 + mpt->m_smp_targets = NULL;
779 +}
780 +
781 +static int
700 782 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
701 783 {
702 784 dev_info_t *pdip;
703 785 mptsas_t *mpt;
704 786 scsi_hba_tran_t *hba_tran;
705 787 char *iport = NULL;
706 788 char phymask[MPTSAS_MAX_PHYS];
707 789 mptsas_phymask_t phy_mask = 0;
708 790 int dynamic_port = 0;
709 791 uint32_t page_address;
710 792 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
711 793 int rval = DDI_FAILURE;
712 794 int i = 0;
713 795 uint8_t numphys = 0;
714 796 uint8_t phy_id;
715 797 uint8_t phy_port = 0;
716 798 uint16_t attached_devhdl = 0;
717 799 uint32_t dev_info;
718 800 uint64_t attached_sas_wwn;
719 801 uint16_t dev_hdl;
720 802 uint16_t pdev_hdl;
721 803 uint16_t bay_num, enclosure;
722 804 char attached_wwnstr[MPTSAS_WWN_STRLEN];
723 805
724 806 /* CONSTCOND */
725 807 ASSERT(NO_COMPETING_THREADS);
726 808
727 809 switch (cmd) {
728 810 case DDI_ATTACH:
729 811 break;
730 812
731 813 case DDI_RESUME:
732 814 /*
733 815 * If this a scsi-iport node, nothing to do here.
734 816 */
735 817 return (DDI_SUCCESS);
736 818
737 819 default:
738 820 return (DDI_FAILURE);
739 821 }
740 822
741 823 pdip = ddi_get_parent(dip);
742 824
743 825 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
744 826 NULL) {
745 827 cmn_err(CE_WARN, "Failed attach iport because fail to "
746 828 "get tran vector for the HBA node");
747 829 return (DDI_FAILURE);
748 830 }
749 831
750 832 mpt = TRAN2MPT(hba_tran);
751 833 ASSERT(mpt != NULL);
752 834 if (mpt == NULL)
753 835 return (DDI_FAILURE);
754 836
755 837 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
756 838 NULL) {
757 839 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
758 840 "get tran vector for the iport node");
759 841 return (DDI_FAILURE);
760 842 }
761 843
762 844 /*
763 845 * Overwrite parent's tran_hba_private to iport's tran vector
764 846 */
765 847 hba_tran->tran_hba_private = mpt;
766 848
767 849 ddi_report_dev(dip);
768 850
769 851 /*
770 852 * Get SAS address for initiator port according dev_handle
771 853 */
772 854 iport = ddi_get_name_addr(dip);
773 855 if (iport && strncmp(iport, "v0", 2) == 0) {
774 856 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
775 857 MPTSAS_VIRTUAL_PORT, 1) !=
776 858 DDI_PROP_SUCCESS) {
777 859 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
778 860 MPTSAS_VIRTUAL_PORT);
779 861 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
780 862 "prop update failed");
781 863 return (DDI_FAILURE);
782 864 }
783 865 return (DDI_SUCCESS);
784 866 }
785 867
786 868 mutex_enter(&mpt->m_mutex);
787 869 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
788 870 bzero(phymask, sizeof (phymask));
789 871 (void) sprintf(phymask,
790 872 "%x", mpt->m_phy_info[i].phy_mask);
791 873 if (strcmp(phymask, iport) == 0) {
792 874 break;
793 875 }
794 876 }
795 877
796 878 if (i == MPTSAS_MAX_PHYS) {
797 879 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
798 880 "seems not exist", iport);
799 881 mutex_exit(&mpt->m_mutex);
800 882 return (DDI_FAILURE);
801 883 }
802 884
803 885 phy_mask = mpt->m_phy_info[i].phy_mask;
804 886
805 887 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
806 888 dynamic_port = 1;
807 889 else
808 890 dynamic_port = 0;
809 891
810 892 /*
811 893 * Update PHY info for smhba
812 894 */
813 895 if (mptsas_smhba_phy_init(mpt)) {
814 896 mutex_exit(&mpt->m_mutex);
815 897 mptsas_log(mpt, CE_WARN, "mptsas phy update "
816 898 "failed");
817 899 return (DDI_FAILURE);
818 900 }
819 901
820 902 mutex_exit(&mpt->m_mutex);
821 903
822 904 numphys = 0;
823 905 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
824 906 if ((phy_mask >> i) & 0x01) {
825 907 numphys++;
826 908 }
827 909 }
828 910
829 911 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
830 912 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
831 913 mpt->un.m_base_wwid);
832 914
833 915 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
834 916 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
835 917 DDI_PROP_SUCCESS) {
836 918 (void) ddi_prop_remove(DDI_DEV_T_NONE,
837 919 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
838 920 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
839 921 "prop update failed");
840 922 return (DDI_FAILURE);
841 923 }
842 924 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
843 925 MPTSAS_NUM_PHYS, numphys) !=
844 926 DDI_PROP_SUCCESS) {
845 927 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
846 928 return (DDI_FAILURE);
847 929 }
848 930
849 931 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
850 932 "phymask", phy_mask) !=
851 933 DDI_PROP_SUCCESS) {
852 934 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
853 935 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
854 936 "prop update failed");
855 937 return (DDI_FAILURE);
856 938 }
857 939
858 940 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
859 941 "dynamic-port", dynamic_port) !=
860 942 DDI_PROP_SUCCESS) {
861 943 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
862 944 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
863 945 "prop update failed");
864 946 return (DDI_FAILURE);
865 947 }
866 948 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
867 949 MPTSAS_VIRTUAL_PORT, 0) !=
868 950 DDI_PROP_SUCCESS) {
869 951 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
870 952 MPTSAS_VIRTUAL_PORT);
871 953 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
872 954 "prop update failed");
873 955 return (DDI_FAILURE);
874 956 }
875 957 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
876 958 &attached_devhdl);
877 959
878 960 mutex_enter(&mpt->m_mutex);
879 961 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
880 962 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
881 963 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
882 964 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
883 965 &pdev_hdl, &bay_num, &enclosure);
884 966 if (rval != DDI_SUCCESS) {
885 967 mptsas_log(mpt, CE_WARN,
886 968 "Failed to get device page0 for handle:%d",
887 969 attached_devhdl);
888 970 mutex_exit(&mpt->m_mutex);
889 971 return (DDI_FAILURE);
890 972 }
891 973
892 974 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
893 975 bzero(phymask, sizeof (phymask));
894 976 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
895 977 if (strcmp(phymask, iport) == 0) {
896 978 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
897 979 "%x",
898 980 mpt->m_phy_info[i].phy_mask);
899 981 }
900 982 }
901 983 mutex_exit(&mpt->m_mutex);
902 984
903 985 bzero(attached_wwnstr, sizeof (attached_wwnstr));
904 986 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
905 987 attached_sas_wwn);
906 988 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
907 989 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
908 990 DDI_PROP_SUCCESS) {
909 991 (void) ddi_prop_remove(DDI_DEV_T_NONE,
910 992 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
911 993 return (DDI_FAILURE);
912 994 }
913 995
914 996 /* Create kstats for each phy on this iport */
915 997
916 998 mptsas_create_phy_stats(mpt, iport, dip);
917 999
918 1000 /*
919 1001 * register sas hba iport with mdi (MPxIO/vhci)
920 1002 */
921 1003 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
922 1004 dip, 0) == MDI_SUCCESS) {
923 1005 mpt->m_mpxio_enable = TRUE;
924 1006 }
925 1007 return (DDI_SUCCESS);
926 1008 }
927 1009
928 1010 /*
929 1011 * Notes:
930 1012 * Set up all device state and allocate data structures,
931 1013 * mutexes, condition variables, etc. for device operation.
932 1014 * Add interrupts needed.
933 1015 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
934 1016 */
935 1017 static int
936 1018 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
937 1019 {
938 1020 mptsas_t *mpt = NULL;
939 1021 int instance, i, j;
940 1022 int doneq_thread_num;
941 1023 char intr_added = 0;
942 1024 char map_setup = 0;
943 1025 char config_setup = 0;
944 1026 char hba_attach_setup = 0;
945 1027 char smp_attach_setup = 0;
946 1028 char mutex_init_done = 0;
947 1029 char event_taskq_create = 0;
948 1030 char dr_taskq_create = 0;
949 1031 char doneq_thread_create = 0;
950 1032 scsi_hba_tran_t *hba_tran;
951 1033 uint_t mem_bar = MEM_SPACE;
952 1034 int rval = DDI_FAILURE;
953 1035
954 1036 /* CONSTCOND */
955 1037 ASSERT(NO_COMPETING_THREADS);
956 1038
957 1039 if (scsi_hba_iport_unit_address(dip)) {
958 1040 return (mptsas_iport_attach(dip, cmd));
959 1041 }
960 1042
961 1043 switch (cmd) {
962 1044 case DDI_ATTACH:
963 1045 break;
964 1046
965 1047 case DDI_RESUME:
966 1048 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
967 1049 return (DDI_FAILURE);
968 1050
969 1051 mpt = TRAN2MPT(hba_tran);
970 1052
971 1053 if (!mpt) {
972 1054 return (DDI_FAILURE);
973 1055 }
974 1056
975 1057 /*
976 1058 * Reset hardware and softc to "no outstanding commands"
977 1059 * Note that a check condition can result on first command
978 1060 * to a target.
979 1061 */
980 1062 mutex_enter(&mpt->m_mutex);
981 1063
982 1064 /*
983 1065 * raise power.
984 1066 */
985 1067 if (mpt->m_options & MPTSAS_OPT_PM) {
986 1068 mutex_exit(&mpt->m_mutex);
987 1069 (void) pm_busy_component(dip, 0);
988 1070 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
989 1071 if (rval == DDI_SUCCESS) {
990 1072 mutex_enter(&mpt->m_mutex);
991 1073 } else {
992 1074 /*
993 1075 * The pm_raise_power() call above failed,
994 1076 * and that can only occur if we were unable
995 1077 * to reset the hardware. This is probably
996 1078 * due to unhealty hardware, and because
997 1079 * important filesystems(such as the root
998 1080 * filesystem) could be on the attached disks,
999 1081 * it would not be a good idea to continue,
1000 1082 * as we won't be entirely certain we are
1001 1083 * writing correct data. So we panic() here
1002 1084 * to not only prevent possible data corruption,
1003 1085 * but to give developers or end users a hope
1004 1086 * of identifying and correcting any problems.
1005 1087 */
1006 1088 fm_panic("mptsas could not reset hardware "
1007 1089 "during resume");
1008 1090 }
1009 1091 }
1010 1092
1011 1093 mpt->m_suspended = 0;
1012 1094
1013 1095 /*
1014 1096 * Reinitialize ioc
1015 1097 */
1016 1098 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1017 1099 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1018 1100 mutex_exit(&mpt->m_mutex);
1019 1101 if (mpt->m_options & MPTSAS_OPT_PM) {
1020 1102 (void) pm_idle_component(dip, 0);
1021 1103 }
1022 1104 fm_panic("mptsas init chip fail during resume");
1023 1105 }
1024 1106 /*
1025 1107 * mptsas_update_driver_data needs interrupts so enable them
1026 1108 * first.
1027 1109 */
1028 1110 MPTSAS_ENABLE_INTR(mpt);
1029 1111 mptsas_update_driver_data(mpt);
1030 1112
1031 1113 /* start requests, if possible */
1032 1114 mptsas_restart_hba(mpt);
1033 1115
1034 1116 mutex_exit(&mpt->m_mutex);
1035 1117
1036 1118 /*
1037 1119 * Restart watch thread
1038 1120 */
1039 1121 mutex_enter(&mptsas_global_mutex);
1040 1122 if (mptsas_timeout_id == 0) {
1041 1123 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1042 1124 mptsas_tick);
1043 1125 mptsas_timeouts_enabled = 1;
1044 1126 }
1045 1127 mutex_exit(&mptsas_global_mutex);
1046 1128
1047 1129 /* report idle status to pm framework */
1048 1130 if (mpt->m_options & MPTSAS_OPT_PM) {
1049 1131 (void) pm_idle_component(dip, 0);
1050 1132 }
1051 1133
1052 1134 return (DDI_SUCCESS);
1053 1135
1054 1136 default:
1055 1137 return (DDI_FAILURE);
1056 1138
1057 1139 }
1058 1140
1059 1141 instance = ddi_get_instance(dip);
1060 1142
1061 1143 /*
1062 1144 * Allocate softc information.
1063 1145 */
1064 1146 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1065 1147 mptsas_log(NULL, CE_WARN,
1066 1148 "mptsas%d: cannot allocate soft state", instance);
1067 1149 goto fail;
1068 1150 }
1069 1151
1070 1152 mpt = ddi_get_soft_state(mptsas_state, instance);
1071 1153
1072 1154 if (mpt == NULL) {
1073 1155 mptsas_log(NULL, CE_WARN,
1074 1156 "mptsas%d: cannot get soft state", instance);
1075 1157 goto fail;
1076 1158 }
1077 1159
1078 1160 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1079 1161 scsi_size_clean(dip);
1080 1162
1081 1163 mpt->m_dip = dip;
1082 1164 mpt->m_instance = instance;
1083 1165
1084 1166 /* Make a per-instance copy of the structures */
1085 1167 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1086 1168 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1087 1169 mpt->m_reg_acc_attr = mptsas_dev_attr;
1088 1170 mpt->m_dev_acc_attr = mptsas_dev_attr;
1089 1171
1090 1172 /*
1091 1173 * Initialize FMA
1092 1174 */
1093 1175 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1094 1176 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1095 1177 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1096 1178 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1097 1179
1098 1180 mptsas_fm_init(mpt);
1099 1181
1100 1182 if (mptsas_alloc_handshake_msg(mpt,
1101 1183 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1102 1184 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1103 1185 goto fail;
1104 1186 }
1105 1187
1106 1188 /*
1107 1189 * Setup configuration space
1108 1190 */
1109 1191 if (mptsas_config_space_init(mpt) == FALSE) {
1110 1192 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1111 1193 goto fail;
1112 1194 }
1113 1195 config_setup++;
1114 1196
1115 1197 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1116 1198 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1117 1199 mptsas_log(mpt, CE_WARN, "map setup failed");
1118 1200 goto fail;
1119 1201 }
1120 1202 map_setup++;
1121 1203
1122 1204 /*
1123 1205 * A taskq is created for dealing with the event handler
1124 1206 */
1125 1207 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1126 1208 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1127 1209 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1128 1210 goto fail;
1129 1211 }
1130 1212 event_taskq_create++;
1131 1213
1132 1214 /*
1133 1215 * A taskq is created for dealing with dr events
1134 1216 */
1135 1217 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1136 1218 "mptsas_dr_taskq",
1137 1219 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1138 1220 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1139 1221 "failed");
1140 1222 goto fail;
1141 1223 }
1142 1224 dr_taskq_create++;
1143 1225
1144 1226 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1145 1227 0, "mptsas_doneq_thread_threshold_prop", 10);
1146 1228 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1147 1229 0, "mptsas_doneq_length_threshold_prop", 8);
1148 1230 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1149 1231 0, "mptsas_doneq_thread_n_prop", 8);
1150 1232
1151 1233 if (mpt->m_doneq_thread_n) {
1152 1234 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1153 1235 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1154 1236
1155 1237 mutex_enter(&mpt->m_doneq_mutex);
1156 1238 mpt->m_doneq_thread_id =
1157 1239 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1158 1240 * mpt->m_doneq_thread_n, KM_SLEEP);
1159 1241
1160 1242 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1161 1243 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1162 1244 CV_DRIVER, NULL);
1163 1245 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1164 1246 MUTEX_DRIVER, NULL);
1165 1247 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1166 1248 mpt->m_doneq_thread_id[j].flag |=
1167 1249 MPTSAS_DONEQ_THREAD_ACTIVE;
1168 1250 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1169 1251 mpt->m_doneq_thread_id[j].arg.t = j;
1170 1252 mpt->m_doneq_thread_id[j].threadp =
1171 1253 thread_create(NULL, 0, mptsas_doneq_thread,
1172 1254 &mpt->m_doneq_thread_id[j].arg,
1173 1255 0, &p0, TS_RUN, minclsyspri);
1174 1256 mpt->m_doneq_thread_id[j].donetail =
1175 1257 &mpt->m_doneq_thread_id[j].doneq;
1176 1258 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1177 1259 }
1178 1260 mutex_exit(&mpt->m_doneq_mutex);
1179 1261 doneq_thread_create++;
1180 1262 }
1181 1263
1182 1264 /* Initialize mutex used in interrupt handler */
1183 1265 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1184 1266 DDI_INTR_PRI(mpt->m_intr_pri));
1185 1267 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1186 1268 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1187 1269 DDI_INTR_PRI(mpt->m_intr_pri));
1188 1270 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1189 1271 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1190 1272 NULL, MUTEX_DRIVER,
1191 1273 DDI_INTR_PRI(mpt->m_intr_pri));
1192 1274 }
1193 1275
1194 1276 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1195 1277 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1196 1278 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1197 1279 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1198 1280 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1199 1281 mutex_init_done++;
1200 1282
1201 1283 /*
1202 1284 * Disable hardware interrupt since we're not ready to
1203 1285 * handle it yet.
1204 1286 */
1205 1287 MPTSAS_DISABLE_INTR(mpt);
1206 1288 if (mptsas_register_intrs(mpt) == FALSE)
1207 1289 goto fail;
1208 1290 intr_added++;
1209 1291
1210 1292 mutex_enter(&mpt->m_mutex);
1211 1293 /*
1212 1294 * Initialize power management component
1213 1295 */
1214 1296 if (mpt->m_options & MPTSAS_OPT_PM) {
1215 1297 if (mptsas_init_pm(mpt)) {
1216 1298 mutex_exit(&mpt->m_mutex);
1217 1299 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1218 1300 "failed");
1219 1301 goto fail;
1220 1302 }
1221 1303 }
1222 1304
1223 1305 /*
1224 1306 * Initialize chip using Message Unit Reset, if allowed
1225 1307 */
1226 1308 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1227 1309 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1228 1310 mutex_exit(&mpt->m_mutex);
1229 1311 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1230 1312 goto fail;
1231 1313 }
1232 1314
1233 1315 /*
1234 1316 * Fill in the phy_info structure and get the base WWID
1235 1317 */
1236 1318 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1237 1319 mptsas_log(mpt, CE_WARN,
1238 1320 "mptsas_get_manufacture_page5 failed!");
1239 1321 goto fail;
1240 1322 }
1241 1323
1242 1324 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1243 1325 mptsas_log(mpt, CE_WARN,
1244 1326 "mptsas_get_sas_io_unit_page_hndshk failed!");
1245 1327 goto fail;
1246 1328 }
1247 1329
1248 1330 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1249 1331 mptsas_log(mpt, CE_WARN,
1250 1332 "mptsas_get_manufacture_page0 failed!");
1251 1333 goto fail;
1252 1334 }
1253 1335
1254 1336 mutex_exit(&mpt->m_mutex);
1255 1337
1256 1338 /*
1257 1339 * Register the iport for multiple port HBA
1258 1340 */
1259 1341 mptsas_iport_register(mpt);
1260 1342
1261 1343 /*
1262 1344 * initialize SCSI HBA transport structure
1263 1345 */
1264 1346 if (mptsas_hba_setup(mpt) == FALSE)
1265 1347 goto fail;
1266 1348 hba_attach_setup++;
1267 1349
1268 1350 if (mptsas_smp_setup(mpt) == FALSE)
1269 1351 goto fail;
1270 1352 smp_attach_setup++;
1271 1353
1272 1354 if (mptsas_cache_create(mpt) == FALSE)
1273 1355 goto fail;
1274 1356
1275 1357 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1276 1358 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1277 1359 if (mpt->m_scsi_reset_delay == 0) {
1278 1360 mptsas_log(mpt, CE_NOTE,
1279 1361 "scsi_reset_delay of 0 is not recommended,"
1280 1362 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1281 1363 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1282 1364 }
1283 1365
1284 1366 /*
1285 1367 * Initialize the wait and done FIFO queue
1286 1368 */
1287 1369 mpt->m_donetail = &mpt->m_doneq;
1288 1370 mpt->m_waitqtail = &mpt->m_waitq;
1289 1371 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1290 1372 mpt->m_tx_draining = 0;
1291 1373
1292 1374 /*
1293 1375 * ioc cmd queue initialize
1294 1376 */
1295 1377 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1296 1378 mpt->m_dev_handle = 0xFFFF;
1297 1379
1298 1380 MPTSAS_ENABLE_INTR(mpt);
1299 1381
1300 1382 /*
1301 1383 * enable event notification
1302 1384 */
1303 1385 mutex_enter(&mpt->m_mutex);
1304 1386 if (mptsas_ioc_enable_event_notification(mpt)) {
1305 1387 mutex_exit(&mpt->m_mutex);
1306 1388 goto fail;
1307 1389 }
1308 1390 mutex_exit(&mpt->m_mutex);
1309 1391
1310 1392 /*
1311 1393 * Initialize PHY info for smhba
1312 1394 */
1313 1395 if (mptsas_smhba_setup(mpt)) {
1314 1396 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1315 1397 "failed");
1316 1398 goto fail;
1317 1399 }
1318 1400
1319 1401 /* Check all dma handles allocated in attach */
1320 1402 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1321 1403 != DDI_SUCCESS) ||
1322 1404 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1323 1405 != DDI_SUCCESS) ||
1324 1406 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1325 1407 != DDI_SUCCESS) ||
1326 1408 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1327 1409 != DDI_SUCCESS) ||
1328 1410 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1329 1411 != DDI_SUCCESS)) {
1330 1412 goto fail;
1331 1413 }
1332 1414
1333 1415 /* Check all acc handles allocated in attach */
1334 1416 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1335 1417 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1336 1418 != DDI_SUCCESS) ||
1337 1419 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1338 1420 != DDI_SUCCESS) ||
1339 1421 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1340 1422 != DDI_SUCCESS) ||
1341 1423 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1342 1424 != DDI_SUCCESS) ||
1343 1425 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1344 1426 != DDI_SUCCESS) ||
1345 1427 (mptsas_check_acc_handle(mpt->m_config_handle)
1346 1428 != DDI_SUCCESS)) {
1347 1429 goto fail;
1348 1430 }
1349 1431
1350 1432 /*
1351 1433 * After this point, we are not going to fail the attach.
1352 1434 */
1353 1435 /*
1354 1436 * used for mptsas_watch
1355 1437 */
1356 1438 mptsas_list_add(mpt);
1357 1439
1358 1440 mutex_enter(&mptsas_global_mutex);
1359 1441 if (mptsas_timeouts_enabled == 0) {
1360 1442 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1361 1443 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1362 1444
1363 1445 mptsas_tick = mptsas_scsi_watchdog_tick *
1364 1446 drv_usectohz((clock_t)1000000);
1365 1447
1366 1448 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1367 1449 mptsas_timeouts_enabled = 1;
1368 1450 }
1369 1451 mutex_exit(&mptsas_global_mutex);
1370 1452
1371 1453 /* Print message of HBA present */
1372 1454 ddi_report_dev(dip);
1373 1455
1374 1456 /* report idle status to pm framework */
1375 1457 if (mpt->m_options & MPTSAS_OPT_PM) {
1376 1458 (void) pm_idle_component(dip, 0);
1377 1459 }
1378 1460
1379 1461 return (DDI_SUCCESS);
1380 1462
1381 1463 fail:
1382 1464 mptsas_log(mpt, CE_WARN, "attach failed");
1383 1465 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1384 1466 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1385 1467 if (mpt) {
1386 1468 mutex_enter(&mptsas_global_mutex);
1387 1469
1388 1470 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1389 1471 timeout_id_t tid = mptsas_timeout_id;
1390 1472 mptsas_timeouts_enabled = 0;
1391 1473 mptsas_timeout_id = 0;
1392 1474 mutex_exit(&mptsas_global_mutex);
1393 1475 (void) untimeout(tid);
1394 1476 mutex_enter(&mptsas_global_mutex);
1395 1477 }
1396 1478 mutex_exit(&mptsas_global_mutex);
1397 1479 /* deallocate in reverse order */
1398 1480 mptsas_cache_destroy(mpt);
1399 1481
1400 1482 if (smp_attach_setup) {
1401 1483 mptsas_smp_teardown(mpt);
1402 1484 }
1403 1485 if (hba_attach_setup) {
1404 1486 mptsas_hba_teardown(mpt);
1405 1487 }
1406 1488
1489 + if (mpt->m_targets)
1490 + refhash_destroy(mpt->m_targets);
1491 + if (mpt->m_smp_targets)
1492 + refhash_destroy(mpt->m_smp_targets);
1493 +
1407 1494 if (mpt->m_active) {
1408 - mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1409 - sizeof (mptsas_smp_t));
1410 - mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1411 - sizeof (mptsas_target_t));
1412 1495 mptsas_free_active_slots(mpt);
1413 1496 }
1414 1497 if (intr_added) {
1415 1498 mptsas_unregister_intrs(mpt);
1416 1499 }
1417 1500
1418 1501 if (doneq_thread_create) {
1419 1502 mutex_enter(&mpt->m_doneq_mutex);
1420 1503 doneq_thread_num = mpt->m_doneq_thread_n;
1421 1504 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1422 1505 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1423 1506 mpt->m_doneq_thread_id[j].flag &=
1424 1507 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1425 1508 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1426 1509 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1427 1510 }
1428 1511 while (mpt->m_doneq_thread_n) {
1429 1512 cv_wait(&mpt->m_doneq_thread_cv,
1430 1513 &mpt->m_doneq_mutex);
1431 1514 }
1432 1515 for (j = 0; j < doneq_thread_num; j++) {
1433 1516 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1434 1517 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1435 1518 }
1436 1519 kmem_free(mpt->m_doneq_thread_id,
1437 1520 sizeof (mptsas_doneq_thread_list_t)
1438 1521 * doneq_thread_num);
1439 1522 mutex_exit(&mpt->m_doneq_mutex);
1440 1523 cv_destroy(&mpt->m_doneq_thread_cv);
1441 1524 mutex_destroy(&mpt->m_doneq_mutex);
1442 1525 }
1443 1526 if (event_taskq_create) {
1444 1527 ddi_taskq_destroy(mpt->m_event_taskq);
1445 1528 }
1446 1529 if (dr_taskq_create) {
1447 1530 ddi_taskq_destroy(mpt->m_dr_taskq);
1448 1531 }
1449 1532 if (mutex_init_done) {
1450 1533 mutex_destroy(&mpt->m_tx_waitq_mutex);
1451 1534 mutex_destroy(&mpt->m_passthru_mutex);
1452 1535 mutex_destroy(&mpt->m_mutex);
1453 1536 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1454 1537 mutex_destroy(
1455 1538 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1456 1539 }
1457 1540 cv_destroy(&mpt->m_cv);
1458 1541 cv_destroy(&mpt->m_passthru_cv);
1459 1542 cv_destroy(&mpt->m_fw_cv);
1460 1543 cv_destroy(&mpt->m_config_cv);
1461 1544 cv_destroy(&mpt->m_fw_diag_cv);
1462 1545 }
1463 1546
1464 1547 if (map_setup) {
1465 1548 mptsas_cfg_fini(mpt);
1466 1549 }
1467 1550 if (config_setup) {
1468 1551 mptsas_config_space_fini(mpt);
1469 1552 }
1470 1553 mptsas_free_handshake_msg(mpt);
1471 1554 mptsas_hba_fini(mpt);
1472 1555
1473 1556 mptsas_fm_fini(mpt);
1474 1557 ddi_soft_state_free(mptsas_state, instance);
1475 1558 ddi_prop_remove_all(dip);
1476 1559 }
1477 1560 return (DDI_FAILURE);
1478 1561 }
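The failure path above follows a staged-teardown pattern: each setup step records a flag (config_setup, map_setup, mutex_init_done, intr_added, and now the m_targets/m_smp_targets pointers checked before refhash_destroy()), and the fail label unwinds only what was actually built, in reverse order. A minimal user-space sketch of the same pattern, with all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the driver's setup steps. */
static int
setup_stage(const char *name, int should_fail)
{
    if (should_fail) {
        fprintf(stderr, "setup of %s failed\n", name);
        return (-1);
    }
    printf("set up %s\n", name);
    return (0);
}

int
main(void)
{
    void *targets = NULL;               /* stands in for a hash-table pointer */
    int mutex_init_done = 0, taskq_created = 0;

    if ((targets = malloc(64)) == NULL)
        goto fail;
    if (setup_stage("mutexes", 0) != 0)
        goto fail;
    mutex_init_done = 1;
    if (setup_stage("taskq", 1) != 0)   /* simulate a late failure */
        goto fail;
    taskq_created = 1;

    printf("attach succeeded\n");
    free(targets);
    return (0);

fail:
    /* Tear down only what was actually set up, in reverse order. */
    if (taskq_created)
        printf("destroy taskq\n");
    if (mutex_init_done)
        printf("destroy mutexes\n");
    if (targets != NULL)                /* mirrors the m_targets NULL check */
        free(targets);
    printf("attach failed\n");
    return (1);
}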
1479 1562
1480 1563 static int
1481 1564 mptsas_suspend(dev_info_t *devi)
1482 1565 {
1483 1566 mptsas_t *mpt, *g;
1484 1567 scsi_hba_tran_t *tran;
1485 1568
1486 1569 if (scsi_hba_iport_unit_address(devi)) {
1487 1570 return (DDI_SUCCESS);
1488 1571 }
1489 1572
1490 1573 if ((tran = ddi_get_driver_private(devi)) == NULL)
1491 1574 return (DDI_SUCCESS);
1492 1575
1493 1576 mpt = TRAN2MPT(tran);
1494 1577 if (!mpt) {
1495 1578 return (DDI_SUCCESS);
1496 1579 }
1497 1580
1498 1581 mutex_enter(&mpt->m_mutex);
1499 1582
1500 1583 if (mpt->m_suspended++) {
1501 1584 mutex_exit(&mpt->m_mutex);
1502 1585 return (DDI_SUCCESS);
1503 1586 }
1504 1587
1505 1588 /*
1506 1589 * Cancel timeout threads for this mpt
1507 1590 */
1508 1591 if (mpt->m_quiesce_timeid) {
1509 1592 timeout_id_t tid = mpt->m_quiesce_timeid;
1510 1593 mpt->m_quiesce_timeid = 0;
1511 1594 mutex_exit(&mpt->m_mutex);
1512 1595 (void) untimeout(tid);
1513 1596 mutex_enter(&mpt->m_mutex);
1514 1597 }
1515 1598
1516 1599 if (mpt->m_restart_cmd_timeid) {
1517 1600 timeout_id_t tid = mpt->m_restart_cmd_timeid;
1518 1601 mpt->m_restart_cmd_timeid = 0;
1519 1602 mutex_exit(&mpt->m_mutex);
1520 1603 (void) untimeout(tid);
1521 1604 mutex_enter(&mpt->m_mutex);
1522 1605 }
1523 1606
1524 1607 mutex_exit(&mpt->m_mutex);
1525 1608
1526 1609 (void) pm_idle_component(mpt->m_dip, 0);
1527 1610
1528 1611 /*
1529 1612 * Cancel watch threads if all mpts suspended
1530 1613 */
1531 1614 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1532 1615 for (g = mptsas_head; g != NULL; g = g->m_next) {
1533 1616 if (!g->m_suspended)
1534 1617 break;
1535 1618 }
1536 1619 rw_exit(&mptsas_global_rwlock);
1537 1620
1538 1621 mutex_enter(&mptsas_global_mutex);
1539 1622 if (g == NULL) {
1540 1623 timeout_id_t tid;
1541 1624
1542 1625 mptsas_timeouts_enabled = 0;
1543 1626 if (mptsas_timeout_id) {
1544 1627 tid = mptsas_timeout_id;
1545 1628 mptsas_timeout_id = 0;
1546 1629 mutex_exit(&mptsas_global_mutex);
1547 1630 (void) untimeout(tid);
1548 1631 mutex_enter(&mptsas_global_mutex);
1549 1632 }
1550 1633 if (mptsas_reset_watch) {
1551 1634 tid = mptsas_reset_watch;
1552 1635 mptsas_reset_watch = 0;
1553 1636 mutex_exit(&mptsas_global_mutex);
1554 1637 (void) untimeout(tid);
1555 1638 mutex_enter(&mptsas_global_mutex);
1556 1639 }
1557 1640 }
1558 1641 mutex_exit(&mptsas_global_mutex);
1559 1642
1560 1643 mutex_enter(&mpt->m_mutex);
1561 1644
1562 1645 /*
1563 1646 * If this mpt is not in full power(PM_LEVEL_D0), just return.
1564 1647 */
1565 1648 if ((mpt->m_options & MPTSAS_OPT_PM) &&
1566 1649 (mpt->m_power_level != PM_LEVEL_D0)) {
1567 1650 mutex_exit(&mpt->m_mutex);
1568 1651 return (DDI_SUCCESS);
1569 1652 }
1570 1653
1571 1654 /* Disable HBA interrupts in hardware */
1572 1655 MPTSAS_DISABLE_INTR(mpt);
1573 1656 /*
1574 1657 * Send RAID action system shutdown to sync IR
1575 1658 */
1576 1659 mptsas_raid_action_system_shutdown(mpt);
1577 1660
1578 1661 mutex_exit(&mpt->m_mutex);
1579 1662
1580 1663 /* drain the taskq */
1581 1664 ddi_taskq_wait(mpt->m_event_taskq);
1582 1665 ddi_taskq_wait(mpt->m_dr_taskq);
1583 1666
1584 1667 return (DDI_SUCCESS);
1585 1668 }
1586 1669
1587 1670 #ifdef __sparc
1588 1671 /*ARGSUSED*/
1589 1672 static int
1590 1673 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1591 1674 {
1592 1675 mptsas_t *mpt;
1593 1676 scsi_hba_tran_t *tran;
1594 1677
1595 1678 /*
1596 1679 * If this call is for iport, just return.
1597 1680 */
1598 1681 if (scsi_hba_iport_unit_address(devi))
1599 1682 return (DDI_SUCCESS);
1600 1683
1601 1684 if ((tran = ddi_get_driver_private(devi)) == NULL)
1602 1685 return (DDI_SUCCESS);
1603 1686
1604 1687 if ((mpt = TRAN2MPT(tran)) == NULL)
1605 1688 return (DDI_SUCCESS);
1606 1689
1607 1690 /*
1608 1691 * Send RAID action system shutdown to sync IR. Disable HBA
1609 1692 * interrupts in hardware first.
1610 1693 */
1611 1694 MPTSAS_DISABLE_INTR(mpt);
1612 1695 mptsas_raid_action_system_shutdown(mpt);
1613 1696
1614 1697 return (DDI_SUCCESS);
1615 1698 }
1616 1699 #else /* __sparc */
1617 1700 /*
1618 1701 * quiesce(9E) entry point.
1619 1702 *
1620 1703 * This function is called when the system is single-threaded at high
1621 1704 * PIL with preemption disabled. Therefore, this function must not be
1622 1705 * blocked.
1623 1706 *
1624 1707 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1625 1708 * DDI_FAILURE indicates an error condition and should almost never happen.
1626 1709 */
1627 1710 static int
1628 1711 mptsas_quiesce(dev_info_t *devi)
1629 1712 {
1630 1713 mptsas_t *mpt;
1631 1714 scsi_hba_tran_t *tran;
1632 1715
1633 1716 /*
1634 1717 * If this call is for iport, just return.
1635 1718 */
1636 1719 if (scsi_hba_iport_unit_address(devi))
1637 1720 return (DDI_SUCCESS);
1638 1721
1639 1722 if ((tran = ddi_get_driver_private(devi)) == NULL)
1640 1723 return (DDI_SUCCESS);
1641 1724
1642 1725 if ((mpt = TRAN2MPT(tran)) == NULL)
1643 1726 return (DDI_SUCCESS);
1644 1727
1645 1728 /* Disable HBA interrupts in hardware */
1646 1729 MPTSAS_DISABLE_INTR(mpt);
1647 1730 	/* Send RAID action system shutdown to sync IR */
1648 1731 mptsas_raid_action_system_shutdown(mpt);
1649 1732
1650 1733 return (DDI_SUCCESS);
1651 1734 }
1652 1735 #endif /* __sparc */
1653 1736
1654 1737 /*
1655 1738 * detach(9E). Remove all device allocations and system resources;
1656 1739 * disable device interrupts.
1657 1740 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1658 1741 */
1659 1742 static int
1660 1743 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1661 1744 {
1662 1745 /* CONSTCOND */
1663 1746 ASSERT(NO_COMPETING_THREADS);
1664 1747 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1665 1748
1666 1749 switch (cmd) {
1667 1750 case DDI_DETACH:
1668 1751 return (mptsas_do_detach(devi));
1669 1752
1670 1753 case DDI_SUSPEND:
1671 1754 return (mptsas_suspend(devi));
1672 1755
1673 1756 default:
1674 1757 return (DDI_FAILURE);
1675 1758 }
1676 1759 /* NOTREACHED */
1677 1760 }
1678 1761
1679 1762 static int
1680 1763 mptsas_do_detach(dev_info_t *dip)
1681 1764 {
1682 1765 mptsas_t *mpt;
1683 1766 scsi_hba_tran_t *tran;
1684 1767 int circ = 0;
1685 1768 int circ1 = 0;
1686 1769 mdi_pathinfo_t *pip = NULL;
1687 1770 int i;
1688 1771 int doneq_thread_num = 0;
1689 1772
1690 1773 NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1691 1774
1692 1775 if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1693 1776 return (DDI_FAILURE);
1694 1777
1695 1778 mpt = TRAN2MPT(tran);
1696 1779 if (!mpt) {
1697 1780 return (DDI_FAILURE);
1698 1781 }
1699 1782 /*
1700 1783 * Still have pathinfo child, should not detach mpt driver
1701 1784 */
1702 1785 if (scsi_hba_iport_unit_address(dip)) {
1703 1786 if (mpt->m_mpxio_enable) {
1704 1787 /*
1705 1788 * MPxIO enabled for the iport
1706 1789 */
1707 1790 ndi_devi_enter(scsi_vhci_dip, &circ1);
1708 1791 ndi_devi_enter(dip, &circ);
1709 1792 while (pip = mdi_get_next_client_path(dip, NULL)) {
1710 1793 if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1711 1794 continue;
1712 1795 }
1713 1796 ndi_devi_exit(dip, circ);
1714 1797 ndi_devi_exit(scsi_vhci_dip, circ1);
1715 1798 NDBG12(("detach failed because of "
1716 1799 "outstanding path info"));
1717 1800 return (DDI_FAILURE);
1718 1801 }
1719 1802 ndi_devi_exit(dip, circ);
1720 1803 ndi_devi_exit(scsi_vhci_dip, circ1);
1721 1804 (void) mdi_phci_unregister(dip, 0);
1722 1805 }
1723 1806
1724 1807 ddi_prop_remove_all(dip);
1725 1808
1726 1809 return (DDI_SUCCESS);
1727 1810 }
1728 1811
1729 1812 /* Make sure power level is D0 before accessing registers */
1730 1813 if (mpt->m_options & MPTSAS_OPT_PM) {
1731 1814 (void) pm_busy_component(dip, 0);
1732 1815 if (mpt->m_power_level != PM_LEVEL_D0) {
1733 1816 if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1734 1817 DDI_SUCCESS) {
1735 1818 mptsas_log(mpt, CE_WARN,
1736 1819 "mptsas%d: Raise power request failed.",
1737 1820 mpt->m_instance);
1738 1821 (void) pm_idle_component(dip, 0);
1739 1822 return (DDI_FAILURE);
1740 1823 }
1741 1824 }
1742 1825 }
1743 1826
1744 1827 /*
1745 1828 * Send RAID action system shutdown to sync IR. After action, send a
1746 1829 	 * Message Unit Reset. Since the DMA resources will be freed after that,
1747 1830 	 * setting the IOC to the READY state avoids HBA-initiated DMA operations.
1748 1831 */
1749 1832 mutex_enter(&mpt->m_mutex);
1750 1833 MPTSAS_DISABLE_INTR(mpt);
1751 1834 mptsas_raid_action_system_shutdown(mpt);
1752 1835 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1753 1836 (void) mptsas_ioc_reset(mpt, FALSE);
1754 1837 mutex_exit(&mpt->m_mutex);
1755 1838 mptsas_rem_intrs(mpt);
1756 1839 ddi_taskq_destroy(mpt->m_event_taskq);
1757 1840 ddi_taskq_destroy(mpt->m_dr_taskq);
1758 1841
1759 1842 if (mpt->m_doneq_thread_n) {
1760 1843 mutex_enter(&mpt->m_doneq_mutex);
1761 1844 doneq_thread_num = mpt->m_doneq_thread_n;
1762 1845 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1763 1846 mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1764 1847 mpt->m_doneq_thread_id[i].flag &=
1765 1848 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1766 1849 cv_signal(&mpt->m_doneq_thread_id[i].cv);
1767 1850 mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1768 1851 }
1769 1852 while (mpt->m_doneq_thread_n) {
1770 1853 cv_wait(&mpt->m_doneq_thread_cv,
1771 1854 &mpt->m_doneq_mutex);
1772 1855 }
1773 1856 for (i = 0; i < doneq_thread_num; i++) {
1774 1857 cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1775 1858 mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
1776 1859 }
1777 1860 kmem_free(mpt->m_doneq_thread_id,
1778 1861 sizeof (mptsas_doneq_thread_list_t)
1779 1862 * doneq_thread_num);
1780 1863 mutex_exit(&mpt->m_doneq_mutex);
1781 1864 cv_destroy(&mpt->m_doneq_thread_cv);
1782 1865 mutex_destroy(&mpt->m_doneq_mutex);
1783 1866 }
1784 1867
1785 1868 scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
1786 1869
1787 1870 mptsas_list_del(mpt);
1788 1871
1789 1872 /*
1790 1873 * Cancel timeout threads for this mpt
1791 1874 */
1792 1875 mutex_enter(&mpt->m_mutex);
1793 1876 if (mpt->m_quiesce_timeid) {
1794 1877 timeout_id_t tid = mpt->m_quiesce_timeid;
1795 1878 mpt->m_quiesce_timeid = 0;
1796 1879 mutex_exit(&mpt->m_mutex);
1797 1880 (void) untimeout(tid);
1798 1881 mutex_enter(&mpt->m_mutex);
1799 1882 }
1800 1883
1801 1884 if (mpt->m_restart_cmd_timeid) {
1802 1885 timeout_id_t tid = mpt->m_restart_cmd_timeid;
1803 1886 mpt->m_restart_cmd_timeid = 0;
1804 1887 mutex_exit(&mpt->m_mutex);
1805 1888 (void) untimeout(tid);
1806 1889 mutex_enter(&mpt->m_mutex);
1807 1890 }
1808 1891
1809 1892 mutex_exit(&mpt->m_mutex);
1810 1893
1811 1894 /*
1812 1895 * last mpt? ... if active, CANCEL watch threads.
1813 1896 */
1814 1897 mutex_enter(&mptsas_global_mutex);
1815 1898 if (mptsas_head == NULL) {
1816 1899 timeout_id_t tid;
1817 1900 /*
1818 1901 		 * Clear mptsas_timeouts_enabled so that the watch thread
1819 1902 * gets restarted on DDI_ATTACH
1820 1903 */
1821 1904 mptsas_timeouts_enabled = 0;
1822 1905 if (mptsas_timeout_id) {
1823 1906 tid = mptsas_timeout_id;
1824 1907 mptsas_timeout_id = 0;
1825 1908 mutex_exit(&mptsas_global_mutex);
1826 1909 (void) untimeout(tid);
1827 1910 mutex_enter(&mptsas_global_mutex);
1828 1911 }
1829 1912 if (mptsas_reset_watch) {
1830 1913 tid = mptsas_reset_watch;
1831 1914 mptsas_reset_watch = 0;
1832 1915 mutex_exit(&mptsas_global_mutex);
1833 1916 (void) untimeout(tid);
1834 1917 mutex_enter(&mptsas_global_mutex);
1835 1918 }
1836 1919 }
1837 1920 mutex_exit(&mptsas_global_mutex);
1838 1921
1839 1922 /*
1840 1923 * Delete Phy stats
1841 1924 */
1842 1925 mptsas_destroy_phy_stats(mpt);
1843 1926
1927 + mptsas_destroy_hashes(mpt);
1928 +
1844 1929 /*
1845 1930 	 * Delete m_active.
1846 1931 */
1847 1932 mutex_enter(&mpt->m_mutex);
1848 - mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
1849 - mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
1850 1933 mptsas_free_active_slots(mpt);
1851 1934 mutex_exit(&mpt->m_mutex);
1852 1935
1853 1936 /* deallocate everything that was allocated in mptsas_attach */
1854 1937 mptsas_cache_destroy(mpt);
1855 1938
1856 1939 mptsas_hba_fini(mpt);
1857 1940 mptsas_cfg_fini(mpt);
1858 1941
1859 1942 /* Lower the power informing PM Framework */
1860 1943 if (mpt->m_options & MPTSAS_OPT_PM) {
1861 1944 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
1862 1945 mptsas_log(mpt, CE_WARN,
1863 1946 "!mptsas%d: Lower power request failed "
1864 1947 "during detach, ignoring.",
1865 1948 mpt->m_instance);
1866 1949 }
1867 1950
1868 1951 mutex_destroy(&mpt->m_tx_waitq_mutex);
1869 1952 mutex_destroy(&mpt->m_passthru_mutex);
1870 1953 mutex_destroy(&mpt->m_mutex);
1871 1954 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1872 1955 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
1873 1956 }
1874 1957 cv_destroy(&mpt->m_cv);
1875 1958 cv_destroy(&mpt->m_passthru_cv);
1876 1959 cv_destroy(&mpt->m_fw_cv);
1877 1960 cv_destroy(&mpt->m_config_cv);
1878 1961 cv_destroy(&mpt->m_fw_diag_cv);
1879 1962
1880 1963
1881 1964 mptsas_smp_teardown(mpt);
1882 1965 mptsas_hba_teardown(mpt);
1883 1966
1884 1967 mptsas_config_space_fini(mpt);
1885 1968
1886 1969 mptsas_free_handshake_msg(mpt);
1887 1970
1888 1971 mptsas_fm_fini(mpt);
1889 1972 ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
1890 1973 ddi_prop_remove_all(dip);
1891 1974
1892 1975 return (DDI_SUCCESS);
1893 1976 }
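The doneq worker shutdown in mptsas_do_detach() (and the equivalent block in the attach failure path) follows a common handshake: clear each thread's ACTIVE flag, signal its cv, then block on m_doneq_thread_cv until m_doneq_thread_n reaches zero before freeing the per-thread state. A condensed pthreads sketch of that handshake, with illustrative names rather than the driver's:

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 3

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cv = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exit_cv = PTHREAD_COND_INITIALIZER;
static int active = 1;              /* cleared to ask the workers to exit */
static int nworkers = NTHREADS;     /* count of live workers */

static void *
worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (active)                  /* a real worker would process a done queue here */
        pthread_cond_wait(&work_cv, &lock);
    nworkers--;                     /* the last one out wakes the waiter */
    pthread_cond_signal(&exit_cv);
    pthread_mutex_unlock(&lock);
    return (NULL);
}

int
main(void)
{
    pthread_t tid[NTHREADS];
    int i;

    for (i = 0; i < NTHREADS; i++)
        pthread_create(&tid[i], NULL, worker, NULL);

    /* Shutdown handshake: clear the flag, wake everyone, wait for zero. */
    pthread_mutex_lock(&lock);
    active = 0;
    pthread_cond_broadcast(&work_cv);
    while (nworkers > 0)
        pthread_cond_wait(&exit_cv, &lock);
    pthread_mutex_unlock(&lock);

    for (i = 0; i < NTHREADS; i++)
        pthread_join(tid[i], NULL);
    printf("all workers stopped\n");
    return (0);
}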
1894 1977
1895 1978 static void
1896 1979 mptsas_list_add(mptsas_t *mpt)
1897 1980 {
1898 1981 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1899 1982
1900 1983 if (mptsas_head == NULL) {
1901 1984 mptsas_head = mpt;
1902 1985 } else {
1903 1986 mptsas_tail->m_next = mpt;
1904 1987 }
1905 1988 mptsas_tail = mpt;
1906 1989 rw_exit(&mptsas_global_rwlock);
1907 1990 }
1908 1991
1909 1992 static void
1910 1993 mptsas_list_del(mptsas_t *mpt)
1911 1994 {
1912 1995 mptsas_t *m;
1913 1996 /*
1914 1997 * Remove device instance from the global linked list
1915 1998 */
1916 1999 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1917 2000 if (mptsas_head == mpt) {
1918 2001 m = mptsas_head = mpt->m_next;
1919 2002 } else {
1920 2003 for (m = mptsas_head; m != NULL; m = m->m_next) {
1921 2004 if (m->m_next == mpt) {
1922 2005 m->m_next = mpt->m_next;
1923 2006 break;
1924 2007 }
1925 2008 }
1926 2009 if (m == NULL) {
1927 2010 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1928 2011 }
1929 2012 }
1930 2013
1931 2014 if (mptsas_tail == mpt) {
1932 2015 mptsas_tail = m;
1933 2016 }
1934 2017 rw_exit(&mptsas_global_rwlock);
1935 2018 }
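mptsas_list_add() and mptsas_list_del() maintain a singly linked list with a separate tail pointer under mptsas_global_rwlock; the subtle case in the delete path is that when the removed instance was the tail, the predecessor (or the new head) must become the new tail. A small stand-alone sketch of the same bookkeeping, locking omitted:

#include <stdio.h>

typedef struct node {
    int id;
    struct node *next;
} node_t;

static node_t *head, *tail;

static void
list_add(node_t *n)
{
    n->next = NULL;
    if (head == NULL)
        head = n;
    else
        tail->next = n;
    tail = n;
}

static void
list_del(node_t *n)
{
    node_t *m;

    if (head == n) {
        m = head = n->next;
    } else {
        for (m = head; m != NULL; m = m->next) {
            if (m->next == n) {
                m->next = n->next;
                break;
            }
        }
    }
    /* If the removed node was the tail, its predecessor (or new head) is the tail. */
    if (tail == n)
        tail = m;
}

int
main(void)
{
    node_t a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

    list_add(&a);
    list_add(&b);
    list_add(&c);
    list_del(&c);                           /* delete the tail */
    printf("tail is now %d\n", tail->id);   /* prints 2 */
    return (0);
}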
1936 2019
1937 2020 static int
1938 2021 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1939 2022 {
1940 2023 ddi_dma_attr_t task_dma_attrs;
1941 2024
1942 2025 mpt->m_hshk_dma_size = 0;
1943 2026 task_dma_attrs = mpt->m_msg_dma_attr;
1944 2027 task_dma_attrs.dma_attr_sgllen = 1;
1945 2028 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1946 2029
1947 2030 /* allocate Task Management ddi_dma resources */
1948 2031 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
1949 2032 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
1950 2033 alloc_size, NULL) == FALSE) {
1951 2034 return (DDI_FAILURE);
1952 2035 }
1953 2036 mpt->m_hshk_dma_size = alloc_size;
1954 2037
1955 2038 return (DDI_SUCCESS);
1956 2039 }
1957 2040
1958 2041 static void
1959 2042 mptsas_free_handshake_msg(mptsas_t *mpt)
1960 2043 {
1961 2044 if (mpt->m_hshk_dma_size == 0)
1962 2045 return;
1963 2046 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
1964 2047 mpt->m_hshk_dma_size = 0;
1965 2048 }
1966 2049
1967 2050 static int
1968 2051 mptsas_hba_setup(mptsas_t *mpt)
1969 2052 {
1970 2053 scsi_hba_tran_t *hba_tran;
1971 2054 int tran_flags;
1972 2055
1973 2056 /* Allocate a transport structure */
1974 2057 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
1975 2058 SCSI_HBA_CANSLEEP);
1976 2059 ASSERT(mpt->m_tran != NULL);
1977 2060
1978 2061 hba_tran->tran_hba_private = mpt;
1979 2062 hba_tran->tran_tgt_private = NULL;
1980 2063
1981 2064 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
1982 2065 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;
1983 2066
1984 2067 hba_tran->tran_start = mptsas_scsi_start;
1985 2068 hba_tran->tran_reset = mptsas_scsi_reset;
1986 2069 hba_tran->tran_abort = mptsas_scsi_abort;
1987 2070 hba_tran->tran_getcap = mptsas_scsi_getcap;
1988 2071 hba_tran->tran_setcap = mptsas_scsi_setcap;
1989 2072 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
1990 2073 hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;
1991 2074
1992 2075 hba_tran->tran_dmafree = mptsas_scsi_dmafree;
1993 2076 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
1994 2077 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;
1995 2078
1996 2079 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
1997 2080 hba_tran->tran_get_name = mptsas_get_name;
1998 2081
1999 2082 hba_tran->tran_quiesce = mptsas_scsi_quiesce;
2000 2083 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
2001 2084 hba_tran->tran_bus_reset = NULL;
2002 2085
2003 2086 hba_tran->tran_add_eventcall = NULL;
2004 2087 hba_tran->tran_get_eventcookie = NULL;
2005 2088 hba_tran->tran_post_event = NULL;
2006 2089 hba_tran->tran_remove_eventcall = NULL;
2007 2090
2008 2091 hba_tran->tran_bus_config = mptsas_bus_config;
2009 2092
2010 2093 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2011 2094
2012 2095 /*
2013 2096 	 * All children of the HBA are iports. We need the tran to be cloned,
2014 2097 	 * so we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2015 2098 	 * inherited by each iport's tran vector.
2016 2099 */
2017 2100 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2018 2101
2019 2102 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2020 2103 hba_tran, tran_flags) != DDI_SUCCESS) {
2021 2104 mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2022 2105 scsi_hba_tran_free(hba_tran);
2023 2106 mpt->m_tran = NULL;
2024 2107 return (FALSE);
2025 2108 }
2026 2109 return (TRUE);
2027 2110 }
2028 2111
2029 2112 static void
2030 2113 mptsas_hba_teardown(mptsas_t *mpt)
2031 2114 {
2032 2115 (void) scsi_hba_detach(mpt->m_dip);
2033 2116 if (mpt->m_tran != NULL) {
2034 2117 scsi_hba_tran_free(mpt->m_tran);
2035 2118 mpt->m_tran = NULL;
2036 2119 }
2037 2120 }
2038 2121
2039 2122 static void
2040 2123 mptsas_iport_register(mptsas_t *mpt)
2041 2124 {
2042 2125 int i, j;
2043 2126 mptsas_phymask_t mask = 0x0;
2044 2127 /*
2045 2128 * initial value of mask is 0
2046 2129 */
2047 2130 mutex_enter(&mpt->m_mutex);
2048 2131 for (i = 0; i < mpt->m_num_phys; i++) {
2049 2132 mptsas_phymask_t phy_mask = 0x0;
2050 2133 char phy_mask_name[MPTSAS_MAX_PHYS];
2051 2134 uint8_t current_port;
2052 2135
2053 2136 if (mpt->m_phy_info[i].attached_devhdl == 0)
2054 2137 continue;
2055 2138
2056 2139 bzero(phy_mask_name, sizeof (phy_mask_name));
2057 2140
2058 2141 current_port = mpt->m_phy_info[i].port_num;
2059 2142
2060 2143 if ((mask & (1 << i)) != 0)
2061 2144 continue;
2062 2145
2063 2146 for (j = 0; j < mpt->m_num_phys; j++) {
2064 2147 if (mpt->m_phy_info[j].attached_devhdl &&
2065 2148 (mpt->m_phy_info[j].port_num == current_port)) {
2066 2149 phy_mask |= (1 << j);
2067 2150 }
2068 2151 }
2069 2152 mask = mask | phy_mask;
2070 2153
2071 2154 for (j = 0; j < mpt->m_num_phys; j++) {
2072 2155 if ((phy_mask >> j) & 0x01) {
2073 2156 mpt->m_phy_info[j].phy_mask = phy_mask;
2074 2157 }
2075 2158 }
2076 2159
2077 2160 (void) sprintf(phy_mask_name, "%x", phy_mask);
2078 2161
2079 2162 mutex_exit(&mpt->m_mutex);
2080 2163 /*
2081 2164 		 * register an iport
2082 2165 */
2083 2166 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
2084 2167 mutex_enter(&mpt->m_mutex);
2085 2168 }
2086 2169 mutex_exit(&mpt->m_mutex);
2087 2170 /*
2088 2171 * register a virtual port for RAID volume always
2089 2172 */
2090 2173 (void) scsi_hba_iport_register(mpt->m_dip, "v0");
2091 2174
2092 2175 }
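mptsas_iport_register() derives one phy mask per port: for every phy with an attached device it ORs in the bit of each phy that reports the same port number, remembers which phys are already covered, and registers an iport named after the mask in hex. A stand-alone sketch of that mask computation with made-up port assignments:

#include <stdio.h>

#define NPHYS 8

int
main(void)
{
    /* port_num[i] is the port each phy reports; -1 means nothing attached. */
    int port_num[NPHYS] = { 0, 0, 0, 0, 1, 1, -1, -1 };
    unsigned int covered = 0;        /* phys already included in an earlier mask */
    int i, j;

    for (i = 0; i < NPHYS; i++) {
        unsigned int phy_mask = 0;

        if (port_num[i] < 0)         /* no attached device on this phy */
            continue;
        if (covered & (1u << i))     /* already part of a registered port */
            continue;

        for (j = 0; j < NPHYS; j++) {
            if (port_num[j] == port_num[i])
                phy_mask |= (1u << j);
        }
        covered |= phy_mask;
        printf("register iport \"%x\"\n", phy_mask);   /* e.g. "f" then "30" */
    }
    return (0);
}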
2093 2176
2094 2177 static int
2095 2178 mptsas_smp_setup(mptsas_t *mpt)
2096 2179 {
2097 2180 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2098 2181 ASSERT(mpt->m_smptran != NULL);
2099 2182 mpt->m_smptran->smp_tran_hba_private = mpt;
2100 2183 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2101 2184 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2102 2185 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2103 2186 smp_hba_tran_free(mpt->m_smptran);
2104 2187 mpt->m_smptran = NULL;
2105 2188 return (FALSE);
2106 2189 }
2107 2190 /*
2108 2191 * Initialize smp hash table
2109 2192 */
2110 - mptsas_hash_init(&mpt->m_active->m_smptbl);
2193 + mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
2194 + mptsas_target_addr_hash, mptsas_target_addr_cmp,
2195 + mptsas_smp_free, sizeof (mptsas_smp_t),
2196 + offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
2197 + KM_SLEEP);
2111 2198 mpt->m_smp_devhdl = 0xFFFF;
2112 2199
2113 2200 return (TRUE);
2114 2201 }
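The SMP table above (and the target table created elsewhere in attach) now uses the generic refhash instead of the removed mptsas_hash_* code. The shape of the interface is visible in the call: the table holds whole objects, locating the embedded link and the embedded address key via offsetof(), and hashing and comparing through caller-supplied functions keyed on (WWN, phymask). The following is a simplified user-space sketch of an intrusive hash built the same way; it illustrates the idea and is not the illumos refhash implementation:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define NBUCKETS 8

typedef struct addr {                  /* composite key: SAS WWN + phymask */
    uint64_t wwn;
    uint32_t phymask;
} addr_t;

typedef struct hlink {                 /* intrusive link embedded in each object */
    struct hlink *next;
} hlink_t;

typedef struct hash {
    hlink_t *buckets[NBUCKETS];
    size_t link_off;                   /* offsetof() of the link in the object */
    size_t key_off;                    /* offsetof() of the key in the object */
    uint64_t (*hashfn)(const void *);
    int (*cmpfn)(const void *, const void *);
} hash_t;

static uint64_t
addr_hash(const void *key)
{
    const addr_t *a = key;
    return (a->wwn ^ a->phymask);
}

static int
addr_cmp(const void *k1, const void *k2)
{
    const addr_t *a1 = k1, *a2 = k2;
    return (a1->wwn != a2->wwn || a1->phymask != a2->phymask);
}

static void
hash_insert(hash_t *h, void *obj)
{
    void *key = (char *)obj + h->key_off;
    hlink_t *l = (hlink_t *)((char *)obj + h->link_off);
    uint64_t b = h->hashfn(key) % NBUCKETS;

    l->next = h->buckets[b];
    h->buckets[b] = l;
}

static void *
hash_lookup(hash_t *h, const void *key)
{
    uint64_t b = h->hashfn(key) % NBUCKETS;
    hlink_t *l;

    for (l = h->buckets[b]; l != NULL; l = l->next) {
        void *obj = (char *)l - h->link_off;
        if (h->cmpfn((char *)obj + h->key_off, key) == 0)
            return (obj);
    }
    return (NULL);
}

typedef struct smp_node {              /* stand-in for mptsas_smp_t */
    addr_t addr;
    hlink_t link;
    int devhdl;
} smp_node_t;

int
main(void)
{
    hash_t h = { { NULL }, offsetof(smp_node_t, link),
        offsetof(smp_node_t, addr), addr_hash, addr_cmp };
    smp_node_t n = { { 0x5000c5001234abcdULL, 0x3 }, { NULL }, 42 };
    addr_t key = { 0x5000c5001234abcdULL, 0x3 };
    smp_node_t *found;

    hash_insert(&h, &n);
    found = hash_lookup(&h, &key);
    printf("devhdl %d\n", found != NULL ? found->devhdl : -1);   /* prints 42 */
    return (0);
}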
2115 2202
2116 2203 static void
2117 2204 mptsas_smp_teardown(mptsas_t *mpt)
2118 2205 {
2119 2206 (void) smp_hba_detach(mpt->m_dip);
2120 2207 if (mpt->m_smptran != NULL) {
2121 2208 smp_hba_tran_free(mpt->m_smptran);
2122 2209 mpt->m_smptran = NULL;
2123 2210 }
2124 2211 mpt->m_smp_devhdl = 0;
2125 2212 }
2126 2213
2127 2214 static int
2128 2215 mptsas_cache_create(mptsas_t *mpt)
2129 2216 {
2130 2217 int instance = mpt->m_instance;
2131 2218 char buf[64];
2132 2219
2133 2220 /*
2134 2221 * create kmem cache for packets
2135 2222 */
2136 2223 (void) sprintf(buf, "mptsas%d_cache", instance);
2137 2224 mpt->m_kmem_cache = kmem_cache_create(buf,
2138 2225 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2139 2226 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2140 2227 NULL, (void *)mpt, NULL, 0);
2141 2228
2142 2229 if (mpt->m_kmem_cache == NULL) {
2143 2230 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2144 2231 return (FALSE);
2145 2232 }
2146 2233
2147 2234 /*
2148 2235 * create kmem cache for extra SGL frames if SGL cannot
2149 2236 	 * be accommodated into the main request frame.
2150 2237 */
2151 2238 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2152 2239 mpt->m_cache_frames = kmem_cache_create(buf,
2153 2240 sizeof (mptsas_cache_frames_t), 8,
2154 2241 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2155 2242 NULL, (void *)mpt, NULL, 0);
2156 2243
2157 2244 if (mpt->m_cache_frames == NULL) {
2158 2245 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2159 2246 return (FALSE);
2160 2247 }
2161 2248
2162 2249 return (TRUE);
2163 2250 }
2164 2251
2165 2252 static void
2166 2253 mptsas_cache_destroy(mptsas_t *mpt)
2167 2254 {
2168 2255 /* deallocate in reverse order */
2169 2256 if (mpt->m_cache_frames) {
2170 2257 kmem_cache_destroy(mpt->m_cache_frames);
2171 2258 mpt->m_cache_frames = NULL;
2172 2259 }
2173 2260 if (mpt->m_kmem_cache) {
2174 2261 kmem_cache_destroy(mpt->m_kmem_cache);
2175 2262 mpt->m_kmem_cache = NULL;
2176 2263 }
2177 2264 }
2178 2265
2179 2266 static int
2180 2267 mptsas_power(dev_info_t *dip, int component, int level)
2181 2268 {
2182 2269 #ifndef __lock_lint
2183 2270 _NOTE(ARGUNUSED(component))
2184 2271 #endif
2185 2272 mptsas_t *mpt;
2186 2273 int rval = DDI_SUCCESS;
2187 2274 int polls = 0;
2188 2275 uint32_t ioc_status;
2189 2276
2190 2277 if (scsi_hba_iport_unit_address(dip) != 0)
2191 2278 return (DDI_SUCCESS);
2192 2279
2193 2280 mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
2194 2281 if (mpt == NULL) {
2195 2282 return (DDI_FAILURE);
2196 2283 }
2197 2284
2198 2285 mutex_enter(&mpt->m_mutex);
2199 2286
2200 2287 /*
2201 2288 * If the device is busy, don't lower its power level
2202 2289 */
2203 2290 if (mpt->m_busy && (mpt->m_power_level > level)) {
2204 2291 mutex_exit(&mpt->m_mutex);
2205 2292 return (DDI_FAILURE);
2206 2293 }
2207 2294 switch (level) {
2208 2295 case PM_LEVEL_D0:
2209 2296 NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
2210 2297 MPTSAS_POWER_ON(mpt);
2211 2298 /*
2212 2299 * Wait up to 30 seconds for IOC to come out of reset.
2213 2300 */
2214 2301 while (((ioc_status = ddi_get32(mpt->m_datap,
2215 2302 &mpt->m_reg->Doorbell)) &
2216 2303 MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2217 2304 if (polls++ > 3000) {
2218 2305 break;
2219 2306 }
2220 2307 delay(drv_usectohz(10000));
2221 2308 }
2222 2309 /*
2223 2310 * If IOC is not in operational state, try to hard reset it.
2224 2311 */
2225 2312 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2226 2313 MPI2_IOC_STATE_OPERATIONAL) {
2227 2314 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2228 2315 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2229 2316 mptsas_log(mpt, CE_WARN,
2230 2317 "mptsas_power: hard reset failed");
2231 2318 mutex_exit(&mpt->m_mutex);
2232 2319 return (DDI_FAILURE);
2233 2320 }
2234 2321 }
2235 2322 mpt->m_power_level = PM_LEVEL_D0;
2236 2323 break;
2237 2324 case PM_LEVEL_D3:
2238 2325 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2239 2326 MPTSAS_POWER_OFF(mpt);
2240 2327 break;
2241 2328 default:
2242 2329 mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2243 2330 mpt->m_instance, level);
2244 2331 rval = DDI_FAILURE;
2245 2332 break;
2246 2333 }
2247 2334 mutex_exit(&mpt->m_mutex);
2248 2335 return (rval);
2249 2336 }
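In the PM_LEVEL_D0 case, mptsas_power() polls the Doorbell register every 10 ms and gives the IOC up to 3000 polls (roughly 30 seconds) to leave the RESET state before attempting a hard reset. A minimal sketch of that bounded polling loop; the ready_after counter simply stands in for the hardware state:

#include <stdio.h>
#include <unistd.h>

#define MAX_POLLS 3000              /* 3000 polls * 10 ms = ~30 seconds */

static int ready_after = 5;         /* pretend the IOC becomes ready after 5 polls */

static int
device_ready(void)
{
    return (ready_after-- <= 0);
}

int
main(void)
{
    int polls = 0;

    while (!device_ready()) {
        if (polls++ > MAX_POLLS) {
            fprintf(stderr, "timed out waiting for the device; hard reset next\n");
            return (1);
        }
        usleep(10000);              /* 10 ms between polls */
    }
    printf("device ready after %d polls\n", polls);
    return (0);
}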
2250 2337
2251 2338 /*
2252 2339 * Initialize configuration space and figure out which
2253 2340  * chip and revision of the chip the mpt driver is using.
2254 2341 */
2255 2342 static int
2256 2343 mptsas_config_space_init(mptsas_t *mpt)
2257 2344 {
2258 2345 NDBG0(("mptsas_config_space_init"));
2259 2346
2260 2347 if (mpt->m_config_handle != NULL)
2261 2348 return (TRUE);
2262 2349
2263 2350 if (pci_config_setup(mpt->m_dip,
2264 2351 &mpt->m_config_handle) != DDI_SUCCESS) {
2265 2352 mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2266 2353 return (FALSE);
2267 2354 }
2268 2355
2269 2356 /*
2270 2357 * This is a workaround for a XMITS ASIC bug which does not
2271 2358 * drive the CBE upper bits.
2272 2359 */
2273 2360 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2274 2361 PCI_STAT_PERROR) {
2275 2362 pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2276 2363 PCI_STAT_PERROR);
2277 2364 }
2278 2365
2279 2366 mptsas_setup_cmd_reg(mpt);
2280 2367
2281 2368 /*
2282 2369 * Get the chip device id:
2283 2370 */
2284 2371 mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2285 2372
2286 2373 /*
2287 2374 * Save the revision.
2288 2375 */
2289 2376 mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2290 2377
2291 2378 /*
2292 2379 * Save the SubSystem Vendor and Device IDs
2293 2380 */
2294 2381 mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2295 2382 mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2296 2383
2297 2384 /*
2298 2385 * Set the latency timer to 0x40 as specified by the upa -> pci
2299 2386 * bridge chip design team. This may be done by the sparc pci
2300 2387 * bus nexus driver, but the driver should make sure the latency
2301 2388 * timer is correct for performance reasons.
2302 2389 */
2303 2390 pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2304 2391 MPTSAS_LATENCY_TIMER);
2305 2392
2306 2393 (void) mptsas_get_pci_cap(mpt);
2307 2394 return (TRUE);
2308 2395 }
2309 2396
2310 2397 static void
2311 2398 mptsas_config_space_fini(mptsas_t *mpt)
2312 2399 {
2313 2400 if (mpt->m_config_handle != NULL) {
2314 2401 mptsas_disable_bus_master(mpt);
2315 2402 pci_config_teardown(&mpt->m_config_handle);
2316 2403 mpt->m_config_handle = NULL;
2317 2404 }
2318 2405 }
2319 2406
2320 2407 static void
2321 2408 mptsas_setup_cmd_reg(mptsas_t *mpt)
2322 2409 {
2323 2410 ushort_t cmdreg;
2324 2411
2325 2412 /*
2326 2413 * Set the command register to the needed values.
2327 2414 */
2328 2415 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2329 2416 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2330 2417 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2331 2418 cmdreg &= ~PCI_COMM_IO;
2332 2419 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2333 2420 }
2334 2421
2335 2422 static void
2336 2423 mptsas_disable_bus_master(mptsas_t *mpt)
2337 2424 {
2338 2425 ushort_t cmdreg;
2339 2426
2340 2427 /*
2341 2428 * Clear the master enable bit in the PCI command register.
2342 2429 * This prevents any bus mastering activity like DMA.
2343 2430 */
2344 2431 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2345 2432 cmdreg &= ~PCI_COMM_ME;
2346 2433 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2347 2434 }
2348 2435
2349 2436 int
2350 2437 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2351 2438 {
2352 2439 ddi_dma_attr_t attrs;
2353 2440
2354 2441 attrs = mpt->m_io_dma_attr;
2355 2442 attrs.dma_attr_sgllen = 1;
2356 2443
2357 2444 ASSERT(dma_statep != NULL);
2358 2445
2359 2446 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2360 2447 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2361 2448 &dma_statep->cookie) == FALSE) {
2362 2449 return (DDI_FAILURE);
2363 2450 }
2364 2451
2365 2452 return (DDI_SUCCESS);
2366 2453 }
2367 2454
2368 2455 void
2369 2456 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2370 2457 {
2371 2458 ASSERT(dma_statep != NULL);
2372 2459 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2373 2460 dma_statep->size = 0;
2374 2461 }
2375 2462
2376 2463 int
2377 2464 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2378 2465 {
2379 2466 ddi_dma_attr_t attrs;
2380 2467 ddi_dma_handle_t dma_handle;
2381 2468 caddr_t memp;
2382 2469 ddi_acc_handle_t accessp;
2383 2470 int rval;
2384 2471
2385 2472 ASSERT(mutex_owned(&mpt->m_mutex));
2386 2473
2387 2474 attrs = mpt->m_msg_dma_attr;
2388 2475 attrs.dma_attr_sgllen = 1;
2389 2476 attrs.dma_attr_granular = size;
2390 2477
2391 2478 if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2392 2479 &accessp, &memp, size, NULL) == FALSE) {
2393 2480 return (DDI_FAILURE);
2394 2481 }
2395 2482
2396 2483 rval = (*callback) (mpt, memp, var, accessp);
2397 2484
2398 2485 if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2399 2486 (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2400 2487 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2401 2488 rval = DDI_FAILURE;
2402 2489 }
2403 2490
2404 2491 mptsas_dma_addr_destroy(&dma_handle, &accessp);
2405 2492 return (rval);
2406 2493
2407 2494 }
2408 2495
2409 2496 static int
2410 2497 mptsas_alloc_request_frames(mptsas_t *mpt)
2411 2498 {
2412 2499 ddi_dma_attr_t frame_dma_attrs;
2413 2500 caddr_t memp;
2414 2501 ddi_dma_cookie_t cookie;
2415 2502 size_t mem_size;
2416 2503
2417 2504 /*
2418 2505 * re-alloc when it has already alloced
2419 2506 */
2420 2507 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2421 2508 &mpt->m_acc_req_frame_hdl);
2422 2509
2423 2510 /*
2424 2511 * The size of the request frame pool is:
2425 2512 * Number of Request Frames * Request Frame Size
2426 2513 */
2427 2514 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2428 2515
2429 2516 /*
2430 2517 * set the DMA attributes. System Request Message Frames must be
2431 2518 * aligned on a 16-byte boundry.
2432 2519 	 * aligned on a 16-byte boundary.
2433 2520 frame_dma_attrs = mpt->m_msg_dma_attr;
2434 2521 frame_dma_attrs.dma_attr_align = 16;
2435 2522 frame_dma_attrs.dma_attr_sgllen = 1;
2436 2523
2437 2524 /*
2438 2525 * allocate the request frame pool.
2439 2526 */
2440 2527 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2441 2528 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2442 2529 mem_size, &cookie) == FALSE) {
2443 2530 return (DDI_FAILURE);
2444 2531 }
2445 2532
2446 2533 /*
2447 2534 * Store the request frame memory address. This chip uses this
2448 2535 * address to dma to and from the driver's frame. The second
2449 2536 * address is the address mpt uses to fill in the frame.
2450 2537 */
2451 2538 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2452 2539 mpt->m_req_frame = memp;
2453 2540
2454 2541 /*
2455 2542 * Clear the request frame pool.
2456 2543 */
2457 2544 bzero(mpt->m_req_frame, mem_size);
2458 2545
2459 2546 return (DDI_SUCCESS);
2460 2547 }
2461 2548
2462 2549 static int
2463 2550 mptsas_alloc_reply_frames(mptsas_t *mpt)
2464 2551 {
2465 2552 ddi_dma_attr_t frame_dma_attrs;
2466 2553 caddr_t memp;
2467 2554 ddi_dma_cookie_t cookie;
2468 2555 size_t mem_size;
2469 2556
2470 2557 /*
2471 2558 * re-alloc when it has already alloced
2472 2559 */
2473 2560 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2474 2561 &mpt->m_acc_reply_frame_hdl);
2475 2562
2476 2563 /*
2477 2564 * The size of the reply frame pool is:
2478 2565 * Number of Reply Frames * Reply Frame Size
2479 2566 */
2480 2567 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2481 2568
2482 2569 /*
2483 2570 * set the DMA attributes. System Reply Message Frames must be
2484 2571 	 * aligned on a 4-byte boundary. This is the default.
2485 2572 */
2486 2573 frame_dma_attrs = mpt->m_msg_dma_attr;
2487 2574 frame_dma_attrs.dma_attr_sgllen = 1;
2488 2575
2489 2576 /*
2490 2577 * allocate the reply frame pool
2491 2578 */
2492 2579 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2493 2580 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2494 2581 mem_size, &cookie) == FALSE) {
2495 2582 return (DDI_FAILURE);
2496 2583 }
2497 2584
2498 2585 /*
2499 2586 * Store the reply frame memory address. This chip uses this
2500 2587 * address to dma to and from the driver's frame. The second
2501 2588 * address is the address mpt uses to process the frame.
2502 2589 */
2503 2590 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2504 2591 mpt->m_reply_frame = memp;
2505 2592
2506 2593 /*
2507 2594 * Clear the reply frame pool.
2508 2595 */
2509 2596 bzero(mpt->m_reply_frame, mem_size);
2510 2597
2511 2598 return (DDI_SUCCESS);
2512 2599 }
2513 2600
2514 2601 static int
2515 2602 mptsas_alloc_free_queue(mptsas_t *mpt)
2516 2603 {
2517 2604 ddi_dma_attr_t frame_dma_attrs;
2518 2605 caddr_t memp;
2519 2606 ddi_dma_cookie_t cookie;
2520 2607 size_t mem_size;
2521 2608
2522 2609 /*
2523 2610 * re-alloc when it has already alloced
2524 2611 */
2525 2612 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2526 2613 &mpt->m_acc_free_queue_hdl);
2527 2614
2528 2615 /*
2529 2616 * The reply free queue size is:
2530 2617 * Reply Free Queue Depth * 4
2531 2618 * The "4" is the size of one 32 bit address (low part of 64-bit
2532 2619 * address)
2533 2620 */
2534 2621 mem_size = mpt->m_free_queue_depth * 4;
2535 2622
2536 2623 /*
2537 2624 	 * set the DMA attributes. The Reply Free Queue must be aligned on a
2538 2625 	 * 16-byte boundary.
2539 2626 */
2540 2627 frame_dma_attrs = mpt->m_msg_dma_attr;
2541 2628 frame_dma_attrs.dma_attr_align = 16;
2542 2629 frame_dma_attrs.dma_attr_sgllen = 1;
2543 2630
2544 2631 /*
2545 2632 * allocate the reply free queue
2546 2633 */
2547 2634 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2548 2635 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2549 2636 mem_size, &cookie) == FALSE) {
2550 2637 return (DDI_FAILURE);
2551 2638 }
2552 2639
2553 2640 /*
2554 2641 * Store the reply free queue memory address. This chip uses this
2555 2642 * address to read from the reply free queue. The second address
2556 2643 * is the address mpt uses to manage the queue.
2557 2644 */
2558 2645 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2559 2646 mpt->m_free_queue = memp;
2560 2647
2561 2648 /*
2562 2649 * Clear the reply free queue memory.
2563 2650 */
2564 2651 bzero(mpt->m_free_queue, mem_size);
2565 2652
2566 2653 return (DDI_SUCCESS);
2567 2654 }
2568 2655
2569 2656 static int
2570 2657 mptsas_alloc_post_queue(mptsas_t *mpt)
2571 2658 {
2572 2659 ddi_dma_attr_t frame_dma_attrs;
2573 2660 caddr_t memp;
2574 2661 ddi_dma_cookie_t cookie;
2575 2662 size_t mem_size;
2576 2663
2577 2664 /*
2578 2665 * re-alloc when it has already alloced
2579 2666 */
2580 2667 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2581 2668 &mpt->m_acc_post_queue_hdl);
2582 2669
2583 2670 /*
2584 2671 * The reply descriptor post queue size is:
2585 2672 * Reply Descriptor Post Queue Depth * 8
2586 2673 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2587 2674 */
2588 2675 mem_size = mpt->m_post_queue_depth * 8;
2589 2676
2590 2677 /*
2591 2678 * set the DMA attributes. The Reply Descriptor Post Queue must be
2592 2679 	 * aligned on a 16-byte boundary.
2593 2680 */
2594 2681 frame_dma_attrs = mpt->m_msg_dma_attr;
2595 2682 frame_dma_attrs.dma_attr_align = 16;
2596 2683 frame_dma_attrs.dma_attr_sgllen = 1;
2597 2684
2598 2685 /*
2599 2686 * allocate the reply post queue
2600 2687 */
2601 2688 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2602 2689 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2603 2690 mem_size, &cookie) == FALSE) {
2604 2691 return (DDI_FAILURE);
2605 2692 }
2606 2693
2607 2694 /*
2608 2695 * Store the reply descriptor post queue memory address. This chip
2609 2696 * uses this address to write to the reply descriptor post queue. The
2610 2697 * second address is the address mpt uses to manage the queue.
2611 2698 */
2612 2699 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2613 2700 mpt->m_post_queue = memp;
2614 2701
2615 2702 /*
2616 2703 * Clear the reply post queue memory.
2617 2704 */
2618 2705 bzero(mpt->m_post_queue, mem_size);
2619 2706
2620 2707 return (DDI_SUCCESS);
2621 2708 }
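The four DMA pools above are sized directly from IOC-reported limits: the request frame pool is m_max_requests * m_req_frame_size, the reply frame pool is m_max_replies * m_reply_frame_size, the reply free queue is its depth * 4 (one 32-bit address per entry), and the reply descriptor post queue is its depth * 8 (one 64-bit descriptor per entry). The request frame pool and both queues must be 16-byte aligned; reply frames only need the default 4-byte alignment. A quick arithmetic sketch with made-up depths and frame sizes:

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    /* Made-up depths and frame sizes; the driver reads these from the IOC. */
    size_t max_requests = 1024, req_frame_size = 128;
    size_t max_replies = 960, reply_frame_size = 128;
    size_t free_queue_depth = 1024, post_queue_depth = 1024;

    printf("request frame pool: %zu bytes\n", max_requests * req_frame_size);
    printf("reply frame pool:   %zu bytes\n", max_replies * reply_frame_size);
    printf("reply free queue:   %zu bytes\n", free_queue_depth * 4);  /* 32-bit addresses */
    printf("reply post queue:   %zu bytes\n", post_queue_depth * 8);  /* 64-bit descriptors */
    return (0);
}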
2622 2709
2623 2710 static void
2624 2711 mptsas_alloc_reply_args(mptsas_t *mpt)
2625 2712 {
2626 2713 if (mpt->m_replyh_args == NULL) {
2627 2714 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2628 2715 mpt->m_max_replies, KM_SLEEP);
2629 2716 }
2630 2717 }
2631 2718
2632 2719 static int
2633 2720 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2634 2721 {
2635 2722 mptsas_cache_frames_t *frames = NULL;
2636 2723 if (cmd->cmd_extra_frames == NULL) {
2637 2724 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2638 2725 if (frames == NULL) {
2639 2726 return (DDI_FAILURE);
2640 2727 }
2641 2728 cmd->cmd_extra_frames = frames;
2642 2729 }
2643 2730 return (DDI_SUCCESS);
2644 2731 }
2645 2732
2646 2733 static void
2647 2734 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2648 2735 {
2649 2736 if (cmd->cmd_extra_frames) {
2650 2737 kmem_cache_free(mpt->m_cache_frames,
2651 2738 (void *)cmd->cmd_extra_frames);
2652 2739 cmd->cmd_extra_frames = NULL;
2653 2740 }
2654 2741 }
2655 2742
2656 2743 static void
2657 2744 mptsas_cfg_fini(mptsas_t *mpt)
2658 2745 {
2659 2746 NDBG0(("mptsas_cfg_fini"));
2660 2747 ddi_regs_map_free(&mpt->m_datap);
2661 2748 }
2662 2749
2663 2750 static void
2664 2751 mptsas_hba_fini(mptsas_t *mpt)
2665 2752 {
2666 2753 NDBG0(("mptsas_hba_fini"));
2667 2754
2668 2755 /*
2669 2756 * Free up any allocated memory
2670 2757 */
2671 2758 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2672 2759 &mpt->m_acc_req_frame_hdl);
2673 2760
2674 2761 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2675 2762 &mpt->m_acc_reply_frame_hdl);
2676 2763
2677 2764 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2678 2765 &mpt->m_acc_free_queue_hdl);
2679 2766
2680 2767 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2681 2768 &mpt->m_acc_post_queue_hdl);
2682 2769
2683 2770 if (mpt->m_replyh_args != NULL) {
2684 2771 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2685 2772 * mpt->m_max_replies);
2686 2773 }
2687 2774 }
2688 2775
2689 2776 static int
2690 2777 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2691 2778 {
2692 2779 int lun = 0;
2693 2780 char *sas_wwn = NULL;
2694 2781 int phynum = -1;
2695 2782 int reallen = 0;
2696 2783
2697 2784 	/* Get the LUN */
2698 2785 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2699 2786 LUN_PROP, 0);
2700 2787
2701 2788 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2702 2789 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2703 2790 /*
2704 2791 * Stick in the address of form "pPHY,LUN"
2705 2792 */
2706 2793 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2707 2794 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2708 2795 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2709 2796 == DDI_PROP_SUCCESS) {
2710 2797 /*
2711 2798 * Stick in the address of the form "wWWN,LUN"
2712 2799 */
2713 2800 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2714 2801 ddi_prop_free(sas_wwn);
2715 2802 } else {
2716 2803 return (DDI_FAILURE);
2717 2804 }
2718 2805
2719 2806 ASSERT(reallen < len);
2720 2807 if (reallen >= len) {
2721 2808 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2722 2809 "length too small, it needs to be %d bytes", reallen + 1);
2723 2810 }
2724 2811 return (DDI_SUCCESS);
2725 2812 }
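mptsas_name_child() builds the child's unit address either as "p<phy>,<lun>" when a sata-phy property is present or as "<wWWN>,<lun>" otherwise, and warns when the snprintf() result would not fit the caller's buffer. A small stand-alone version of the same formatting, using sample values:

#include <stdio.h>

/* Returns the length snprintf() needed; mirrors the driver's naming logic. */
static int
name_child(char *name, size_t len, int phynum, const char *wwn_str, int lun)
{
    if (phynum >= 0)
        return (snprintf(name, len, "p%x,%x", phynum, lun));
    return (snprintf(name, len, "%s,%x", wwn_str, lun));
}

int
main(void)
{
    char name[32];
    int need;

    need = name_child(name, sizeof (name), -1, "w5000c5001234abcd", 0);
    if (need >= (int)sizeof (name))
        fprintf(stderr, "name buffer needs %d bytes\n", need + 1);
    printf("%s\n", name);            /* w5000c5001234abcd,0 */

    (void) name_child(name, sizeof (name), 3, NULL, 1);
    printf("%s\n", name);            /* p3,1 */
    return (0);
}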
2726 2813
2727 2814 /*
2728 2815 * tran_tgt_init(9E) - target device instance initialization
2729 2816 */
2730 2817 static int
2731 2818 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2732 2819 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2733 2820 {
2734 2821 #ifndef __lock_lint
2735 2822 _NOTE(ARGUNUSED(hba_tran))
2736 2823 #endif
2737 2824
2738 2825 /*
2739 2826 * At this point, the scsi_device structure already exists
2740 2827 * and has been initialized.
2741 2828 *
2742 2829 * Use this function to allocate target-private data structures,
2743 2830 * if needed by this HBA. Add revised flow-control and queue
2744 2831 * properties for child here, if desired and if you can tell they
2745 2832 * support tagged queueing by now.
2746 2833 */
2747 2834 mptsas_t *mpt;
2748 2835 int lun = sd->sd_address.a_lun;
2749 2836 mdi_pathinfo_t *pip = NULL;
2750 2837 mptsas_tgt_private_t *tgt_private = NULL;
2751 2838 mptsas_target_t *ptgt = NULL;
2752 2839 char *psas_wwn = NULL;
2753 - int phymask = 0;
2840 + mptsas_phymask_t phymask = 0;
2754 2841 uint64_t sas_wwn = 0;
2842 + mptsas_target_addr_t addr;
2755 2843 mpt = SDEV2MPT(sd);
2756 2844
2757 2845 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
2758 2846
2759 2847 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2760 2848 (void *)hba_dip, (void *)tgt_dip, lun));
2761 2849
2762 2850 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
2763 2851 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
2764 2852 ddi_set_name_addr(tgt_dip, NULL);
2765 2853 return (DDI_FAILURE);
2766 2854 }
2767 2855 /*
2768 2856 * phymask is 0 means the virtual port for RAID
2769 2857 */
2770 - phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
2858 + phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
2771 2859 "phymask", 0);
2772 2860 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2773 2861 if ((pip = (void *)(sd->sd_private)) == NULL) {
2774 2862 /*
2775 2863 * Very bad news if this occurs. Somehow scsi_vhci has
2776 2864 * lost the pathinfo node for this target.
2777 2865 */
2778 2866 return (DDI_NOT_WELL_FORMED);
2779 2867 }
2780 2868
2781 2869 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
2782 2870 DDI_PROP_SUCCESS) {
2783 2871 mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
2784 2872 return (DDI_FAILURE);
2785 2873 }
2786 2874
2787 2875 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
2788 2876 &psas_wwn) == MDI_SUCCESS) {
2789 2877 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2790 2878 sas_wwn = 0;
2791 2879 }
2792 2880 (void) mdi_prop_free(psas_wwn);
2793 2881 }
2794 2882 } else {
2795 2883 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
2796 2884 DDI_PROP_DONTPASS, LUN_PROP, 0);
2797 2885 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
2798 2886 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
2799 2887 DDI_PROP_SUCCESS) {
2800 2888 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2801 2889 sas_wwn = 0;
2802 2890 }
2803 2891 ddi_prop_free(psas_wwn);
2804 2892 } else {
2805 2893 sas_wwn = 0;
2806 2894 }
2807 2895 }
2896 +
2808 2897 ASSERT((sas_wwn != 0) || (phymask != 0));
2898 + addr.mta_wwn = sas_wwn;
2899 + addr.mta_phymask = phymask;
2809 2900 mutex_enter(&mpt->m_mutex);
2810 - ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
2901 + ptgt = refhash_lookup(mpt->m_targets, &addr);
2811 2902 mutex_exit(&mpt->m_mutex);
2812 2903 if (ptgt == NULL) {
2813 2904 mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
2814 2905 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
2815 2906 sas_wwn);
2816 2907 return (DDI_FAILURE);
2817 2908 }
2818 2909 if (hba_tran->tran_tgt_private == NULL) {
2819 2910 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
2820 2911 KM_SLEEP);
2821 2912 tgt_private->t_lun = lun;
2822 2913 tgt_private->t_private = ptgt;
2823 2914 hba_tran->tran_tgt_private = tgt_private;
2824 2915 }
2825 2916
2826 2917 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2827 2918 return (DDI_SUCCESS);
2828 2919 }
2829 2920 mutex_enter(&mpt->m_mutex);
2830 2921
2831 2922 if (ptgt->m_deviceinfo &
2832 2923 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
2833 2924 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
2834 2925 uchar_t *inq89 = NULL;
2835 2926 int inq89_len = 0x238;
2836 2927 int reallen = 0;
2837 2928 int rval = 0;
2838 2929 struct sata_id *sid = NULL;
2839 2930 char model[SATA_ID_MODEL_LEN + 1];
2840 2931 char fw[SATA_ID_FW_LEN + 1];
2841 2932 char *vid, *pid;
2842 2933 int i;
2843 2934
2844 2935 mutex_exit(&mpt->m_mutex);
2845 2936 /*
2846 2937 		 * According to SCSI/ATA Translation-2 (SAT-2) revision 01a,
2847 2938 		 * chapter 12.4.2, VPD page 89h includes 512 bytes of ATA IDENTIFY
2848 2939 		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
2849 2940 */
2850 2941 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
2851 2942 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
2852 2943 inq89, inq89_len, &reallen, 1);
2853 2944
2854 2945 if (rval != 0) {
2855 2946 if (inq89 != NULL) {
2856 2947 kmem_free(inq89, inq89_len);
2857 2948 }
2858 2949
2859 2950 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
2860 2951 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
2861 2952 return (DDI_SUCCESS);
2862 2953 }
2863 2954 sid = (void *)(&inq89[60]);
2864 2955
2865 2956 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
2866 2957 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
2867 2958
2868 2959 model[SATA_ID_MODEL_LEN] = 0;
2869 2960 fw[SATA_ID_FW_LEN] = 0;
2870 2961
2871 2962 /*
2872 2963 		 * split model into vid/pid
2873 2964 */
2874 2965 for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
2875 2966 if ((*pid == ' ') || (*pid == '\t'))
2876 2967 break;
2877 2968 if (i < SATA_ID_MODEL_LEN) {
2878 2969 vid = model;
2879 2970 /*
2880 2971 * terminate vid, establish pid
2881 2972 */
2882 2973 *pid++ = 0;
2883 2974 } else {
2884 2975 /*
2885 2976 			 * vid will stay "ATA ", the rule is the same
2886 2977 			 * as in the sata framework implementation.
2887 2978 */
2888 2979 vid = NULL;
2889 2980 /*
2890 2981 * model is all pid
2891 2982 */
2892 2983 pid = model;
2893 2984 }
2894 2985
2895 2986 /*
2896 2987 * override SCSA "inquiry-*" properties
2897 2988 */
2898 2989 if (vid)
2899 2990 (void) scsi_device_prop_update_inqstring(sd,
2900 2991 INQUIRY_VENDOR_ID, vid, strlen(vid));
2901 2992 if (pid)
2902 2993 (void) scsi_device_prop_update_inqstring(sd,
2903 2994 INQUIRY_PRODUCT_ID, pid, strlen(pid));
2904 2995 (void) scsi_device_prop_update_inqstring(sd,
2905 2996 INQUIRY_REVISION_ID, fw, strlen(fw));
2906 2997
2907 2998 if (inq89 != NULL) {
2908 2999 kmem_free(inq89, inq89_len);
2909 3000 }
2910 3001 } else {
2911 3002 mutex_exit(&mpt->m_mutex);
2912 3003 }
2913 3004
2914 3005 return (DDI_SUCCESS);
2915 3006 }
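For SATA and ATAPI targets, tgt_init above fetches VPD page 0x89 (512 bytes of ATA IDENTIFY data), byte-swaps the model and firmware strings with swab(), and splits the model at the first blank or tab into a vendor ID and a product ID; if no blank is found, the whole string becomes the product ID and the vendor stays "ATA". A user-space sketch of just the split, with a made-up model string:

#include <stdio.h>

#define MODEL_LEN 40                 /* SATA_ID_MODEL_LEN in the driver */

int
main(void)
{
    char model[MODEL_LEN + 1] = "HGST HUS724040ALS640";   /* made-up model string */
    char *vid, *pid;
    int i;

    /* Split model into vid/pid at the first space or tab. */
    for (i = 0, pid = model; i < MODEL_LEN; i++, pid++) {
        if (*pid == ' ' || *pid == '\t')
            break;
    }
    if (i < MODEL_LEN) {
        vid = model;
        *pid++ = '\0';               /* terminate vid; pid starts right after it */
    } else {
        vid = NULL;                  /* vendor stays "ATA", as in the sata framework */
        pid = model;
    }

    printf("vid: %s\n", vid != NULL ? vid : "ATA");
    printf("pid: %s\n", pid);
    return (0);
}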
2916 3007 /*
2917 3008 * tran_tgt_free(9E) - target device instance deallocation
2918 3009 */
2919 3010 static void
2920 3011 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2921 3012 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2922 3013 {
2923 3014 #ifndef __lock_lint
2924 3015 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2925 3016 #endif
2926 3017
2927 3018 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2928 3019
2929 3020 if (tgt_private != NULL) {
2930 3021 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2931 3022 hba_tran->tran_tgt_private = NULL;
2932 3023 }
2933 3024 }
2934 3025
2935 3026 /*
2936 3027 * scsi_pkt handling
2937 3028 *
2938 3029 * Visible to the external world via the transport structure.
2939 3030 */
2940 3031
2941 3032 /*
2942 3033 * Notes:
2943 3034 * - transport the command to the addressed SCSI target/lun device
2944 3035 * - normal operation is to schedule the command to be transported,
2945 3036 * and return TRAN_ACCEPT if this is successful.
2946 3037 * - if NO_INTR, tran_start must poll device for command completion
2947 3038 */
2948 3039 static int
2949 3040 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
2950 3041 {
2951 3042 #ifndef __lock_lint
2952 3043 _NOTE(ARGUNUSED(ap))
2953 3044 #endif
2954 3045 mptsas_t *mpt = PKT2MPT(pkt);
2955 3046 mptsas_cmd_t *cmd = PKT2CMD(pkt);
2956 3047 int rval;
2957 3048 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
2958 3049
2959 3050 NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
2960 3051 ASSERT(ptgt);
2961 3052 if (ptgt == NULL)
2962 3053 return (TRAN_FATAL_ERROR);
2963 3054
2964 3055 /*
2965 3056 * prepare the pkt before taking mutex.
2966 3057 */
2967 3058 rval = mptsas_prepare_pkt(cmd);
2968 3059 if (rval != TRAN_ACCEPT) {
2969 3060 return (rval);
2970 3061 }
2971 3062
2972 3063 /*
2973 3064 * Send the command to target/lun, however your HBA requires it.
2974 3065 * If busy, return TRAN_BUSY; if there's some other formatting error
2975 3066 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
2976 3067 * return of TRAN_ACCEPT.
2977 3068 *
2978 3069 * Remember that access to shared resources, including the mptsas_t
2979 3070 * data structure and the HBA hardware registers, must be protected
2980 3071 * with mutexes, here and everywhere.
2981 3072 *
2982 3073 * Also remember that at interrupt time, you'll get an argument
2983 3074 * to the interrupt handler which is a pointer to your mptsas_t
2984 3075 * structure; you'll have to remember which commands are outstanding
2985 3076 * and which scsi_pkt is the currently-running command so the
2986 3077 * interrupt handler can refer to the pkt to set completion
2987 3078 * status, call the target driver back through pkt_comp, etc.
2988 3079 *
2989 3080 	 * If the instance lock is held by another thread, don't spin to wait
2990 3081 * for it. Instead, queue the cmd and next time when the instance lock
2991 3082 	 * is not held, accept all the queued cmds. An extra tx_waitq is
2992 3083 * introduced to protect the queue.
2993 3084 *
2994 3085 	 * The polled cmd will not be queued but accepted as usual.
2995 3086 *
2996 3087 * Under the tx_waitq mutex, record whether a thread is draining
2997 3088 * the tx_waitq. An IO requesting thread that finds the instance
2998 3089 * mutex contended appends to the tx_waitq and while holding the
2999 3090 * tx_wait mutex, if the draining flag is not set, sets it and then
3000 3091 * proceeds to spin for the instance mutex. This scheme ensures that
3001 3092 * the last cmd in a burst be processed.
3002 3093 *
3003 3094 * we enable this feature only when the helper threads are enabled,
3004 3095 * at which we think the loads are heavy.
3005 3096 *
3006 3097 * per instance mutex m_tx_waitq_mutex is introduced to protect the
3007 3098 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
3008 3099 */
3009 3100
3010 3101 if (mpt->m_doneq_thread_n) {
3011 3102 if (mutex_tryenter(&mpt->m_mutex) != 0) {
3012 3103 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3013 3104 mutex_exit(&mpt->m_mutex);
3014 3105 } else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3015 3106 mutex_enter(&mpt->m_mutex);
3016 3107 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3017 3108 mutex_exit(&mpt->m_mutex);
3018 3109 } else {
3019 3110 mutex_enter(&mpt->m_tx_waitq_mutex);
3020 3111 /*
3021 3112 * ptgt->m_dr_flag is protected by m_mutex or
3022 3113 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3023 3114 * is acquired.
3024 3115 */
3025 3116 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3026 3117 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3027 3118 /*
3028 3119 * The command should be allowed to
3029 3120 * retry by returning TRAN_BUSY to
3030 3121 				 * retry by returning TRAN_BUSY
3031 3122 * scsi_vhci since the device/path is
3032 3123 * in unstable state now.
3033 3124 */
3034 3125 mutex_exit(&mpt->m_tx_waitq_mutex);
3035 3126 return (TRAN_BUSY);
3036 3127 } else {
3037 3128 /*
3038 3129 * The device is offline, just fail the
3039 3130 * command by returning
3040 3131 * TRAN_FATAL_ERROR.
3041 3132 */
3042 3133 mutex_exit(&mpt->m_tx_waitq_mutex);
3043 3134 return (TRAN_FATAL_ERROR);
3044 3135 }
3045 3136 }
3046 3137 if (mpt->m_tx_draining) {
3047 3138 cmd->cmd_flags |= CFLAG_TXQ;
3048 3139 *mpt->m_tx_waitqtail = cmd;
3049 3140 mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3050 3141 mutex_exit(&mpt->m_tx_waitq_mutex);
3051 3142 } else { /* drain the queue */
3052 3143 mpt->m_tx_draining = 1;
3053 3144 mutex_exit(&mpt->m_tx_waitq_mutex);
3054 3145 mutex_enter(&mpt->m_mutex);
3055 3146 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3056 3147 mutex_exit(&mpt->m_mutex);
3057 3148 }
3058 3149 }
3059 3150 } else {
3060 3151 mutex_enter(&mpt->m_mutex);
3061 3152 /*
3062 3153 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3063 3154 * in this case, m_mutex is acquired.
3064 3155 */
3065 3156 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3066 3157 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3067 3158 /*
3068 3159 * commands should be allowed to retry by
3069 3160 * returning TRAN_BUSY to stall the I/O's
3070 3161 * which come from scsi_vhci since the device/
3071 3162 * path is in unstable state now.
3072 3163 */
3073 3164 mutex_exit(&mpt->m_mutex);
3074 3165 return (TRAN_BUSY);
3075 3166 } else {
3076 3167 /*
3077 3168 * The device is offline, just fail the
3078 3169 * command by returning TRAN_FATAL_ERROR.
3079 3170 */
3080 3171 mutex_exit(&mpt->m_mutex);
3081 3172 return (TRAN_FATAL_ERROR);
3082 3173 }
3083 3174 }
3084 3175 rval = mptsas_accept_pkt(mpt, cmd);
3085 3176 mutex_exit(&mpt->m_mutex);
3086 3177 }
3087 3178
3088 3179 return (rval);
3089 3180 }
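
The hand-off described in the block comment above can be modelled outside the driver. Below is a minimal, hypothetical userland sketch of the same protocol (pthread mutexes in place of kernel mutexes, illustrative names, and with the DR/NOINTR special cases omitted); it is not driver code, but it shows why at most one thread at a time ends up blocking on the main lock while the others simply append to the queue.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical model of the tx_waitq hand-off; all names are illustrative. */
struct xcmd {
	struct xcmd	*next;
};

struct xinst {
	pthread_mutex_t	main_lock;	/* stands in for m_mutex */
	pthread_mutex_t	txq_lock;	/* stands in for m_tx_waitq_mutex */
	int		draining;	/* stands in for m_tx_draining */
	struct xcmd	*txq_head;
	struct xcmd	**txq_tail;	/* points at txq_head when empty */
};

static void
submit(struct xinst *ip, struct xcmd *cp)
{
	if (pthread_mutex_trylock(&ip->main_lock) == 0) {
		/* Uncontended: drain anything queued, then start cp. */
		pthread_mutex_unlock(&ip->main_lock);
		return;
	}
	pthread_mutex_lock(&ip->txq_lock);
	if (ip->draining) {
		/* Another thread will drain the queue; just append. */
		*ip->txq_tail = cp;
		ip->txq_tail = &cp->next;
		pthread_mutex_unlock(&ip->txq_lock);
		return;
	}
	/* Become the drainer: block on the main lock, then flush. */
	ip->draining = 1;
	pthread_mutex_unlock(&ip->txq_lock);
	pthread_mutex_lock(&ip->main_lock);
	/* ... drain the queue (clearing draining under txq_lock), start cp ... */
	pthread_mutex_unlock(&ip->main_lock);
}

int
main(void)
{
	struct xinst in;
	struct xcmd c = { NULL };

	pthread_mutex_init(&in.main_lock, NULL);
	pthread_mutex_init(&in.txq_lock, NULL);
	in.draining = 0;
	in.txq_head = NULL;
	in.txq_tail = &in.txq_head;

	submit(&in, &c);	/* uncontended here, so it takes the fast path */
	return (0);
}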
3090 3181
3091 3182 /*
3092 3183  * Accept all the queued cmds (if any) before accepting the current one.
3093 3184 */
3094 3185 static int
3095 3186 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3096 3187 {
3097 3188 int rval;
3098 3189 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3099 3190
3100 3191 ASSERT(mutex_owned(&mpt->m_mutex));
3101 3192 /*
3102 3193 * The call to mptsas_accept_tx_waitq() must always be performed
3103 3194 * because that is where mpt->m_tx_draining is cleared.
3104 3195 */
3105 3196 mutex_enter(&mpt->m_tx_waitq_mutex);
3106 3197 mptsas_accept_tx_waitq(mpt);
3107 3198 mutex_exit(&mpt->m_tx_waitq_mutex);
3108 3199 /*
3109 3200 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3110 3201 * in this case, m_mutex is acquired.
3111 3202 */
3112 3203 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3113 3204 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3114 3205 /*
3115 3206 			 * The command should be allowed to retry by returning
3116 3207 			 * TRAN_BUSY to stall the I/Os which come from
3117 3208 			 * scsi_vhci, since the device/path is in an unstable
3118 3209 			 * state now.
3119 3210 */
3120 3211 return (TRAN_BUSY);
3121 3212 } else {
3122 3213 /*
3123 3214 * The device is offline, just fail the command by
3124 3215 			 * returning TRAN_FATAL_ERROR.
3125 3216 */
3126 3217 return (TRAN_FATAL_ERROR);
3127 3218 }
3128 3219 }
3129 3220 rval = mptsas_accept_pkt(mpt, cmd);
3130 3221
3131 3222 return (rval);
3132 3223 }
3133 3224
3134 3225 static int
3135 3226 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3136 3227 {
3137 3228 int rval = TRAN_ACCEPT;
3138 3229 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3139 3230
3140 3231 NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3141 3232
3142 3233 ASSERT(mutex_owned(&mpt->m_mutex));
3143 3234
3144 3235 if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3145 3236 rval = mptsas_prepare_pkt(cmd);
3146 3237 if (rval != TRAN_ACCEPT) {
3147 3238 cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3148 3239 return (rval);
3149 3240 }
3150 3241 }
3151 3242
3152 3243 /*
3153 3244 * reset the throttle if we were draining
3154 3245 */
3155 3246 if ((ptgt->m_t_ncmds == 0) &&
3156 3247 (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3157 3248 NDBG23(("reset throttle"));
3158 3249 ASSERT(ptgt->m_reset_delay == 0);
3159 3250 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3160 3251 }
3161 3252
3162 3253 /*
3163 3254 * If HBA is being reset, the DevHandles are being re-initialized,
3164 3255 * which means that they could be invalid even if the target is still
3165 3256 * attached. Check if being reset and if DevHandle is being
3166 3257 * re-initialized. If this is the case, return BUSY so the I/O can be
3167 3258 * retried later.
3168 3259 */
3169 3260 if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3170 3261 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3171 3262 if (cmd->cmd_flags & CFLAG_TXQ) {
3172 3263 mptsas_doneq_add(mpt, cmd);
3173 3264 mptsas_doneq_empty(mpt);
3174 3265 return (rval);
3175 3266 } else {
3176 3267 return (TRAN_BUSY);
3177 3268 }
3178 3269 }
3179 3270
3180 3271 /*
3181 3272 	 * If the device handle has already been invalidated, just
3182 3273 	 * fail the command. In theory, a command from a scsi_vhci
3183 3274 	 * client can never arrive with an invalid devhdl, since the
3184 3275 	 * devhdl is only invalidated after the path goes offline and the
3185 3276 	 * target driver is not supposed to select an offlined path.
3186 3277 */
3187 3278 if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3188 3279 		NDBG20(("rejecting command, possibly because of an invalid "
3189 3280 		    "devhdl request."));
3190 3281 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3191 3282 if (cmd->cmd_flags & CFLAG_TXQ) {
3192 3283 mptsas_doneq_add(mpt, cmd);
3193 3284 mptsas_doneq_empty(mpt);
3194 3285 return (rval);
3195 3286 } else {
3196 3287 return (TRAN_FATAL_ERROR);
3197 3288 }
3198 3289 }
3199 3290 /*
3200 3291 * The first case is the normal case. mpt gets a command from the
3201 3292 * target driver and starts it.
3202 3293 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual
3203 3294 	 * maximum number of outstanding commands is m_max_requests - 2.
3204 3295 */
3205 3296 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3206 3297 (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3207 3298 (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3208 3299 (ptgt->m_reset_delay == 0) &&
3209 3300 (ptgt->m_t_nwait == 0) &&
3210 3301 ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3211 3302 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3212 3303 (void) mptsas_start_cmd(mpt, cmd);
3213 3304 } else {
3214 3305 mptsas_waitq_add(mpt, cmd);
3215 3306 }
3216 3307 } else {
3217 3308 /*
3218 3309 * Add this pkt to the work queue
3219 3310 */
3220 3311 mptsas_waitq_add(mpt, cmd);
3221 3312
3222 3313 if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3223 3314 (void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);
3224 3315
3225 3316 /*
3226 3317 * Only flush the doneq if this is not a TM
3227 3318 * cmd. For TM cmds the flushing of the
3228 3319 * doneq will be done in those routines.
3229 3320 */
3230 3321 if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
3231 3322 mptsas_doneq_empty(mpt);
3232 3323 }
3233 3324 }
3234 3325 }
3235 3326 return (rval);
3236 3327 }
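
The slot accounting above is easy to check with concrete numbers. A tiny standalone sketch follows; the value of m_max_requests is assumed purely for illustration (the real value comes from the IOC facts).

#include <stdio.h>

int
main(void)
{
	unsigned int max_requests = 128;		/* assumed example value */
	unsigned int n_normal = max_requests - 2;	/* SMID 0 and the TM slot are reserved */

	printf("normal slots: SMID 1 .. %u\n", n_normal);	/* 1 .. 126 */
	printf("TM slot:      SMID %u\n", n_normal + 1);	/* 127 */
	return (0);
}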
3237 3328
3238 3329 int
3239 3330 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3240 3331 {
3241 - mptsas_slots_t *slots;
3242 - int slot;
3243 - mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3332 + mptsas_slots_t *slots = mpt->m_active;
3333 + uint_t slot, start_rotor;
3334 + mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3244 3335
3245 - ASSERT(mutex_owned(&mpt->m_mutex));
3246 - slots = mpt->m_active;
3336 + ASSERT(MUTEX_HELD(&mpt->m_mutex));
3247 3337
3248 3338 /*
3249 3339 * Account for reserved TM request slot and reserved SMID of 0.
3250 3340 */
3251 - ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));
3341 + ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));
3252 3342
3253 3343 /*
3254 - * m_tags is equivalent to the SMID when sending requests. Since the
3255 - * SMID cannot be 0, start out at one if rolling over past the size
3256 - * of the request queue depth. Also, don't use the last SMID, which is
3257 - * reserved for TM requests.
3344 + * Find the next available slot, beginning at m_rotor. If no slot is
3345 + * available, we'll return FALSE to indicate that. This mechanism
3346 + * considers only the normal slots, not the reserved slot 0 nor the
3347 + * task management slot m_n_normal + 1. The rotor is left to point to
3348 + * the normal slot after the one we select, unless we select the last
3349 + * normal slot in which case it returns to slot 1.
3258 3350 */
3259 - slot = (slots->m_tags)++;
3260 - if (slots->m_tags > slots->m_n_slots) {
3261 - slots->m_tags = 1;
3262 - }
3351 + start_rotor = slots->m_rotor;
3352 + do {
3353 + slot = slots->m_rotor++;
3354 + if (slots->m_rotor > slots->m_n_normal)
3355 + slots->m_rotor = 1;
3263 3356
3264 -alloc_tag:
3265 - /* Validate tag, should never fail. */
3266 - if (slots->m_slot[slot] == NULL) {
3267 - /*
3268 - * Make sure SMID is not using reserved value of 0
3269 - * and the TM request slot.
3270 - */
3271 - ASSERT((slot > 0) && (slot <= slots->m_n_slots));
3272 - cmd->cmd_slot = slot;
3273 - slots->m_slot[slot] = cmd;
3274 - mpt->m_ncmds++;
3357 + if (slots->m_rotor == start_rotor)
3358 + break;
3359 + } while (slots->m_slot[slot] != NULL);
3275 3360
3276 - /*
3277 - * only increment per target ncmds if this is not a
3278 - * command that has no target associated with it (i.e. a
3279 - * event acknoledgment)
3280 - */
3281 - if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3282 - ptgt->m_t_ncmds++;
3283 - }
3284 - cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;
3361 + if (slots->m_slot[slot] != NULL)
3362 + return (FALSE);
3285 3363
3286 - /*
3287 - * If initial timout is less than or equal to one tick, bump
3288 - * the timeout by a tick so that command doesn't timeout before
3289 - * its allotted time.
3290 - */
3291 - if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
3292 - cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
3293 - }
3294 - return (TRUE);
3295 - } else {
3296 - int i;
3364 + ASSERT(slot != 0 && slot <= slots->m_n_normal);
3297 3365
3298 - /*
3299 - * If slot in use, scan until a free one is found. Don't use 0
3300 - * or final slot, which is reserved for TM requests.
3301 - */
3302 - for (i = 0; i < slots->m_n_slots; i++) {
3303 - slot = slots->m_tags;
3304 - if (++(slots->m_tags) > slots->m_n_slots) {
3305 - slots->m_tags = 1;
3306 - }
3307 - if (slots->m_slot[slot] == NULL) {
3308 - NDBG22(("found free slot %d", slot));
3309 - goto alloc_tag;
3310 - }
3311 - }
3366 + cmd->cmd_slot = slot;
3367 + slots->m_slot[slot] = cmd;
3368 + mpt->m_ncmds++;
3369 +
3370 + /*
3371 +	 * Only increment the per-target ncmds if this command has a
3372 +	 * target associated with it (i.e. it is not an event
3373 +	 * acknowledgment).
3374 + */
3375 + if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3376 + ptgt->m_t_ncmds++;
3312 3377 }
3313 - return (FALSE);
3378 + cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;
3379 +
3380 + /*
3381 +	 * If the initial timeout is less than or equal to one tick, bump
3382 +	 * the timeout by a tick so that the command doesn't time out
3383 +	 * before its allotted time.
3384 + */
3385 + if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
3386 + cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
3387 + }
3388 + return (TRUE);
3314 3389 }
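
The rotor-based search introduced above replaces the old m_tags scan. The following is a minimal userland model of the same wrap-around search with a deliberately tiny, assumed table size and a plain pointer array standing in for m_slot[]; it is a sketch, not driver code, but it reproduces both the wrap at m_n_normal and the all-slots-busy case.

#include <stdio.h>

#define	N_NORMAL	6		/* assumed small table for illustration */

static void	*slot[N_NORMAL + 2];	/* [0] and [N_NORMAL + 1] are reserved */
static unsigned int rotor = 1;		/* models m_rotor; always in 1..N_NORMAL */

/* Return the allocated slot number, or 0 if every normal slot is busy. */
static unsigned int
alloc_slot(void *cmd)
{
	unsigned int s, start = rotor;

	do {
		s = rotor++;
		if (rotor > N_NORMAL)
			rotor = 1;
		if (rotor == start)
			break;
	} while (slot[s] != NULL);

	if (slot[s] != NULL)
		return (0);
	slot[s] = cmd;
	return (s);
}

int
main(void)
{
	int dummy[N_NORMAL + 1];

	/* Fill every normal slot, then try once more; the last attempt prints 0. */
	for (int i = 0; i < N_NORMAL + 1; i++)
		printf("attempt %d -> slot %u\n", i, alloc_slot(&dummy[i]));
	return (0);
}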
3315 3390
3316 3391 /*
3317 3392 * prepare the pkt:
3318 3393 * the pkt may have been resubmitted or just reused so
3319 3394 * initialize some fields and do some checks.
3320 3395 */
3321 3396 static int
3322 3397 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3323 3398 {
3324 3399 struct scsi_pkt *pkt = CMD2PKT(cmd);
3325 3400
3326 3401 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3327 3402
3328 3403 /*
3329 3404 * Reinitialize some fields that need it; the packet may
3330 3405 * have been resubmitted
3331 3406 */
3332 3407 pkt->pkt_reason = CMD_CMPLT;
3333 3408 pkt->pkt_state = 0;
3334 3409 pkt->pkt_statistics = 0;
3335 3410 pkt->pkt_resid = 0;
3336 3411 cmd->cmd_age = 0;
3337 3412 cmd->cmd_pkt_flags = pkt->pkt_flags;
3338 3413
3339 3414 /*
3340 3415 * zero status byte.
3341 3416 */
3342 3417 *(pkt->pkt_scbp) = 0;
3343 3418
3344 3419 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3345 3420 pkt->pkt_resid = cmd->cmd_dmacount;
3346 3421
3347 3422 /*
3348 3423 * consistent packets need to be sync'ed first
3349 3424 * (only for data going out)
3350 3425 */
3351 3426 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3352 3427 (cmd->cmd_flags & CFLAG_DMASEND)) {
3353 3428 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3354 3429 DDI_DMA_SYNC_FORDEV);
3355 3430 }
3356 3431 }
3357 3432
3358 3433 cmd->cmd_flags =
3359 3434 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3360 3435 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3361 3436
3362 3437 return (TRAN_ACCEPT);
3363 3438 }
3364 3439
3365 3440 /*
3366 3441 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3367 3442 *
3368 3443 * One of three possibilities:
3369 3444 * - allocate scsi_pkt
3370 3445 * - allocate scsi_pkt and DMA resources
3371 3446 * - allocate DMA resources to an already-allocated pkt
3372 3447 */
3373 3448 static struct scsi_pkt *
3374 3449 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3375 3450 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3376 3451 int (*callback)(), caddr_t arg)
3377 3452 {
3378 3453 mptsas_cmd_t *cmd, *new_cmd;
3379 3454 mptsas_t *mpt = ADDR2MPT(ap);
3380 3455 int failure = 1;
3381 3456 uint_t oldcookiec;
3382 3457 mptsas_target_t *ptgt = NULL;
3383 3458 int rval;
3384 3459 mptsas_tgt_private_t *tgt_private;
3385 3460 int kf;
3386 3461
3387 3462 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3388 3463
3389 3464 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3390 3465 tran_tgt_private;
3391 3466 ASSERT(tgt_private != NULL);
3392 3467 if (tgt_private == NULL) {
3393 3468 return (NULL);
3394 3469 }
3395 3470 ptgt = tgt_private->t_private;
3396 3471 ASSERT(ptgt != NULL);
3397 3472 if (ptgt == NULL)
3398 3473 return (NULL);
3399 3474 ap->a_target = ptgt->m_devhdl;
3400 3475 ap->a_lun = tgt_private->t_lun;
3401 3476
3402 3477 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3403 3478 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3404 3479 statuslen *= 100; tgtlen *= 4;
3405 3480 #endif
3406 3481 NDBG3(("mptsas_scsi_init_pkt:\n"
3407 3482 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3408 3483 ap->a_target, (void *)pkt, (void *)bp,
3409 3484 cmdlen, statuslen, tgtlen, flags));
3410 3485
3411 3486 /*
3412 3487 * Allocate the new packet.
3413 3488 */
3414 3489 if (pkt == NULL) {
3415 3490 ddi_dma_handle_t save_dma_handle;
3416 3491 ddi_dma_handle_t save_arq_dma_handle;
3417 3492 struct buf *save_arq_bp;
3418 3493 ddi_dma_cookie_t save_arqcookie;
3419 3494
3420 3495 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3421 3496
3422 3497 if (cmd) {
3423 3498 save_dma_handle = cmd->cmd_dmahandle;
3424 3499 save_arq_dma_handle = cmd->cmd_arqhandle;
3425 3500 save_arq_bp = cmd->cmd_arq_buf;
3426 3501 save_arqcookie = cmd->cmd_arqcookie;
3427 3502 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3428 3503 cmd->cmd_dmahandle = save_dma_handle;
3429 3504 cmd->cmd_arqhandle = save_arq_dma_handle;
3430 3505 cmd->cmd_arq_buf = save_arq_bp;
3431 3506 cmd->cmd_arqcookie = save_arqcookie;
3432 3507
3433 3508 pkt = (void *)((uchar_t *)cmd +
3434 3509 sizeof (struct mptsas_cmd));
3435 3510 pkt->pkt_ha_private = (opaque_t)cmd;
3436 3511 pkt->pkt_address = *ap;
3437 3512 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3438 3513 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3439 3514 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3440 3515 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3441 3516 cmd->cmd_cdblen = (uchar_t)cmdlen;
3442 3517 cmd->cmd_scblen = statuslen;
3443 3518 cmd->cmd_rqslen = SENSE_LENGTH;
3444 3519 cmd->cmd_tgt_addr = ptgt;
3445 3520 failure = 0;
3446 3521 }
3447 3522
3448 3523 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3449 3524 (tgtlen > PKT_PRIV_LEN) ||
3450 3525 (statuslen > EXTCMDS_STATUS_SIZE)) {
3451 3526 if (failure == 0) {
3452 3527 /*
3453 3528 * if extern alloc fails, all will be
3454 3529 * deallocated, including cmd
3455 3530 */
3456 3531 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3457 3532 cmdlen, tgtlen, statuslen, kf);
3458 3533 }
3459 3534 if (failure) {
3460 3535 /*
3461 3536 * if extern allocation fails, it will
3462 3537 * deallocate the new pkt as well
3463 3538 */
3464 3539 return (NULL);
3465 3540 }
3466 3541 }
3467 3542 new_cmd = cmd;
3468 3543
3469 3544 } else {
3470 3545 cmd = PKT2CMD(pkt);
3471 3546 new_cmd = NULL;
3472 3547 }
3473 3548
3474 3549
3475 3550 /* grab cmd->cmd_cookiec here as oldcookiec */
3476 3551
3477 3552 oldcookiec = cmd->cmd_cookiec;
3478 3553
3479 3554 /*
3480 3555 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3481 3556 * greater than 0 and we'll need to grab the next dma window
3482 3557 */
3483 3558 /*
3484 3559 * SLM-not doing extra command frame right now; may add later
3485 3560 */
3486 3561
3487 3562 if (cmd->cmd_nwin > 0) {
3488 3563
3489 3564 /*
3490 3565 		 * Make sure we haven't gone past the total number
3491 3566 		 * of windows.
3492 3567 */
3493 3568 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3494 3569 return (NULL);
3495 3570 }
3496 3571 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3497 3572 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3498 3573 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3499 3574 return (NULL);
3500 3575 }
3501 3576 goto get_dma_cookies;
3502 3577 }
3503 3578
3504 3579
3505 3580 if (flags & PKT_XARQ) {
3506 3581 cmd->cmd_flags |= CFLAG_XARQ;
3507 3582 }
3508 3583
3509 3584 /*
3510 3585 * DMA resource allocation. This version assumes your
3511 3586 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3512 3587 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3513 3588 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3514 3589 */
3515 3590 if (bp && (bp->b_bcount != 0) &&
3516 3591 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3517 3592
3518 3593 int cnt, dma_flags;
3519 3594 mptti_t *dmap; /* ptr to the S/G list */
3520 3595
3521 3596 /*
3522 3597 * Set up DMA memory and position to the next DMA segment.
3523 3598 */
3524 3599 ASSERT(cmd->cmd_dmahandle != NULL);
3525 3600
3526 3601 if (bp->b_flags & B_READ) {
3527 3602 dma_flags = DDI_DMA_READ;
3528 3603 cmd->cmd_flags &= ~CFLAG_DMASEND;
3529 3604 } else {
3530 3605 dma_flags = DDI_DMA_WRITE;
3531 3606 cmd->cmd_flags |= CFLAG_DMASEND;
3532 3607 }
3533 3608 if (flags & PKT_CONSISTENT) {
3534 3609 cmd->cmd_flags |= CFLAG_CMDIOPB;
3535 3610 dma_flags |= DDI_DMA_CONSISTENT;
3536 3611 }
3537 3612
3538 3613 if (flags & PKT_DMA_PARTIAL) {
3539 3614 dma_flags |= DDI_DMA_PARTIAL;
3540 3615 }
3541 3616
3542 3617 /*
3543 3618 * workaround for byte hole issue on psycho and
3544 3619 * schizo pre 2.1
3545 3620 */
3546 3621 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3547 3622 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3548 3623 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3549 3624 dma_flags |= DDI_DMA_CONSISTENT;
3550 3625 }
3551 3626
3552 3627 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3553 3628 dma_flags, callback, arg,
3554 3629 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3555 3630 if (rval == DDI_DMA_PARTIAL_MAP) {
3556 3631 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3557 3632 &cmd->cmd_nwin);
3558 3633 cmd->cmd_winindex = 0;
3559 3634 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3560 3635 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3561 3636 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3562 3637 &cmd->cmd_cookiec);
3563 3638 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3564 3639 switch (rval) {
3565 3640 case DDI_DMA_NORESOURCES:
3566 3641 bioerror(bp, 0);
3567 3642 break;
3568 3643 case DDI_DMA_BADATTR:
3569 3644 case DDI_DMA_NOMAPPING:
3570 3645 bioerror(bp, EFAULT);
3571 3646 break;
3572 3647 case DDI_DMA_TOOBIG:
3573 3648 default:
3574 3649 bioerror(bp, EINVAL);
3575 3650 break;
3576 3651 }
3577 3652 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3578 3653 if (new_cmd) {
3579 3654 mptsas_scsi_destroy_pkt(ap, pkt);
3580 3655 }
3581 3656 return ((struct scsi_pkt *)NULL);
3582 3657 }
3583 3658
3584 3659 get_dma_cookies:
3585 3660 cmd->cmd_flags |= CFLAG_DMAVALID;
3586 3661 ASSERT(cmd->cmd_cookiec > 0);
3587 3662
3588 3663 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3589 3664 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3590 3665 cmd->cmd_cookiec);
3591 3666 bioerror(bp, EINVAL);
3592 3667 if (new_cmd) {
3593 3668 mptsas_scsi_destroy_pkt(ap, pkt);
3594 3669 }
3595 3670 return ((struct scsi_pkt *)NULL);
3596 3671 }
3597 3672
3598 3673 /*
3599 3674 * Allocate extra SGL buffer if needed.
3600 3675 */
3601 3676 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3602 3677 (cmd->cmd_extra_frames == NULL)) {
3603 3678 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3604 3679 DDI_FAILURE) {
3605 3680 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3606 3681 "failed");
3607 3682 bioerror(bp, ENOMEM);
3608 3683 if (new_cmd) {
3609 3684 mptsas_scsi_destroy_pkt(ap, pkt);
3610 3685 }
3611 3686 return ((struct scsi_pkt *)NULL);
3612 3687 }
3613 3688 }
3614 3689
3615 3690 /*
3616 3691 * Always use scatter-gather transfer
3617 3692 * Use the loop below to store physical addresses of
3618 3693 * DMA segments, from the DMA cookies, into your HBA's
3619 3694 * scatter-gather list.
3620 3695 * We need to ensure we have enough kmem alloc'd
3621 3696 * for the sg entries since we are no longer using an
3622 3697 * array inside mptsas_cmd_t.
3623 3698 *
3624 3699 * We check cmd->cmd_cookiec against oldcookiec so
3625 3700 * the scatter-gather list is correctly allocated
3626 3701 */
3627 3702
3628 3703 if (oldcookiec != cmd->cmd_cookiec) {
3629 3704 if (cmd->cmd_sg != (mptti_t *)NULL) {
3630 3705 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3631 3706 oldcookiec);
3632 3707 cmd->cmd_sg = NULL;
3633 3708 }
3634 3709 }
3635 3710
3636 3711 if (cmd->cmd_sg == (mptti_t *)NULL) {
3637 3712 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3638 3713 cmd->cmd_cookiec), kf);
3639 3714
3640 3715 if (cmd->cmd_sg == (mptti_t *)NULL) {
3641 3716 mptsas_log(mpt, CE_WARN,
3642 3717 "unable to kmem_alloc enough memory "
3643 3718 "for scatter/gather list");
3644 3719 /*
3645 3720 * if we have an ENOMEM condition we need to behave
3646 3721 * the same way as the rest of this routine
3647 3722 */
3648 3723
3649 3724 bioerror(bp, ENOMEM);
3650 3725 if (new_cmd) {
3651 3726 mptsas_scsi_destroy_pkt(ap, pkt);
3652 3727 }
3653 3728 return ((struct scsi_pkt *)NULL);
3654 3729 }
3655 3730 }
3656 3731
3657 3732 dmap = cmd->cmd_sg;
3658 3733
3659 3734 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3660 3735
3661 3736 /*
3662 3737 * store the first segment into the S/G list
3663 3738 */
3664 3739 dmap->count = cmd->cmd_cookie.dmac_size;
3665 3740 dmap->addr.address64.Low = (uint32_t)
3666 3741 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3667 3742 dmap->addr.address64.High = (uint32_t)
3668 3743 (cmd->cmd_cookie.dmac_laddress >> 32);
3669 3744
3670 3745 /*
3671 3746 * dmacount counts the size of the dma for this window
3672 3747 * (if partial dma is being used). totaldmacount
3673 3748 * keeps track of the total amount of dma we have
3674 3749 * transferred for all the windows (needed to calculate
3675 3750 * the resid value below).
3676 3751 */
3677 3752 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3678 3753 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3679 3754
3680 3755 /*
3681 3756 * We already stored the first DMA scatter gather segment,
3682 3757 * start at 1 if we need to store more.
3683 3758 */
3684 3759 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3685 3760 /*
3686 3761 * Get next DMA cookie
3687 3762 */
3688 3763 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3689 3764 &cmd->cmd_cookie);
3690 3765 dmap++;
3691 3766
3692 3767 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3693 3768 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3694 3769
3695 3770 /*
3696 3771 * store the segment parms into the S/G list
3697 3772 */
3698 3773 dmap->count = cmd->cmd_cookie.dmac_size;
3699 3774 dmap->addr.address64.Low = (uint32_t)
3700 3775 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3701 3776 dmap->addr.address64.High = (uint32_t)
3702 3777 (cmd->cmd_cookie.dmac_laddress >> 32);
3703 3778 }
3704 3779
3705 3780 /*
3706 3781 		 * If this was partially allocated we set the resid
3707 3782 		 * to the amount of data NOT transferred in this window.
3708 3783 		 * If there is only one window, the resid will be 0.
3709 3784 */
3710 3785 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3711 3786 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3712 3787 }
3713 3788 return (pkt);
3714 3789 }
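
For reference, the Address.Low/Address.High values stored into each S/G entry above are simply a 64-bit dmac_laddress split into two 32-bit halves. A standalone sketch of the same arithmetic, using a made-up address:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t laddress = 0x00000001deadbe00ULL;	/* hypothetical dmac_laddress */
	uint32_t low  = (uint32_t)(laddress & 0xffffffffull);
	uint32_t high = (uint32_t)(laddress >> 32);

	/* Prints low=0xdeadbe00 high=0x1, matching the SGE Address.Low/High fields. */
	printf("low=0x%x high=0x%x\n", low, high);
	return (0);
}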
3715 3790
3716 3791 /*
3717 3792  * tran_destroy_pkt(9E) - scsi_pkt(9S) deallocation
3718 3793 *
3719 3794 * Notes:
3720 3795 * - also frees DMA resources if allocated
3721 3796  *	- implicit DMA synchronization
3722 3797 */
3723 3798 static void
3724 3799 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3725 3800 {
3726 3801 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3727 3802 mptsas_t *mpt = ADDR2MPT(ap);
3728 3803
3729 3804 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3730 3805 ap->a_target, (void *)pkt));
3731 3806
3732 3807 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3733 3808 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3734 3809 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3735 3810 }
3736 3811
3737 3812 if (cmd->cmd_sg) {
3738 3813 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3739 3814 cmd->cmd_sg = NULL;
3740 3815 }
3741 3816
3742 3817 mptsas_free_extra_sgl_frame(mpt, cmd);
3743 3818
3744 3819 if ((cmd->cmd_flags &
3745 3820 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3746 3821 CFLAG_SCBEXTERN)) == 0) {
3747 3822 cmd->cmd_flags = CFLAG_FREE;
3748 3823 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3749 3824 } else {
3750 3825 mptsas_pkt_destroy_extern(mpt, cmd);
3751 3826 }
3752 3827 }
3753 3828
3754 3829 /*
3755 3830 * kmem cache constructor and destructor:
3756 3831 * When constructing, we bzero the cmd and allocate the dma handle
3757 3832 * When destructing, just free the dma handle
3758 3833 */
3759 3834 static int
3760 3835 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3761 3836 {
3762 3837 mptsas_cmd_t *cmd = buf;
3763 3838 mptsas_t *mpt = cdrarg;
3764 3839 struct scsi_address ap;
3765 3840 uint_t cookiec;
3766 3841 ddi_dma_attr_t arq_dma_attr;
3767 3842 int (*callback)(caddr_t);
3768 3843
3769 3844 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3770 3845
3771 3846 NDBG4(("mptsas_kmem_cache_constructor"));
3772 3847
3773 3848 ap.a_hba_tran = mpt->m_tran;
3774 3849 ap.a_target = 0;
3775 3850 ap.a_lun = 0;
3776 3851
3777 3852 /*
3778 3853 * allocate a dma handle
3779 3854 */
3780 3855 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3781 3856 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3782 3857 cmd->cmd_dmahandle = NULL;
3783 3858 return (-1);
3784 3859 }
3785 3860
3786 3861 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3787 3862 SENSE_LENGTH, B_READ, callback, NULL);
3788 3863 if (cmd->cmd_arq_buf == NULL) {
3789 3864 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3790 3865 cmd->cmd_dmahandle = NULL;
3791 3866 return (-1);
3792 3867 }
3793 3868
3794 3869 /*
3795 3870 	 * allocate an arq handle
3796 3871 */
3797 3872 arq_dma_attr = mpt->m_msg_dma_attr;
3798 3873 arq_dma_attr.dma_attr_sgllen = 1;
3799 3874 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3800 3875 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3801 3876 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3802 3877 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3803 3878 cmd->cmd_dmahandle = NULL;
3804 3879 cmd->cmd_arqhandle = NULL;
3805 3880 return (-1);
3806 3881 }
3807 3882
3808 3883 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3809 3884 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3810 3885 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3811 3886 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3812 3887 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3813 3888 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3814 3889 cmd->cmd_dmahandle = NULL;
3815 3890 cmd->cmd_arqhandle = NULL;
3816 3891 cmd->cmd_arq_buf = NULL;
3817 3892 return (-1);
3818 3893 }
3819 3894
3820 3895 return (0);
3821 3896 }
3822 3897
3823 3898 static void
3824 3899 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3825 3900 {
3826 3901 #ifndef __lock_lint
3827 3902 _NOTE(ARGUNUSED(cdrarg))
3828 3903 #endif
3829 3904 mptsas_cmd_t *cmd = buf;
3830 3905
3831 3906 NDBG4(("mptsas_kmem_cache_destructor"));
3832 3907
3833 3908 if (cmd->cmd_arqhandle) {
3834 3909 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3835 3910 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3836 3911 cmd->cmd_arqhandle = NULL;
3837 3912 }
3838 3913 if (cmd->cmd_arq_buf) {
3839 3914 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3840 3915 cmd->cmd_arq_buf = NULL;
3841 3916 }
3842 3917 if (cmd->cmd_dmahandle) {
3843 3918 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3844 3919 cmd->cmd_dmahandle = NULL;
3845 3920 }
3846 3921 }
3847 3922
3848 3923 static int
3849 3924 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
3850 3925 {
3851 3926 mptsas_cache_frames_t *p = buf;
3852 3927 mptsas_t *mpt = cdrarg;
3853 3928 ddi_dma_attr_t frame_dma_attr;
3854 3929 size_t mem_size, alloc_len;
3855 3930 ddi_dma_cookie_t cookie;
3856 3931 uint_t ncookie;
3857 3932 int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
3858 3933 ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3859 3934
3860 3935 frame_dma_attr = mpt->m_msg_dma_attr;
3861 3936 frame_dma_attr.dma_attr_align = 0x10;
3862 3937 frame_dma_attr.dma_attr_sgllen = 1;
3863 3938
3864 3939 if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
3865 3940 &p->m_dma_hdl) != DDI_SUCCESS) {
3866 3941 mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
3867 3942 " extra SGL.");
3868 3943 return (DDI_FAILURE);
3869 3944 }
3870 3945
3871 3946 mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
3872 3947
3873 3948 if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
3874 3949 DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
3875 3950 &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
3876 3951 ddi_dma_free_handle(&p->m_dma_hdl);
3877 3952 p->m_dma_hdl = NULL;
3878 3953 mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
3879 3954 " extra SGL.");
3880 3955 return (DDI_FAILURE);
3881 3956 }
3882 3957
3883 3958 if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
3884 3959 alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3885 3960 &cookie, &ncookie) != DDI_DMA_MAPPED) {
3886 3961 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3887 3962 ddi_dma_free_handle(&p->m_dma_hdl);
3888 3963 p->m_dma_hdl = NULL;
3889 3964 mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
3890 3965 " extra SGL");
3891 3966 return (DDI_FAILURE);
3892 3967 }
3893 3968
3894 3969 /*
3895 3970 * Store the SGL memory address. This chip uses this
3896 3971 * address to dma to and from the driver. The second
3897 3972 * address is the address mpt uses to fill in the SGL.
3898 3973 */
3899 3974 p->m_phys_addr = cookie.dmac_address;
3900 3975
3901 3976 return (DDI_SUCCESS);
3902 3977 }
3903 3978
3904 3979 static void
3905 3980 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3906 3981 {
3907 3982 #ifndef __lock_lint
3908 3983 _NOTE(ARGUNUSED(cdrarg))
3909 3984 #endif
3910 3985 mptsas_cache_frames_t *p = buf;
3911 3986 if (p->m_dma_hdl != NULL) {
3912 3987 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3913 3988 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3914 3989 ddi_dma_free_handle(&p->m_dma_hdl);
3915 3990 p->m_phys_addr = NULL;
3916 3991 p->m_frames_addr = NULL;
3917 3992 p->m_dma_hdl = NULL;
3918 3993 p->m_acc_hdl = NULL;
3919 3994 }
3920 3995
3921 3996 }
3922 3997
3923 3998 /*
3924 3999  * allocate and deallocate external pkt space (i.e. not part of mptsas_cmd)
3925 4000  * for non-standard length cdb, pkt_private, and status areas.
3926 4001  * If allocation fails, then deallocate all external space and the pkt.
3927 4002 */
3928 4003 /* ARGSUSED */
3929 4004 static int
3930 4005 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
3931 4006 int cmdlen, int tgtlen, int statuslen, int kf)
3932 4007 {
3933 4008 caddr_t cdbp, scbp, tgt;
3934 4009 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
3935 4010 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3936 4011 struct scsi_address ap;
3937 4012 size_t senselength;
3938 4013 ddi_dma_attr_t ext_arq_dma_attr;
3939 4014 uint_t cookiec;
3940 4015
3941 4016 NDBG3(("mptsas_pkt_alloc_extern: "
3942 4017 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
3943 4018 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
3944 4019
3945 4020 tgt = cdbp = scbp = NULL;
3946 4021 cmd->cmd_scblen = statuslen;
3947 4022 cmd->cmd_privlen = (uchar_t)tgtlen;
3948 4023
3949 4024 if (cmdlen > sizeof (cmd->cmd_cdb)) {
3950 4025 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
3951 4026 goto fail;
3952 4027 }
3953 4028 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
3954 4029 cmd->cmd_flags |= CFLAG_CDBEXTERN;
3955 4030 }
3956 4031 if (tgtlen > PKT_PRIV_LEN) {
3957 4032 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
3958 4033 goto fail;
3959 4034 }
3960 4035 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
3961 4036 cmd->cmd_pkt->pkt_private = tgt;
3962 4037 }
3963 4038 if (statuslen > EXTCMDS_STATUS_SIZE) {
3964 4039 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
3965 4040 goto fail;
3966 4041 }
3967 4042 cmd->cmd_flags |= CFLAG_SCBEXTERN;
3968 4043 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
3969 4044
3970 4045 /* allocate sense data buf for DMA */
3971 4046
3972 4047 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
3973 4048 struct scsi_arq_status, sts_sensedata);
3974 4049 cmd->cmd_rqslen = (uchar_t)senselength;
3975 4050
3976 4051 ap.a_hba_tran = mpt->m_tran;
3977 4052 ap.a_target = 0;
3978 4053 ap.a_lun = 0;
3979 4054
3980 4055 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
3981 4056 (struct buf *)NULL, senselength, B_READ,
3982 4057 callback, NULL);
3983 4058
3984 4059 if (cmd->cmd_ext_arq_buf == NULL) {
3985 4060 goto fail;
3986 4061 }
3987 4062 /*
3988 4063 		 * allocate an external arq handle and bind the buf
3989 4064 */
3990 4065 ext_arq_dma_attr = mpt->m_msg_dma_attr;
3991 4066 ext_arq_dma_attr.dma_attr_sgllen = 1;
3992 4067 if ((ddi_dma_alloc_handle(mpt->m_dip,
3993 4068 &ext_arq_dma_attr, callback,
3994 4069 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
3995 4070 goto fail;
3996 4071 }
3997 4072
3998 4073 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
3999 4074 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
4000 4075 callback, NULL, &cmd->cmd_ext_arqcookie,
4001 4076 &cookiec)
4002 4077 != DDI_SUCCESS) {
4003 4078 goto fail;
4004 4079 }
4005 4080 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4006 4081 }
4007 4082 return (0);
4008 4083 fail:
4009 4084 mptsas_pkt_destroy_extern(mpt, cmd);
4010 4085 return (1);
4011 4086 }
4012 4087
4013 4088 /*
4014 4089 * deallocate external pkt space and deallocate the pkt
4015 4090 */
4016 4091 static void
4017 4092 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4018 4093 {
4019 4094 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4020 4095
4021 4096 if (cmd->cmd_flags & CFLAG_FREE) {
4022 4097 mptsas_log(mpt, CE_PANIC,
4023 4098 "mptsas_pkt_destroy_extern: freeing free packet");
4024 4099 _NOTE(NOT_REACHED)
4025 4100 /* NOTREACHED */
4026 4101 }
4027 4102 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4028 4103 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4029 4104 }
4030 4105 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4031 4106 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4032 4107 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4033 4108 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4034 4109 }
4035 4110 if (cmd->cmd_ext_arqhandle) {
4036 4111 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4037 4112 cmd->cmd_ext_arqhandle = NULL;
4038 4113 }
4039 4114 if (cmd->cmd_ext_arq_buf)
4040 4115 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4041 4116 }
4042 4117 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4043 4118 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4044 4119 }
4045 4120 cmd->cmd_flags = CFLAG_FREE;
4046 4121 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4047 4122 }
4048 4123
4049 4124 /*
4050 4125 * tran_sync_pkt(9E) - explicit DMA synchronization
4051 4126 */
4052 4127 /*ARGSUSED*/
4053 4128 static void
4054 4129 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4055 4130 {
4056 4131 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4057 4132
4058 4133 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4059 4134 ap->a_target, (void *)pkt));
4060 4135
4061 4136 if (cmd->cmd_dmahandle) {
4062 4137 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4063 4138 (cmd->cmd_flags & CFLAG_DMASEND) ?
4064 4139 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4065 4140 }
4066 4141 }
4067 4142
4068 4143 /*
4069 4144 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4070 4145 */
4071 4146 /*ARGSUSED*/
4072 4147 static void
4073 4148 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4074 4149 {
4075 4150 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4076 4151 mptsas_t *mpt = ADDR2MPT(ap);
4077 4152
4078 4153 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4079 4154 ap->a_target, (void *)pkt));
4080 4155
4081 4156 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4082 4157 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4083 4158 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4084 4159 }
4085 4160
4086 4161 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4087 4162 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4088 4163 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4089 4164 }
4090 4165
4091 4166 mptsas_free_extra_sgl_frame(mpt, cmd);
4092 4167 }
4093 4168
4094 4169 static void
4095 4170 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4096 4171 {
4097 4172 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4098 4173 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4099 4174 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4100 4175 DDI_DMA_SYNC_FORCPU);
4101 4176 }
4102 4177 (*pkt->pkt_comp)(pkt);
4103 4178 }
4104 4179
4105 4180 static void
4106 4181 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4107 4182 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4108 4183 {
4109 4184 uint_t cookiec;
4110 4185 mptti_t *dmap;
4111 4186 uint32_t flags;
4112 4187 pMpi2SGESimple64_t sge;
4113 4188 pMpi2SGEChain64_t sgechain;
4114 4189 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4115 4190
4116 4191 /*
4117 4192 * Save the number of entries in the DMA
4118 4193 * Scatter/Gather list
4119 4194 */
4120 4195 cookiec = cmd->cmd_cookiec;
4121 4196
4122 4197 NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4123 4198
4124 4199 /*
4125 4200 * Set read/write bit in control.
4126 4201 */
4127 4202 if (cmd->cmd_flags & CFLAG_DMASEND) {
4128 4203 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4129 4204 } else {
4130 4205 *control |= MPI2_SCSIIO_CONTROL_READ;
4131 4206 }
4132 4207
4133 4208 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4134 4209
4135 4210 /*
4136 4211 * We have 2 cases here. First where we can fit all the
4137 4212 * SG elements into the main frame, and the case
4138 4213 * where we can't.
4139 4214 * If we have more cookies than we can attach to a frame
4140 4215 	 * we will need to use a chain element to point to
4141 4216 	 * a location of memory where the rest of the S/G
4142 4217 * elements reside.
4143 4218 */
4144 4219 if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4145 4220 dmap = cmd->cmd_sg;
4146 4221 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4147 4222 while (cookiec--) {
4148 4223 ddi_put32(acc_hdl,
4149 4224 &sge->Address.Low, dmap->addr.address64.Low);
4150 4225 ddi_put32(acc_hdl,
4151 4226 &sge->Address.High, dmap->addr.address64.High);
4152 4227 ddi_put32(acc_hdl, &sge->FlagsLength,
4153 4228 dmap->count);
4154 4229 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4155 4230 flags |= ((uint32_t)
4156 4231 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4157 4232 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4158 4233 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4159 4234 MPI2_SGE_FLAGS_SHIFT);
4160 4235
4161 4236 /*
4162 4237 * If this is the last cookie, we set the flags
4163 4238 * to indicate so
4164 4239 */
4165 4240 if (cookiec == 0) {
4166 4241 flags |=
4167 4242 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4168 4243 | MPI2_SGE_FLAGS_END_OF_BUFFER
4169 4244 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4170 4245 MPI2_SGE_FLAGS_SHIFT);
4171 4246 }
4172 4247 if (cmd->cmd_flags & CFLAG_DMASEND) {
4173 4248 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4174 4249 MPI2_SGE_FLAGS_SHIFT);
4175 4250 } else {
4176 4251 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4177 4252 MPI2_SGE_FLAGS_SHIFT);
4178 4253 }
4179 4254 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4180 4255 dmap++;
4181 4256 sge++;
4182 4257 }
4183 4258 } else {
4184 4259 /*
4185 4260 * Hereby we start to deal with multiple frames.
4186 4261 * The process is as follows:
4187 4262 * 1. Determine how many frames are needed for SGL element
4188 4263 * storage; Note that all frames are stored in contiguous
4189 4264 * memory space and in 64-bit DMA mode each element is
4190 4265 * 3 double-words (12 bytes) long.
4191 4266 * 2. Fill up the main frame. We need to do this separately
4192 4267 * since it contains the SCSI IO request header and needs
4193 4268 * dedicated processing. Note that the last 4 double-words
4194 4269 * of the SCSI IO header is for SGL element storage
4195 4270 * (MPI2_SGE_IO_UNION).
4196 4271 * 3. Fill the chain element in the main frame, so the DMA
4197 4272 * engine can use the following frames.
4198 4273 * 4. Enter a loop to fill the remaining frames. Note that the
4199 4274 * last frame contains no chain element. The remaining
4200 4275 * frames go into the mpt SGL buffer allocated on the fly,
4201 4276 * not immediately following the main message frame, as in
4202 4277 * Gen1.
4203 4278 * Some restrictions:
4204 4279 * 1. For 64-bit DMA, the simple element and chain element
4205 4280 * are both of 3 double-words (12 bytes) in size, even
4206 4281 * though all frames are stored in the first 4G of mem
4207 4282 * range and the higher 32-bits of the address are always 0.
4208 4283 * 2. On some controllers (like the 1064/1068), a frame can
4209 4284 * hold SGL elements with the last 1 or 2 double-words
4210 4285 * (4 or 8 bytes) un-used. On these controllers, we should
4211 4286 * recognize that there's not enough room for another SGL
4212 4287 * element and move the sge pointer to the next frame.
4213 4288 */
4214 4289 int i, j, k, l, frames, sgemax;
4215 4290 int temp;
4216 4291 uint8_t chainflags;
4217 4292 uint16_t chainlength;
4218 4293 mptsas_cache_frames_t *p;
4219 4294
4220 4295 /*
4221 4296 		 * Sgemax is the number of SGEs that will fit
4222 4297 		 * in each extra frame and frames is the total
4223 4298 		 * number of frames we'll need. One SGE entry per
4224 4299 		 * frame is reserved for the chain element, thus the -1 below.
4225 4300 */
4226 4301 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4227 4302 - 1);
4228 4303 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4229 4304
4230 4305 /*
4231 4306 * A little check to see if we need to round up the number
4232 4307 * of frames we need
4233 4308 */
4234 4309 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4235 4310 sgemax) > 1) {
4236 4311 frames = (temp + 1);
4237 4312 } else {
4238 4313 frames = temp;
4239 4314 }
4240 4315 dmap = cmd->cmd_sg;
4241 4316 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4242 4317
4243 4318 /*
4244 4319 * First fill in the main frame
4245 4320 */
4246 4321 for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4247 4322 ddi_put32(acc_hdl, &sge->Address.Low,
4248 4323 dmap->addr.address64.Low);
4249 4324 ddi_put32(acc_hdl, &sge->Address.High,
4250 4325 dmap->addr.address64.High);
4251 4326 ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4252 4327 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4253 4328 flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4254 4329 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4255 4330 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4256 4331 MPI2_SGE_FLAGS_SHIFT);
4257 4332
4258 4333 /*
4259 4334 * If this is the last SGE of this frame
4260 4335 * we set the end of list flag
4261 4336 */
4262 4337 if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4263 4338 flags |= ((uint32_t)
4264 4339 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4265 4340 MPI2_SGE_FLAGS_SHIFT);
4266 4341 }
4267 4342 if (cmd->cmd_flags & CFLAG_DMASEND) {
4268 4343 flags |=
4269 4344 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4270 4345 MPI2_SGE_FLAGS_SHIFT);
4271 4346 } else {
4272 4347 flags |=
4273 4348 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4274 4349 MPI2_SGE_FLAGS_SHIFT);
4275 4350 }
4276 4351 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4277 4352 dmap++;
4278 4353 sge++;
4279 4354 }
4280 4355
4281 4356 /*
4282 4357 * Fill in the chain element in the main frame.
4283 4358 * About calculation on ChainOffset:
4284 4359 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4285 4360 * in the end reserved for SGL element storage
4286 4361 * (MPI2_SGE_IO_UNION); we should count it in our
4287 4362 * calculation. See its definition in the header file.
4288 4363 * 2. Constant j is the counter of the current SGL element
4289 4364 * that will be processed, and (j - 1) is the number of
4290 4365 * SGL elements that have been processed (stored in the
4291 4366 * main frame).
4292 4367 * 3. ChainOffset value should be in units of double-words (4
4293 4368 * bytes) so the last value should be divided by 4.
4294 4369 */
4295 4370 ddi_put8(acc_hdl, &frame->ChainOffset,
4296 4371 (sizeof (MPI2_SCSI_IO_REQUEST) -
4297 4372 sizeof (MPI2_SGE_IO_UNION) +
4298 4373 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4299 4374 sgechain = (pMpi2SGEChain64_t)sge;
4300 4375 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4301 4376 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4302 4377 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4303 4378 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4304 4379
4305 4380 /*
4306 4381 		 * The size of the next frame is the exact size of space
4307 4382 * (in bytes) used to store the SGL elements. j is the counter
4308 4383 * of SGL elements. (j - 1) is the number of SGL elements that
4309 4384 * have been processed (stored in frames).
4310 4385 */
4311 4386 if (frames >= 2) {
4312 4387 chainlength = mpt->m_req_frame_size /
4313 4388 sizeof (MPI2_SGE_SIMPLE64) *
4314 4389 sizeof (MPI2_SGE_SIMPLE64);
4315 4390 } else {
4316 4391 chainlength = ((cookiec - (j - 1)) *
4317 4392 sizeof (MPI2_SGE_SIMPLE64));
4318 4393 }
4319 4394
4320 4395 p = cmd->cmd_extra_frames;
4321 4396
4322 4397 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4323 4398 ddi_put32(acc_hdl, &sgechain->Address.Low,
4324 4399 p->m_phys_addr);
4325 4400 /* SGL is allocated in the first 4G mem range */
4326 4401 ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4327 4402
4328 4403 /*
4329 4404 * If there are more than 2 frames left we have to
4330 4405 * fill in the next chain offset to the location of
4331 4406 * the chain element in the next frame.
4332 4407 * sgemax is the number of simple elements in an extra
4333 4408 * frame. Note that the value NextChainOffset should be
4334 4409 * in double-words (4 bytes).
4335 4410 */
4336 4411 if (frames >= 2) {
4337 4412 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4338 4413 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4339 4414 } else {
4340 4415 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4341 4416 }
4342 4417
4343 4418 /*
4344 4419 * Jump to next frame;
4345 4420 * Starting here, chain buffers go into the per command SGL.
4346 4421 * This buffer is allocated when chain buffers are needed.
4347 4422 */
4348 4423 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4349 4424 i = cookiec;
4350 4425
4351 4426 /*
4352 4427 		 * Start filling in frames with SGEs. If we
4353 4428 		 * reach the end of a frame and still have SGEs
4354 4429 		 * to fill we need to add a chain element and
4355 4430 		 * use another frame. j will be our counter
4356 4431 		 * for what cookie we are at and i will be
4357 4432 		 * the total cookiec. k is the current frame.
4358 4433 */
4359 4434 for (k = 1; k <= frames; k++) {
4360 4435 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4361 4436
4362 4437 				 * If we have reached the end of the frame
4363 4438 				 * and we have more SGEs to fill in,
4364 4439 				 * we have to fill the final entry
4365 4440 				 * with a chain element and then
4366 4441 				 * continue to the next frame.
4367 4442 * continue to the next frame
4368 4443 */
4369 4444 if ((l == (sgemax + 1)) && (k != frames)) {
4370 4445 sgechain = (pMpi2SGEChain64_t)sge;
4371 4446 j--;
4372 4447 chainflags = (
4373 4448 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4374 4449 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4375 4450 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4376 4451 ddi_put8(p->m_acc_hdl,
4377 4452 &sgechain->Flags, chainflags);
4378 4453 /*
4379 4454 * k is the frame counter and (k + 1)
4380 4455 * is the number of the next frame.
4381 4456 * Note that frames are in contiguous
4382 4457 * memory space.
4383 4458 */
4384 4459 ddi_put32(p->m_acc_hdl,
4385 4460 &sgechain->Address.Low,
4386 4461 (p->m_phys_addr +
4387 4462 (mpt->m_req_frame_size * k)));
4388 4463 ddi_put32(p->m_acc_hdl,
4389 4464 &sgechain->Address.High, 0);
4390 4465
4391 4466 /*
4392 4467 * If there are more than 2 frames left
4393 4468 				 * we have to set the next chain offset to
4394 4469 * the location of the chain element
4395 4470 * in the next frame and fill in the
4396 4471 * length of the next chain
4397 4472 */
4398 4473 if ((frames - k) >= 2) {
4399 4474 ddi_put8(p->m_acc_hdl,
4400 4475 &sgechain->NextChainOffset,
4401 4476 (sgemax *
4402 4477 sizeof (MPI2_SGE_SIMPLE64))
4403 4478 >> 2);
4404 4479 ddi_put16(p->m_acc_hdl,
4405 4480 &sgechain->Length,
4406 4481 mpt->m_req_frame_size /
4407 4482 sizeof (MPI2_SGE_SIMPLE64) *
4408 4483 sizeof (MPI2_SGE_SIMPLE64));
4409 4484 } else {
4410 4485 /*
4411 4486 * This is the last frame. Set
4412 4487 * the NextChainOffset to 0 and
4413 4488 * Length is the total size of
4414 4489 * all remaining simple elements
4415 4490 */
4416 4491 ddi_put8(p->m_acc_hdl,
4417 4492 &sgechain->NextChainOffset,
4418 4493 0);
4419 4494 ddi_put16(p->m_acc_hdl,
4420 4495 &sgechain->Length,
4421 4496 (cookiec - j) *
4422 4497 sizeof (MPI2_SGE_SIMPLE64));
4423 4498 }
4424 4499
4425 4500 /* Jump to the next frame */
4426 4501 sge = (pMpi2SGESimple64_t)
4427 4502 ((char *)p->m_frames_addr +
4428 4503 (int)mpt->m_req_frame_size * k);
4429 4504
4430 4505 continue;
4431 4506 }
4432 4507
4433 4508 ddi_put32(p->m_acc_hdl,
4434 4509 &sge->Address.Low,
4435 4510 dmap->addr.address64.Low);
4436 4511 ddi_put32(p->m_acc_hdl,
4437 4512 &sge->Address.High,
4438 4513 dmap->addr.address64.High);
4439 4514 ddi_put32(p->m_acc_hdl,
4440 4515 &sge->FlagsLength, dmap->count);
4441 4516 flags = ddi_get32(p->m_acc_hdl,
4442 4517 &sge->FlagsLength);
4443 4518 flags |= ((uint32_t)(
4444 4519 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4445 4520 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4446 4521 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4447 4522 MPI2_SGE_FLAGS_SHIFT);
4448 4523
4449 4524 /*
4450 4525 * If we are at the end of the frame and
4451 4526 * there is another frame to fill in
4452 4527 				 * we set the last simple element as the
4453 4528 				 * last element
4454 4529 */
4455 4530 if ((l == sgemax) && (k != frames)) {
4456 4531 flags |= ((uint32_t)
4457 4532 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4458 4533 MPI2_SGE_FLAGS_SHIFT);
4459 4534 }
4460 4535
4461 4536 /*
4462 4537 * If this is the final cookie we
4463 4538 * indicate it by setting the flags
4464 4539 */
4465 4540 if (j == i) {
4466 4541 flags |= ((uint32_t)
4467 4542 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4468 4543 MPI2_SGE_FLAGS_END_OF_BUFFER |
4469 4544 MPI2_SGE_FLAGS_END_OF_LIST) <<
4470 4545 MPI2_SGE_FLAGS_SHIFT);
4471 4546 }
4472 4547 if (cmd->cmd_flags & CFLAG_DMASEND) {
4473 4548 flags |=
4474 4549 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4475 4550 MPI2_SGE_FLAGS_SHIFT);
4476 4551 } else {
4477 4552 flags |=
4478 4553 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4479 4554 MPI2_SGE_FLAGS_SHIFT);
4480 4555 }
4481 4556 ddi_put32(p->m_acc_hdl,
4482 4557 &sge->FlagsLength, flags);
4483 4558 dmap++;
4484 4559 sge++;
4485 4560 }
4486 4561 }
4487 4562
4488 4563 /*
4489 4564 * Sync DMA with the chain buffers that were just created
4490 4565 */
4491 4566 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4492 4567 }
4493 4568 }
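
The extra-frame arithmetic above can be checked in isolation. The sketch below uses assumed values only (a 128-byte request frame, a 12-byte MPI2_SGE_SIMPLE64, an assumed MPTSAS_MAX_FRAME_SGES64() of 9, and an assumed cookie count); the real numbers come from the controller configuration. It reproduces the sgemax/frames computation, including the "> 1" rounding check that accounts for the chain element in every extra frame except the last.

#include <stdio.h>

#define	REQ_FRAME_SIZE	128	/* assumed request frame size in bytes */
#define	SGE_SIZE	12	/* 64-bit simple/chain element size */
#define	MAIN_FRAME_SGES	9	/* assumed MPTSAS_MAX_FRAME_SGES64() value */

int
main(void)
{
	int cookiec = 40;	/* assumed S/G cookie count for this command */
	int sgemax, temp, frames;

	/* One element per extra frame is reserved for the chain element. */
	sgemax = (REQ_FRAME_SIZE / SGE_SIZE) - 1;
	temp = (cookiec - (MAIN_FRAME_SGES - 1)) / sgemax;
	if ((cookiec - (MAIN_FRAME_SGES - 1)) - (temp * sgemax) > 1)
		frames = temp + 1;
	else
		frames = temp;

	/* With the numbers above: sgemax = 9, 32 cookies left over, frames = 4. */
	printf("sgemax=%d frames=%d\n", sgemax, frames);
	return (0);
}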
4494 4569
4495 4570 /*
4496 4571 * Interrupt handling
4497 4572 * Utility routine. Poll for status of a command sent to HBA
4498 4573 * without interrupts (a FLAG_NOINTR command).
4499 4574 */
4500 4575 int
4501 4576 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4502 4577 {
4503 4578 int rval = TRUE;
4504 4579
4505 4580 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4506 4581
4507 4582 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4508 4583 mptsas_restart_hba(mpt);
4509 4584 }
4510 4585
4511 4586 /*
4512 4587 * Wait, using drv_usecwait(), long enough for the command to
4513 4588 * reasonably return from the target if the target isn't
4514 4589 * "dead". A polled command may well be sent from scsi_poll, and
4515 4590 * there are retries built in to scsi_poll if the transport
4516 4591 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4517 4592 * and retries the transport up to scsi_poll_busycnt times
4518 4593 * (currently 60) if
4519 4594 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4520 4595 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4521 4596 *
4522 4597 * limit the waiting to avoid a hang in the event that the
4523 4598 * cmd never gets started but we are still receiving interrupts
4524 4599 */
4525 4600 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4526 4601 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4527 4602 NDBG5(("mptsas_poll: command incomplete"));
4528 4603 rval = FALSE;
4529 4604 break;
4530 4605 }
4531 4606 }
4532 4607
4533 4608 if (rval == FALSE) {
4534 4609
4535 4610 /*
4536 4611 		 * This isn't supposed to happen; the HBA must be wedged.
4537 4612 * Mark this cmd as a timeout.
4538 4613 */
4539 4614 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4540 4615 (STAT_TIMEOUT|STAT_ABORTED));
4541 4616
4542 4617 if (poll_cmd->cmd_queued == FALSE) {
4543 4618
4544 4619 NDBG5(("mptsas_poll: not on waitq"));
4545 4620
4546 4621 poll_cmd->cmd_pkt->pkt_state |=
4547 4622 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4548 4623 } else {
4549 4624
4550 4625 /* find and remove it from the waitq */
4551 4626 NDBG5(("mptsas_poll: delete from waitq"));
4552 4627 mptsas_waitq_delete(mpt, poll_cmd);
4553 4628 }
4554 4629
4555 4630 }
4556 4631 mptsas_fma_check(mpt, poll_cmd);
4557 4632 NDBG5(("mptsas_poll: done"));
4558 4633 return (rval);
4559 4634 }
4560 4635
4561 4636 /*
4562 4637 * Used for polling cmds and TM function
4563 4638 */
4564 4639 static int
4565 4640 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4566 4641 {
4567 4642 int cnt;
4568 4643 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
4569 4644 uint32_t int_mask;
4570 4645
4571 4646 NDBG5(("mptsas_wait_intr"));
4572 4647
4573 4648 mpt->m_polled_intr = 1;
4574 4649
4575 4650 /*
4576 4651 * Get the current interrupt mask and disable interrupts. When
4577 4652 * re-enabling ints, set mask to saved value.
4578 4653 */
4579 4654 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4580 4655 MPTSAS_DISABLE_INTR(mpt);
4581 4656
4582 4657 /*
4583 4658 	 * Keep polling for up to polltime milliseconds (one 1000us wait
4584 4659 	 * per empty pass).
4584 4659 */
4585 4660 for (cnt = 0; cnt < polltime; cnt++) {
4586 4661 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4587 4662 DDI_DMA_SYNC_FORCPU);
4588 4663
4589 4664 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4590 4665 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
4591 4666
4592 4667 if (ddi_get32(mpt->m_acc_post_queue_hdl,
4593 4668 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4594 4669 ddi_get32(mpt->m_acc_post_queue_hdl,
4595 4670 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
4596 4671 drv_usecwait(1000);
4597 4672 continue;
4598 4673 }
4599 4674
4600 4675 /*
4601 4676 * The reply is valid, process it according to its
4602 4677 * type.
4603 4678 */
4604 4679 mptsas_process_intr(mpt, reply_desc_union);
4605 4680
4606 4681 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
4607 4682 mpt->m_post_index = 0;
4608 4683 }
4609 4684
4610 4685 /*
4611 4686 * Update the global reply index
4612 4687 */
4613 4688 ddi_put32(mpt->m_datap,
4614 4689 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
4615 4690 mpt->m_polled_intr = 0;
4616 4691
4617 4692 /*
4618 4693 * Re-enable interrupts and quit.
4619 4694 */
4620 4695 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
4621 4696 int_mask);
4622 4697 return (TRUE);
4623 4698
4624 4699 }
4625 4700
4626 4701 /*
4627 4702 * Clear polling flag, re-enable interrupts and quit.
4628 4703 */
4629 4704 mpt->m_polled_intr = 0;
4630 4705 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
4631 4706 return (FALSE);
4632 4707 }
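
The loop above bounds the total polling time to roughly polltime milliseconds. A stripped-down userland model of the same bounded poll, with a hypothetical reply_ready() function standing in for the reply-descriptor test and usleep() standing in for drv_usecwait():

#include <unistd.h>
#include <stdio.h>

/* Hypothetical stand-in for "is there a valid reply descriptor yet?" */
static int
reply_ready(int pass)
{
	return (pass >= 7);	/* pretend a reply shows up on the 8th pass */
}

/* Poll for up to polltime milliseconds, waiting 1 ms between empty passes. */
static int
bounded_poll(int polltime)
{
	int cnt;

	for (cnt = 0; cnt < polltime; cnt++) {
		if (!reply_ready(cnt)) {
			(void) usleep(1000);	/* models drv_usecwait(1000) */
			continue;
		}
		return (1);	/* reply found; caller would process it */
	}
	return (0);		/* timed out */
}

int
main(void)
{
	printf("poll result: %d\n", bounded_poll(1000));
	return (0);
}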
4633 4708
4634 4709 static void
4635 4710 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4636 4711 pMpi2ReplyDescriptorsUnion_t reply_desc)
4637 4712 {
4638 4713 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4639 4714 uint16_t SMID;
4640 4715 mptsas_slots_t *slots = mpt->m_active;
4641 4716 mptsas_cmd_t *cmd = NULL;
4642 4717 struct scsi_pkt *pkt;
4643 4718
4644 4719 ASSERT(mutex_owned(&mpt->m_mutex));
4645 4720
4646 4721 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4647 4722 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4648 4723
4649 4724 /*
4650 4725 * This is a success reply so just complete the IO. First, do a sanity
4651 4726 * check on the SMID. The final slot is used for TM requests, which
4652 4727 * would not come into this reply handler.
4653 4728 */
4654 - if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4729 + if ((SMID == 0) || (SMID > slots->m_n_normal)) {
4655 4730 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4656 4731 SMID);
4657 4732 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4658 4733 return;
4659 4734 }
4660 4735
4661 4736 cmd = slots->m_slot[SMID];
4662 4737
4663 4738 /*
4664 4739 * print warning and return if the slot is empty
4665 4740 */
4666 4741 if (cmd == NULL) {
4667 4742 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4668 4743 "in slot %d", SMID);
4669 4744 return;
4670 4745 }
4671 4746
4672 4747 pkt = CMD2PKT(cmd);
4673 4748 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4674 4749 STATE_GOT_STATUS);
4675 4750 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4676 4751 pkt->pkt_state |= STATE_XFERRED_DATA;
4677 4752 }
4678 4753 pkt->pkt_resid = 0;
4679 4754
4680 4755 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4681 4756 cmd->cmd_flags |= CFLAG_FINISHED;
4682 4757 cv_broadcast(&mpt->m_passthru_cv);
4683 4758 return;
4684 4759 } else {
4685 4760 mptsas_remove_cmd(mpt, cmd);
4686 4761 }
4687 4762
4688 4763 if (cmd->cmd_flags & CFLAG_RETRY) {
4689 4764 /*
4690 4765 		 * The target returned QFULL or busy, do not add this
4691 4766 * pkt to the doneq since the hba will retry
4692 4767 * this cmd.
4693 4768 *
4694 4769 * The pkt has already been resubmitted in
4695 4770 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4696 4771 * Remove this cmd_flag here.
4697 4772 */
4698 4773 cmd->cmd_flags &= ~CFLAG_RETRY;
4699 4774 } else {
4700 4775 mptsas_doneq_add(mpt, cmd);
4701 4776 }
4702 4777 }
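
Note: both reply handlers validate the SMID against the new m_n_normal count before indexing m_slot[]. A minimal sketch of that convention, using the field names from this change; the helper itself is hypothetical:

	/*
	 * Slots 1..m_n_normal hold normal I/O commands; the slot just past
	 * m_n_normal is reserved for task management, so only the address
	 * reply path accepts it.  SMID 0 is never valid.
	 */
	static boolean_t
	mptsas_smid_valid(mptsas_slots_t *slots, uint16_t smid, boolean_t tm_ok)
	{
		uint16_t limit = slots->m_n_normal + (tm_ok ? 1 : 0);

		return (smid != 0 && smid <= limit);
	}
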
4703 4778
4704 4779 static void
4705 4780 mptsas_handle_address_reply(mptsas_t *mpt,
4706 4781 pMpi2ReplyDescriptorsUnion_t reply_desc)
4707 4782 {
4708 4783 pMpi2AddressReplyDescriptor_t address_reply;
4709 4784 pMPI2DefaultReply_t reply;
4710 4785 mptsas_fw_diagnostic_buffer_t *pBuffer;
4711 4786 uint32_t reply_addr;
4712 4787 uint16_t SMID, iocstatus;
4713 4788 mptsas_slots_t *slots = mpt->m_active;
4714 4789 mptsas_cmd_t *cmd = NULL;
4715 4790 uint8_t function, buffer_type;
4716 4791 m_replyh_arg_t *args;
4717 4792 int reply_frame_no;
4718 4793
4719 4794 ASSERT(mutex_owned(&mpt->m_mutex));
4720 4795
4721 4796 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
4722 4797 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
4723 4798 &address_reply->ReplyFrameAddress);
4724 4799 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
4725 4800
4726 4801 /*
4727 4802 * If reply frame is not in the proper range we should ignore this
4728 4803 * message and exit the interrupt handler.
4729 4804 */
4730 4805 if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
4731 4806 (reply_addr >= (mpt->m_reply_frame_dma_addr +
4732 4807 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
4733 4808 ((reply_addr - mpt->m_reply_frame_dma_addr) %
4734 4809 mpt->m_reply_frame_size != 0)) {
4735 4810 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
4736 4811 "address 0x%x\n", reply_addr);
4737 4812 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4738 4813 return;
4739 4814 }
4740 4815
4741 4816 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
4742 4817 DDI_DMA_SYNC_FORCPU);
4743 4818 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
4744 4819 mpt->m_reply_frame_dma_addr));
4745 4820 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
4746 4821
82 lines elided
4747 4822 /*
4748 4823 * don't get slot information and command for events since these values
4749 4824 * don't exist
4750 4825 */
4751 4826 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
4752 4827 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
4753 4828 /*
4754 4829 * This could be a TM reply, which use the last allocated SMID,
4755 4830 * so allow for that.
4756 4831 */
4757 - if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
4832 + if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
4758 4833 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
4759 4834 "%d\n", SMID);
4760 4835 ddi_fm_service_impact(mpt->m_dip,
4761 4836 DDI_SERVICE_UNAFFECTED);
4762 4837 return;
4763 4838 }
4764 4839
4765 4840 cmd = slots->m_slot[SMID];
4766 4841
4767 4842 /*
4768 4843 * print warning and return if the slot is empty
4769 4844 */
4770 4845 if (cmd == NULL) {
4771 4846 mptsas_log(mpt, CE_WARN, "?NULL command for address "
4772 4847 "reply in slot %d", SMID);
4773 4848 return;
4774 4849 }
4775 4850 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
4776 4851 (cmd->cmd_flags & CFLAG_CONFIG) ||
4777 4852 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
4778 4853 cmd->cmd_rfm = reply_addr;
4779 4854 cmd->cmd_flags |= CFLAG_FINISHED;
4780 4855 cv_broadcast(&mpt->m_passthru_cv);
4781 4856 cv_broadcast(&mpt->m_config_cv);
4782 4857 cv_broadcast(&mpt->m_fw_diag_cv);
4783 4858 return;
4784 4859 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
4785 4860 mptsas_remove_cmd(mpt, cmd);
4786 4861 }
4787 4862 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
4788 4863 }
4789 4864 /*
4790 4865 * Depending on the function, we need to handle
4791 4866 * the reply frame (and cmd) differently.
4792 4867 */
4793 4868 switch (function) {
4794 4869 case MPI2_FUNCTION_SCSI_IO_REQUEST:
4795 4870 mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
4796 4871 break;
4797 4872 case MPI2_FUNCTION_SCSI_TASK_MGMT:
4798 4873 cmd->cmd_rfm = reply_addr;
4799 4874 mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
4800 4875 cmd);
4801 4876 break;
4802 4877 case MPI2_FUNCTION_FW_DOWNLOAD:
4803 4878 cmd->cmd_flags |= CFLAG_FINISHED;
4804 4879 cv_signal(&mpt->m_fw_cv);
4805 4880 break;
4806 4881 case MPI2_FUNCTION_EVENT_NOTIFICATION:
4807 4882 reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
4808 4883 mpt->m_reply_frame_size;
4809 4884 args = &mpt->m_replyh_args[reply_frame_no];
4810 4885 args->mpt = (void *)mpt;
4811 4886 args->rfm = reply_addr;
4812 4887
4813 4888 /*
4814 4889 * Record the event if its type is enabled in
4815 4890 * this mpt instance by ioctl.
4816 4891 */
4817 4892 mptsas_record_event(args);
4818 4893
4819 4894 /*
4820 4895 * Handle time critical events
4821 4896 * NOT_RESPONDING/ADDED only now
4822 4897 */
4823 4898 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
4824 4899 /*
4825 4900 			 * Do not return to the main process;
4826 4901 			 * just let the taskq resolve the ack action,
4827 4902 			 * and the ack will be sent from the taskq thread
4828 4903 */
4829 4904 NDBG20(("send mptsas_handle_event_sync success"));
4830 4905 }
4831 4906
4832 4907 if (mpt->m_in_reset) {
4833 4908 NDBG20(("dropping event received during reset"));
4834 4909 return;
4835 4910 }
4836 4911
4837 4912 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
4838 4913 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
4839 4914 mptsas_log(mpt, CE_WARN, "No memory available"
4840 4915 "for dispatch taskq");
4841 4916 /*
4842 4917 * Return the reply frame to the free queue.
4843 4918 */
4844 4919 ddi_put32(mpt->m_acc_free_queue_hdl,
4845 4920 &((uint32_t *)(void *)
4846 4921 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
4847 4922 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4848 4923 DDI_DMA_SYNC_FORDEV);
4849 4924 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
4850 4925 mpt->m_free_index = 0;
4851 4926 }
4852 4927
4853 4928 ddi_put32(mpt->m_datap,
4854 4929 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
4855 4930 }
4856 4931 return;
4857 4932 case MPI2_FUNCTION_DIAG_BUFFER_POST:
4858 4933 /*
4859 4934 * If SMID is 0, this implies that the reply is due to a
4860 4935 * release function with a status that the buffer has been
4861 4936 * released. Set the buffer flags accordingly.
4862 4937 */
4863 4938 if (SMID == 0) {
4864 4939 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
4865 4940 &reply->IOCStatus);
4866 4941 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
4867 4942 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
4868 4943 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
4869 4944 pBuffer =
4870 4945 &mpt->m_fw_diag_buffer_list[buffer_type];
4871 4946 pBuffer->valid_data = TRUE;
4872 4947 pBuffer->owned_by_firmware = FALSE;
4873 4948 pBuffer->immediate = FALSE;
4874 4949 }
4875 4950 } else {
4876 4951 /*
4877 4952 * Normal handling of diag post reply with SMID.
4878 4953 */
4879 4954 cmd = slots->m_slot[SMID];
4880 4955
4881 4956 /*
4882 4957 * print warning and return if the slot is empty
4883 4958 */
4884 4959 if (cmd == NULL) {
4885 4960 mptsas_log(mpt, CE_WARN, "?NULL command for "
4886 4961 "address reply in slot %d", SMID);
4887 4962 return;
4888 4963 }
4889 4964 cmd->cmd_rfm = reply_addr;
4890 4965 cmd->cmd_flags |= CFLAG_FINISHED;
4891 4966 cv_broadcast(&mpt->m_fw_diag_cv);
4892 4967 }
4893 4968 return;
4894 4969 default:
4895 4970 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
4896 4971 break;
4897 4972 }
4898 4973
4899 4974 /*
4900 4975 * Return the reply frame to the free queue.
4901 4976 */
4902 4977 ddi_put32(mpt->m_acc_free_queue_hdl,
4903 4978 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
4904 4979 reply_addr);
4905 4980 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4906 4981 DDI_DMA_SYNC_FORDEV);
4907 4982 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
4908 4983 mpt->m_free_index = 0;
4909 4984 }
4910 4985 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
4911 4986 mpt->m_free_index);
4912 4987
4913 4988 if (cmd->cmd_flags & CFLAG_FW_CMD)
4914 4989 return;
4915 4990
4916 4991 if (cmd->cmd_flags & CFLAG_RETRY) {
4917 4992 /*
4918 4993 		 * The target returned QFULL or busy, do not add this
4919 4994 * pkt to the doneq since the hba will retry
4920 4995 * this cmd.
4921 4996 *
4922 4997 * The pkt has already been resubmitted in
4923 4998 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4924 4999 * Remove this cmd_flag here.
4925 5000 */
4926 5001 cmd->cmd_flags &= ~CFLAG_RETRY;
4927 5002 } else {
4928 5003 mptsas_doneq_add(mpt, cmd);
4929 5004 }
4930 5005 }
4931 5006
4932 5007 static void
4933 5008 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
4934 5009 mptsas_cmd_t *cmd)
4935 5010 {
4936 5011 uint8_t scsi_status, scsi_state;
4937 5012 uint16_t ioc_status;
4938 5013 uint32_t xferred, sensecount, responsedata, loginfo = 0;
4939 5014 struct scsi_pkt *pkt;
4940 5015 struct scsi_arq_status *arqstat;
4941 5016 struct buf *bp;
4942 5017 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
4943 5018 uint8_t *sensedata = NULL;
4944 5019
4945 5020 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
4946 5021 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
4947 5022 bp = cmd->cmd_ext_arq_buf;
4948 5023 } else {
4949 5024 bp = cmd->cmd_arq_buf;
4950 5025 }
4951 5026
4952 5027 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
4953 5028 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
4954 5029 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
4955 5030 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
4956 5031 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
4957 5032 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
4958 5033 &reply->ResponseInfo);
4959 5034
4960 5035 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
4961 5036 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
4962 5037 &reply->IOCLogInfo);
4963 5038 mptsas_log(mpt, CE_NOTE,
4964 5039 "?Log info 0x%x received for target %d.\n"
4965 5040 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
4966 5041 loginfo, Tgt(cmd), scsi_status, ioc_status,
4967 5042 scsi_state);
4968 5043 }
4969 5044
4970 5045 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
4971 5046 scsi_status, ioc_status, scsi_state));
4972 5047
4973 5048 pkt = CMD2PKT(cmd);
4974 5049 *(pkt->pkt_scbp) = scsi_status;
4975 5050
4976 5051 if (loginfo == 0x31170000) {
4977 5052 /*
4978 5053 		 * If loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
4979 5054 		 * (0x31170000) is returned, the device missing delay is in
4980 5055 		 * progress and the command needs to be retried later.
4981 5056 */
4982 5057 *(pkt->pkt_scbp) = STATUS_BUSY;
4983 5058 return;
4984 5059 }
4985 5060
4986 5061 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
4987 5062 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
4988 5063 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
4989 5064 pkt->pkt_reason = CMD_INCOMPLETE;
4990 5065 pkt->pkt_state |= STATE_GOT_BUS;
4991 5066 if (ptgt->m_reset_delay == 0) {
4992 5067 mptsas_set_throttle(mpt, ptgt,
4993 5068 DRAIN_THROTTLE);
4994 5069 }
4995 5070 return;
4996 5071 }
4997 5072
4998 5073 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4999 5074 responsedata &= 0x000000FF;
5000 5075 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5001 5076 mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
5002 5077 pkt->pkt_reason = CMD_TLR_OFF;
5003 5078 return;
5004 5079 }
5005 5080 }
5006 5081
5007 5082
5008 5083 switch (scsi_status) {
5009 5084 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5010 5085 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5011 5086 arqstat = (void*)(pkt->pkt_scbp);
5012 5087 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5013 5088 (pkt->pkt_scbp));
5014 5089 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5015 5090 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5016 5091 if (cmd->cmd_flags & CFLAG_XARQ) {
5017 5092 pkt->pkt_state |= STATE_XARQ_DONE;
5018 5093 }
5019 5094 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5020 5095 pkt->pkt_state |= STATE_XFERRED_DATA;
5021 5096 }
5022 5097 arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5023 5098 arqstat->sts_rqpkt_state = pkt->pkt_state;
5024 5099 arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5025 5100 arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5026 5101 sensedata = (uint8_t *)&arqstat->sts_sensedata;
5027 5102
5028 5103 bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
5029 5104 ((cmd->cmd_rqslen >= sensecount) ? sensecount :
5030 5105 cmd->cmd_rqslen));
5031 5106 arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
5032 5107 cmd->cmd_flags |= CFLAG_CMDARQ;
5033 5108 /*
5034 5109 * Set proper status for pkt if autosense was valid
5035 5110 */
5036 5111 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5037 5112 struct scsi_status zero_status = { 0 };
5038 5113 arqstat->sts_rqpkt_status = zero_status;
5039 5114 }
5040 5115
5041 5116 /*
5042 5117 * ASC=0x47 is parity error
5043 5118 * ASC=0x48 is initiator detected error received
5044 5119 */
5045 5120 if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5046 5121 ((scsi_sense_asc(sensedata) == 0x47) ||
5047 5122 (scsi_sense_asc(sensedata) == 0x48))) {
5048 5123 mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5049 5124 }
5050 5125
5051 5126 /*
5052 5127 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5053 5128 * ASC/ASCQ=0x25/0x00 means invalid lun
5054 5129 */
5055 5130 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5056 5131 (scsi_sense_asc(sensedata) == 0x3F) &&
5057 5132 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5058 5133 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5059 5134 (scsi_sense_asc(sensedata) == 0x25) &&
5060 5135 (scsi_sense_ascq(sensedata) == 0x00))) {
5061 5136 mptsas_topo_change_list_t *topo_node = NULL;
5062 5137
5063 5138 topo_node = kmem_zalloc(
296 lines elided
5064 5139 sizeof (mptsas_topo_change_list_t),
5065 5140 KM_NOSLEEP);
5066 5141 if (topo_node == NULL) {
5067 5142 mptsas_log(mpt, CE_NOTE, "No memory"
5068 5143 "resource for handle SAS dynamic"
5069 5144 "reconfigure.\n");
5070 5145 break;
5071 5146 }
5072 5147 topo_node->mpt = mpt;
5073 5148 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5074 - topo_node->un.phymask = ptgt->m_phymask;
5149 + topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5075 5150 topo_node->devhdl = ptgt->m_devhdl;
5076 5151 topo_node->object = (void *)ptgt;
5077 5152 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5078 5153
5079 5154 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5080 5155 mptsas_handle_dr,
5081 5156 (void *)topo_node,
5082 5157 DDI_NOSLEEP)) != DDI_SUCCESS) {
5083 5158 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5084 5159 "for handle SAS dynamic reconfigure"
5085 5160 "failed. \n");
5086 5161 }
5087 5162 }
5088 5163 break;
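
Note: ptgt->m_phymask and ptgt->m_sas_wwn become ptgt->m_addr.mta_phymask and ptgt->m_addr.mta_wwn throughout this change; m_addr presumably doubles as the refhash tag for the target. The structure is defined in the headers elsewhere in this webrev, likely along these lines:

	typedef struct mptsas_target_addr {
		uint64_t		mta_wwn;	/* target SAS WWN */
		mptsas_phymask_t	mta_phymask;	/* iport phymask */
	} mptsas_target_addr_t;
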
5089 5164 case MPI2_SCSI_STATUS_GOOD:
5090 5165 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5091 5166 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5092 5167 pkt->pkt_reason = CMD_DEV_GONE;
5093 5168 pkt->pkt_state |= STATE_GOT_BUS;
5094 5169 if (ptgt->m_reset_delay == 0) {
5095 5170 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5096 5171 }
5097 5172 NDBG31(("lost disk for target%d, command:%x",
5098 5173 Tgt(cmd), pkt->pkt_cdbp[0]));
5099 5174 break;
5100 5175 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5101 5176 NDBG31(("data overrun: xferred=%d", xferred));
5102 5177 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5103 5178 pkt->pkt_reason = CMD_DATA_OVR;
5104 5179 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5105 5180 | STATE_SENT_CMD | STATE_GOT_STATUS
5106 5181 | STATE_XFERRED_DATA);
5107 5182 pkt->pkt_resid = 0;
5108 5183 break;
5109 5184 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5110 5185 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5111 5186 NDBG31(("data underrun: xferred=%d", xferred));
5112 5187 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5113 5188 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5114 5189 | STATE_SENT_CMD | STATE_GOT_STATUS);
5115 5190 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5116 5191 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5117 5192 pkt->pkt_state |= STATE_XFERRED_DATA;
5118 5193 }
5119 5194 break;
5120 5195 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5121 5196 mptsas_set_pkt_reason(mpt,
5122 5197 cmd, CMD_RESET, STAT_BUS_RESET);
5123 5198 break;
5124 5199 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5125 5200 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5126 5201 mptsas_set_pkt_reason(mpt,
5127 5202 cmd, CMD_RESET, STAT_DEV_RESET);
5128 5203 break;
5129 5204 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
45 lines elided
5130 5205 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5131 5206 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5132 5207 mptsas_set_pkt_reason(mpt,
5133 5208 cmd, CMD_TERMINATED, STAT_TERMINATED);
5134 5209 break;
5135 5210 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5136 5211 case MPI2_IOCSTATUS_BUSY:
5137 5212 /*
5138 5213 * set throttles to drain
5139 5214 */
5140 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5141 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
5142 - while (ptgt != NULL) {
5215 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5216 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
5143 5217 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5144 -
5145 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5146 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
5147 5218 }
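
Note: this hunk shows the pattern that replaces mptsas_hash_traverse() throughout the driver (bug 4500). As the new usage suggests, the refhash iterator keeps a hold on the element it returns, so a target removed by a concurrent config task can no longer leave the walk pointing at freed memory. The general form, assuming m_mutex is held as it is here:

	mptsas_target_t *tp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		/* tp stays valid across a concurrent removal */
		mptsas_set_throttle(mpt, tp, DRAIN_THROTTLE);
	}
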
5148 5219
5149 5220 /*
5150 5221 * retry command
5151 5222 */
5152 5223 cmd->cmd_flags |= CFLAG_RETRY;
5153 5224 cmd->cmd_pkt_flags |= FLAG_HEAD;
5154 5225
5155 5226 (void) mptsas_accept_pkt(mpt, cmd);
5156 5227 break;
5157 5228 default:
5158 5229 mptsas_log(mpt, CE_WARN,
5159 5230 "unknown ioc_status = %x\n", ioc_status);
5160 5231 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5161 5232 "count = %x, scsi_status = %x", scsi_state,
5162 5233 xferred, scsi_status);
5163 5234 break;
5164 5235 }
5165 5236 break;
5166 5237 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5167 5238 mptsas_handle_qfull(mpt, cmd);
5168 5239 break;
5169 5240 case MPI2_SCSI_STATUS_BUSY:
5170 5241 NDBG31(("scsi_status busy received"));
5171 5242 break;
5172 5243 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5173 5244 NDBG31(("scsi_status reservation conflict received"));
5174 5245 break;
5175 5246 default:
5176 5247 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5177 5248 scsi_status, ioc_status);
5178 5249 mptsas_log(mpt, CE_WARN,
5179 5250 "mptsas_process_intr: invalid scsi status\n");
5180 5251 break;
5181 5252 }
5182 5253 }
5183 5254
5184 5255 static void
5185 5256 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5186 5257 mptsas_cmd_t *cmd)
5187 5258 {
5188 5259 uint8_t task_type;
5189 5260 uint16_t ioc_status;
5190 5261 uint32_t log_info;
5191 5262 uint16_t dev_handle;
5192 5263 struct scsi_pkt *pkt = CMD2PKT(cmd);
5193 5264
5194 5265 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5195 5266 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5196 5267 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5197 5268 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5198 5269
5199 5270 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5200 5271 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5201 5272 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5202 5273 task_type, ioc_status, log_info, dev_handle);
5203 5274 pkt->pkt_reason = CMD_INCOMPLETE;
5204 5275 return;
5205 5276 }
5206 5277
5207 5278 switch (task_type) {
5208 5279 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5209 5280 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5210 5281 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5211 5282 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5212 5283 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5213 5284 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5214 5285 break;
5215 5286 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5216 5287 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5217 5288 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5218 5289 /*
5219 5290 * Check for invalid DevHandle of 0 in case application
5220 5291 * sends bad command. DevHandle of 0 could cause problems.
5221 5292 */
5222 5293 if (dev_handle == 0) {
5223 5294 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5224 5295 " DevHandle of 0.");
5225 5296 } else {
5226 5297 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5227 5298 task_type);
5228 5299 }
5229 5300 break;
5230 5301 default:
5231 5302 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5232 5303 task_type);
5233 5304 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5234 5305 break;
5235 5306 }
5236 5307 }
5237 5308
5238 5309 static void
5239 5310 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
5240 5311 {
5241 5312 mptsas_t *mpt = arg->mpt;
5242 5313 uint64_t t = arg->t;
5243 5314 mptsas_cmd_t *cmd;
5244 5315 struct scsi_pkt *pkt;
5245 5316 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
5246 5317
5247 5318 mutex_enter(&item->mutex);
5248 5319 while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
5249 5320 if (!item->doneq) {
5250 5321 cv_wait(&item->cv, &item->mutex);
5251 5322 }
5252 5323 pkt = NULL;
5253 5324 if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
5254 5325 cmd->cmd_flags |= CFLAG_COMPLETED;
5255 5326 pkt = CMD2PKT(cmd);
5256 5327 }
5257 5328 mutex_exit(&item->mutex);
5258 5329 if (pkt) {
5259 5330 mptsas_pkt_comp(pkt, cmd);
5260 5331 }
5261 5332 mutex_enter(&item->mutex);
5262 5333 }
5263 5334 mutex_exit(&item->mutex);
5264 5335 mutex_enter(&mpt->m_doneq_mutex);
5265 5336 mpt->m_doneq_thread_n--;
5266 5337 cv_broadcast(&mpt->m_doneq_thread_cv);
5267 5338 mutex_exit(&mpt->m_doneq_mutex);
5268 5339 }
5269 5340
5270 5341
5271 5342 /*
5272 5343 * mpt interrupt handler.
5273 5344 */
5274 5345 static uint_t
5275 5346 mptsas_intr(caddr_t arg1, caddr_t arg2)
5276 5347 {
5277 5348 mptsas_t *mpt = (void *)arg1;
5278 5349 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5279 5350 uchar_t did_reply = FALSE;
5280 5351
5281 5352 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5282 5353
5283 5354 mutex_enter(&mpt->m_mutex);
5284 5355
5285 5356 /*
5286 5357 * If interrupts are shared by two channels then check whether this
5287 5358 	 * interrupt is genuinely for this channel by first making sure the
5288 5359 	 * chip is in a high power state.
5289 5360 */
5290 5361 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5291 5362 (mpt->m_power_level != PM_LEVEL_D0)) {
5292 5363 mutex_exit(&mpt->m_mutex);
5293 5364 return (DDI_INTR_UNCLAIMED);
5294 5365 }
5295 5366
5296 5367 /*
5297 5368 * If polling, interrupt was triggered by some shared interrupt because
5298 5369 * IOC interrupts are disabled during polling, so polling routine will
5299 5370 * handle any replies. Considering this, if polling is happening,
5300 5371 * return with interrupt unclaimed.
5301 5372 */
5302 5373 if (mpt->m_polled_intr) {
5303 5374 mutex_exit(&mpt->m_mutex);
5304 5375 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5305 5376 return (DDI_INTR_UNCLAIMED);
5306 5377 }
5307 5378
5308 5379 /*
5309 5380 * Read the istat register.
5310 5381 */
5311 5382 if ((INTPENDING(mpt)) != 0) {
5312 5383 /*
5313 5384 * read fifo until empty.
5314 5385 */
5315 5386 #ifndef __lock_lint
5316 5387 _NOTE(CONSTCOND)
5317 5388 #endif
5318 5389 while (TRUE) {
5319 5390 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5320 5391 DDI_DMA_SYNC_FORCPU);
5321 5392 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5322 5393 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5323 5394
5324 5395 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5325 5396 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5326 5397 ddi_get32(mpt->m_acc_post_queue_hdl,
5327 5398 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5328 5399 break;
5329 5400 }
5330 5401
5331 5402 /*
5332 5403 * The reply is valid, process it according to its
5333 5404 * type. Also, set a flag for updating the reply index
5334 5405 * after they've all been processed.
5335 5406 */
5336 5407 did_reply = TRUE;
5337 5408
5338 5409 mptsas_process_intr(mpt, reply_desc_union);
5339 5410
5340 5411 /*
5341 5412 * Increment post index and roll over if needed.
5342 5413 */
5343 5414 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5344 5415 mpt->m_post_index = 0;
5345 5416 }
5346 5417 }
5347 5418
5348 5419 /*
5349 5420 * Update the global reply index if at least one reply was
5350 5421 * processed.
5351 5422 */
5352 5423 if (did_reply) {
5353 5424 ddi_put32(mpt->m_datap,
5354 5425 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5355 5426 }
5356 5427 } else {
5357 5428 mutex_exit(&mpt->m_mutex);
5358 5429 return (DDI_INTR_UNCLAIMED);
5359 5430 }
5360 5431 NDBG1(("mptsas_intr complete"));
5361 5432
5362 5433 /*
5363 5434 * If no helper threads are created, process the doneq in ISR. If
5364 5435 * helpers are created, use the doneq length as a metric to measure the
5365 5436 * load on the interrupt CPU. If it is long enough, which indicates the
5366 5437 * load is heavy, then we deliver the IO completions to the helpers.
5367 5438 * This measurement has some limitations, although it is simple and
5368 5439 * straightforward and works well for most of the cases at present.
5369 5440 */
5370 5441 if (!mpt->m_doneq_thread_n ||
5371 5442 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5372 5443 mptsas_doneq_empty(mpt);
5373 5444 } else {
5374 5445 mptsas_deliver_doneq_thread(mpt);
5375 5446 }
5376 5447
5377 5448 /*
5378 5449 * If there are queued cmd, start them now.
5379 5450 	 * If there are queued cmds, start them now.
5380 5451 if (mpt->m_waitq != NULL) {
5381 5452 mptsas_restart_waitq(mpt);
5382 5453 }
5383 5454
5384 5455 mutex_exit(&mpt->m_mutex);
5385 5456 return (DDI_INTR_CLAIMED);
5386 5457 }
5387 5458
5388 5459 static void
5389 5460 mptsas_process_intr(mptsas_t *mpt,
5390 5461 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5391 5462 {
5392 5463 uint8_t reply_type;
5393 5464
5394 5465 ASSERT(mutex_owned(&mpt->m_mutex));
5395 5466
5396 5467 /*
5397 5468 * The reply is valid, process it according to its
5398 5469 * type. Also, set a flag for updated the reply index
5399 5470 * after they've all been processed.
5400 5471 */
5401 5472 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5402 5473 &reply_desc_union->Default.ReplyFlags);
5403 5474 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5404 5475 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5405 5476 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5406 5477 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5407 5478 mptsas_handle_address_reply(mpt, reply_desc_union);
5408 5479 } else {
5409 5480 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5410 5481 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5411 5482 }
5412 5483
5413 5484 /*
5414 5485 * Clear the reply descriptor for re-use and increment
5415 5486 * index.
5416 5487 */
5417 5488 ddi_put64(mpt->m_acc_post_queue_hdl,
5418 5489 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5419 5490 0xFFFFFFFFFFFFFFFF);
5420 5491 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5421 5492 DDI_DMA_SYNC_FORDEV);
5422 5493 }
5423 5494
5424 5495 /*
5425 5496 * handle qfull condition
5426 5497 */
5427 5498 static void
5428 5499 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5429 5500 {
5430 5501 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5431 5502
5432 5503 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5433 5504 (ptgt->m_qfull_retries == 0)) {
5434 5505 /*
5435 5506 * We have exhausted the retries on QFULL, or,
5436 5507 * the target driver has indicated that it
5437 5508 * wants to handle QFULL itself by setting
5438 5509 * qfull-retries capability to 0. In either case
5439 5510 * we want the target driver's QFULL handling
5440 5511 * to kick in. We do this by having pkt_reason
5441 5512 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5442 5513 */
5443 5514 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5444 5515 } else {
5445 5516 if (ptgt->m_reset_delay == 0) {
5446 5517 ptgt->m_t_throttle =
5447 5518 max((ptgt->m_t_ncmds - 2), 0);
5448 5519 }
5449 5520
5450 5521 cmd->cmd_pkt_flags |= FLAG_HEAD;
5451 5522 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5452 5523 cmd->cmd_flags |= CFLAG_RETRY;
5453 5524
5454 5525 (void) mptsas_accept_pkt(mpt, cmd);
5455 5526
5456 5527 /*
5457 5528 * when target gives queue full status with no commands
5458 5529 * outstanding (m_t_ncmds == 0), throttle is set to 0
5459 5530 		 * (HOLD_THROTTLE), and the queue full handling starts
5460 5531 * (see psarc/1994/313); if there are commands outstanding,
5461 5532 * throttle is set to (m_t_ncmds - 2)
5462 5533 */
5463 5534 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5464 5535 /*
5465 5536 * By setting throttle to QFULL_THROTTLE, we
5466 5537 * avoid submitting new commands and in
5467 5538 * mptsas_restart_cmd find out slots which need
5468 5539 * their throttles to be cleared.
5469 5540 */
5470 5541 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5471 5542 if (mpt->m_restart_cmd_timeid == 0) {
5472 5543 mpt->m_restart_cmd_timeid =
5473 5544 timeout(mptsas_restart_cmd, mpt,
5474 5545 ptgt->m_qfull_retry_interval);
5475 5546 }
5476 5547 }
5477 5548 }
5478 5549 }
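
Note: a worked example of the throttle arithmetic above. If a target reports TASK SET FULL with m_t_ncmds == 10 commands outstanding, the throttle drops to max(10 - 2, 0) == 8; with nothing outstanding it goes to HOLD_THROTTLE (0) and is then moved to QFULL_THROTTLE so mptsas_restart_cmd() can clear it after m_qfull_retry_interval:

	/* e.g. ten outstanding commands -> a new throttle of eight */
	ptgt->m_t_throttle = max((ptgt->m_t_ncmds - 2), 0);
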
5479 5550
5480 5551 mptsas_phymask_t
5481 5552 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5482 5553 {
5483 5554 mptsas_phymask_t phy_mask = 0;
5484 5555 uint8_t i = 0;
5485 5556
5486 5557 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5487 5558
5488 5559 ASSERT(mutex_owned(&mpt->m_mutex));
5489 5560
5490 5561 /*
5491 5562 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5492 5563 */
5493 5564 if (physport == 0xFF) {
5494 5565 return (0);
5495 5566 }
5496 5567
5497 5568 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5498 5569 if (mpt->m_phy_info[i].attached_devhdl &&
5499 5570 (mpt->m_phy_info[i].phy_mask != 0) &&
5500 5571 (mpt->m_phy_info[i].port_num == physport)) {
5501 5572 phy_mask = mpt->m_phy_info[i].phy_mask;
5502 5573 break;
5503 5574 }
5504 5575 }
5505 5576 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5506 5577 mpt->m_instance, physport, phy_mask));
5507 5578 return (phy_mask);
5508 5579 }
5509 5580
5510 5581 /*
5511 5582 * mpt free device handle after device gone, by use of passthrough
5512 5583 */
5513 5584 static int
5514 5585 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5515 5586 {
5516 5587 Mpi2SasIoUnitControlRequest_t req;
5517 5588 Mpi2SasIoUnitControlReply_t rep;
5518 5589 int ret;
5519 5590
5520 5591 ASSERT(mutex_owned(&mpt->m_mutex));
5521 5592
5522 5593 /*
5523 5594 * Need to compose a SAS IO Unit Control request message
5524 5595 * and call mptsas_do_passthru() function
5525 5596 */
5526 5597 bzero(&req, sizeof (req));
5527 5598 bzero(&rep, sizeof (rep));
5528 5599
5529 5600 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5530 5601 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5531 5602 req.DevHandle = LE_16(devhdl);
5532 5603
5533 5604 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5534 5605 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5535 5606 if (ret != 0) {
5536 5607 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5537 5608 "Control error %d", ret);
5538 5609 return (DDI_FAILURE);
5539 5610 }
5540 5611
5541 5612 /* do passthrough success, check the ioc status */
5542 5613 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5543 5614 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5544 5615 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5545 5616 return (DDI_FAILURE);
5546 5617 }
5547 5618
5548 5619 return (DDI_SUCCESS);
5549 5620 }
5550 5621
5551 5622 static void
5552 5623 mptsas_update_phymask(mptsas_t *mpt)
5553 5624 {
5554 5625 mptsas_phymask_t mask = 0, phy_mask;
5555 5626 char *phy_mask_name;
5556 5627 uint8_t current_port;
5557 5628 int i, j;
5558 5629
5559 5630 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5560 5631
5561 5632 ASSERT(mutex_owned(&mpt->m_mutex));
5562 5633
5563 5634 (void) mptsas_get_sas_io_unit_page(mpt);
5564 5635
5565 5636 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5566 5637
5567 5638 for (i = 0; i < mpt->m_num_phys; i++) {
5568 5639 phy_mask = 0x00;
5569 5640
5570 5641 if (mpt->m_phy_info[i].attached_devhdl == 0)
5571 5642 continue;
5572 5643
5573 5644 bzero(phy_mask_name, sizeof (phy_mask_name));
5574 5645
5575 5646 current_port = mpt->m_phy_info[i].port_num;
5576 5647
5577 5648 if ((mask & (1 << i)) != 0)
5578 5649 continue;
5579 5650
5580 5651 for (j = 0; j < mpt->m_num_phys; j++) {
5581 5652 if (mpt->m_phy_info[j].attached_devhdl &&
5582 5653 (mpt->m_phy_info[j].port_num == current_port)) {
5583 5654 phy_mask |= (1 << j);
5584 5655 }
5585 5656 }
5586 5657 mask = mask | phy_mask;
5587 5658
5588 5659 for (j = 0; j < mpt->m_num_phys; j++) {
5589 5660 if ((phy_mask >> j) & 0x01) {
5590 5661 mpt->m_phy_info[j].phy_mask = phy_mask;
5591 5662 }
5592 5663 }
5593 5664
5594 5665 (void) sprintf(phy_mask_name, "%x", phy_mask);
5595 5666
5596 5667 mutex_exit(&mpt->m_mutex);
5597 5668 /*
5598 5669 		 * register an iport; if the port already exists,
5599 5670 		 * SCSA will do nothing and just return.
5600 5671 */
5601 5672 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5602 5673 mutex_enter(&mpt->m_mutex);
5603 5674 }
5604 5675 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5605 5676 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5606 5677 }
5607 5678
5608 5679 /*
5609 5680  * mptsas_handle_dr is a task handler for DR; the DR actions include:
5610 5681  * 1. Directly attached Device Added/Removed.
5611 5682 * 2. Expander Device Added/Removed.
5612 5683 * 3. Indirectly Attached Device Added/Expander.
5613 5684  * 4. Status change of the LUNs of an existing device.
5614 5685 * 5. RAID volume created/deleted.
5615 5686 * 6. Member of RAID volume is released because of RAID deletion.
5616 5687 * 7. Physical disks are removed because of RAID creation.
5617 5688 */
5618 5689 static void
5619 5690 mptsas_handle_dr(void *args) {
5620 5691 mptsas_topo_change_list_t *topo_node = NULL;
5621 5692 mptsas_topo_change_list_t *save_node = NULL;
5622 5693 mptsas_t *mpt;
5623 5694 dev_info_t *parent = NULL;
5624 5695 mptsas_phymask_t phymask = 0;
5625 5696 char *phy_mask_name;
5626 5697 uint8_t flags = 0, physport = 0xff;
5627 5698 uint8_t port_update = 0;
5628 5699 uint_t event;
5629 5700
5630 5701 topo_node = (mptsas_topo_change_list_t *)args;
5631 5702
5632 5703 mpt = topo_node->mpt;
5633 5704 event = topo_node->event;
5634 5705 flags = topo_node->flags;
5635 5706
5636 5707 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5637 5708
5638 5709 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
5639 5710
5640 5711 switch (event) {
5641 5712 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5642 5713 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5643 5714 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
5644 5715 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
5645 5716 /*
5646 5717 * Direct attached or expander attached device added
5647 5718 * into system or a Phys Disk that is being unhidden.
5648 5719 */
5649 5720 port_update = 1;
5650 5721 }
5651 5722 break;
5652 5723 case MPTSAS_DR_EVENT_RECONFIG_SMP:
5653 5724 /*
5654 5725 * New expander added into system, it must be the head
5655 5726 * of topo_change_list_t
5656 5727 */
5657 5728 port_update = 1;
5658 5729 break;
5659 5730 default:
5660 5731 port_update = 0;
5661 5732 break;
5662 5733 }
5663 5734 /*
5664 5735 	 * In all cases where port_update == 1, the initiator port form may change
5665 5736 */
5666 5737 mutex_enter(&mpt->m_mutex);
5667 5738 if (mpt->m_port_chng && port_update) {
5668 5739 /*
5669 5740 		 * The mpt->m_port_chng flag indicates that some PHYs of the
5670 5741 		 * initiator port have come online. So when an expander-added
5671 5742 		 * or directly-attached-device-online event arrives, we force
5672 5743 		 * an update of the port information by issuing a SAS IO Unit
5673 5744 		 * Page request and updating the PHYMASKs.
5674 5745 */
5675 5746 (void) mptsas_update_phymask(mpt);
5676 5747 mpt->m_port_chng = 0;
5677 5748
5678 5749 }
5679 5750 mutex_exit(&mpt->m_mutex);
5680 5751 while (topo_node) {
5681 5752 phymask = 0;
5682 5753 if (parent == NULL) {
5683 5754 physport = topo_node->un.physport;
5684 5755 event = topo_node->event;
5685 5756 flags = topo_node->flags;
5686 5757 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
5687 5758 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
5688 5759 /*
5689 5760 * For all offline events, phymask is known
5690 5761 */
5691 5762 phymask = topo_node->un.phymask;
5692 5763 goto find_parent;
5693 5764 }
5694 5765 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
5695 5766 goto handle_topo_change;
5696 5767 }
5697 5768 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
5698 5769 phymask = topo_node->un.phymask;
5699 5770 goto find_parent;
5700 5771 }
5701 5772
5702 5773 if ((flags ==
5703 5774 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
5704 5775 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
5705 5776 /*
5706 5777 				 * No field in the IR_CONFIG_CHANGE
5707 5778 				 * event indicates physport/phynum, so get
5708 5779 				 * the parent after the SAS Device Page0 request.
5709 5780 */
5710 5781 goto handle_topo_change;
5711 5782 }
5712 5783
5713 5784 mutex_enter(&mpt->m_mutex);
5714 5785 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
5715 5786 /*
5716 5787 				 * If a direct attached device was added or a
5717 5788 				 * phys disk is being unhidden, the argument
5718 5789 				 * physport is actually the PHY#, so we have to
5719 5790 				 * get the phymask according to the PHY#.
5720 5791 */
5721 5792 physport = mpt->m_phy_info[physport].port_num;
5722 5793 }
5723 5794
5724 5795 /*
5725 5796 * Translate physport to phymask so that we can search
5726 5797 * parent dip.
5727 5798 */
5728 5799 phymask = mptsas_physport_to_phymask(mpt,
5729 5800 physport);
5730 5801 mutex_exit(&mpt->m_mutex);
5731 5802
5732 5803 find_parent:
5733 5804 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
5734 5805 /*
5735 5806 * For RAID topology change node, write the iport name
5736 5807 * as v0.
5737 5808 */
5738 5809 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5739 5810 (void) sprintf(phy_mask_name, "v0");
5740 5811 } else {
5741 5812 /*
5742 5813 				 * phymask can be 0 if the drive has been
5743 5814 * pulled by the time an add event is
5744 5815 * processed. If phymask is 0, just skip this
5745 5816 * event and continue.
5746 5817 */
5747 5818 if (phymask == 0) {
5748 5819 mutex_enter(&mpt->m_mutex);
5749 5820 save_node = topo_node;
5750 5821 topo_node = topo_node->next;
5751 5822 ASSERT(save_node);
5752 5823 kmem_free(save_node,
5753 5824 sizeof (mptsas_topo_change_list_t));
5754 5825 mutex_exit(&mpt->m_mutex);
5755 5826
5756 5827 parent = NULL;
5757 5828 continue;
5758 5829 }
5759 5830 (void) sprintf(phy_mask_name, "%x", phymask);
5760 5831 }
5761 5832 parent = scsi_hba_iport_find(mpt->m_dip,
5762 5833 phy_mask_name);
5763 5834 if (parent == NULL) {
5764 5835 mptsas_log(mpt, CE_WARN, "Failed to find an "
5765 5836 "iport, should not happen!");
5766 5837 goto out;
5767 5838 }
5768 5839
5769 5840 }
5770 5841 ASSERT(parent);
5771 5842 handle_topo_change:
5772 5843
5773 5844 mutex_enter(&mpt->m_mutex);
5774 5845 /*
5775 5846 * If HBA is being reset, don't perform operations depending
5776 5847 * on the IOC. We must free the topo list, however.
5777 5848 */
5778 5849 if (!mpt->m_in_reset)
5779 5850 mptsas_handle_topo_change(topo_node, parent);
5780 5851 else
5781 5852 NDBG20(("skipping topo change received during reset"));
5782 5853 save_node = topo_node;
5783 5854 topo_node = topo_node->next;
5784 5855 ASSERT(save_node);
5785 5856 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
5786 5857 mutex_exit(&mpt->m_mutex);
5787 5858
5788 5859 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5789 5860 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
5790 5861 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
5791 5862 /*
5792 5863 			 * If a direct attached device is associated, make sure
5793 5864 			 * to reset the parent before starting the next one. All
5794 5865 			 * devices associated with an expander share the same
5795 5866 			 * parent. Also, reset the parent if this is for RAID.
5796 5867 */
5797 5868 parent = NULL;
5798 5869 }
5799 5870 }
5800 5871 out:
5801 5872 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5802 5873 }
5803 5874
5804 5875 static void
5805 5876 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
5806 5877 dev_info_t *parent)
5807 5878 {
5808 5879 mptsas_target_t *ptgt = NULL;
5809 5880 mptsas_smp_t *psmp = NULL;
5810 5881 mptsas_t *mpt = (void *)topo_node->mpt;
5811 5882 uint16_t devhdl;
5812 5883 uint16_t attached_devhdl;
5813 5884 uint64_t sas_wwn = 0;
5814 5885 int rval = 0;
5815 5886 uint32_t page_address;
5816 5887 uint8_t phy, flags;
5817 5888 char *addr = NULL;
5818 5889 dev_info_t *lundip;
5819 5890 int circ = 0, circ1 = 0;
5820 5891 char attached_wwnstr[MPTSAS_WWN_STRLEN];
5821 5892
5822 5893 NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));
5823 5894
5824 5895 ASSERT(mutex_owned(&mpt->m_mutex));
5825 5896
5826 5897 switch (topo_node->event) {
670 lines elided
5827 5898 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
5828 5899 {
5829 5900 char *phy_mask_name;
5830 5901 mptsas_phymask_t phymask = 0;
5831 5902
5832 5903 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5833 5904 /*
5834 5905 * Get latest RAID info.
5835 5906 */
5836 5907 (void) mptsas_get_raid_info(mpt);
5837 - ptgt = mptsas_search_by_devhdl(
5838 - &mpt->m_active->m_tgttbl, topo_node->devhdl);
5908 + ptgt = refhash_linear_search(mpt->m_targets,
5909 + mptsas_target_eval_devhdl, &topo_node->devhdl);
5839 5910 if (ptgt == NULL)
5840 5911 break;
5841 5912 } else {
5842 5913 ptgt = (void *)topo_node->object;
5843 5914 }
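
Note: refhash_linear_search() scans the table and returns the first element for which the eval callback returns 0; that is how the old mptsas_search_by_devhdl() lookups are expressed under refhash. The devhdl evaluator is defined elsewhere in this change; a plausible shape is:

	static int
	mptsas_target_eval_devhdl(const void *op, void *arg)
	{
		uint16_t dh = *(uint16_t *)arg;
		const mptsas_target_t *tp = op;

		/* return 0 on a match */
		return ((int)tp->m_devhdl - (int)dh);
	}
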
5844 5915
5845 5916 if (ptgt == NULL) {
5846 5917 /*
5847 5918 * If a Phys Disk was deleted, RAID info needs to be
5848 5919 * updated to reflect the new topology.
5849 5920 */
5850 5921 (void) mptsas_get_raid_info(mpt);
5851 5922
5852 5923 /*
5853 5924 * Get sas device page 0 by DevHandle to make sure if
5854 5925 * SSP/SATA end device exist.
5855 5926 */
5856 5927 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
5857 5928 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
5858 5929 topo_node->devhdl;
5859 5930
5860 5931 rval = mptsas_get_target_device_info(mpt, page_address,
5861 5932 &devhdl, &ptgt);
5862 5933 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
5863 5934 mptsas_log(mpt, CE_NOTE,
5864 5935 "mptsas_handle_topo_change: target %d is "
5865 5936 "not a SAS/SATA device. \n",
5866 5937 topo_node->devhdl);
5867 5938 } else if (rval == DEV_INFO_FAIL_ALLOC) {
5868 5939 mptsas_log(mpt, CE_NOTE,
5869 5940 "mptsas_handle_topo_change: could not "
5870 5941 "allocate memory. \n");
5871 5942 }
5872 5943 /*
5873 5944 			 * If rval is DEV_INFO_PHYS_DISK then there is nothing
5874 5945 * else to do, just leave.
5875 5946 */
5876 5947 if (rval != DEV_INFO_SUCCESS) {
28 lines elided
5877 5948 return;
5878 5949 }
5879 5950 }
5880 5951
5881 5952 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
5882 5953
5883 5954 mutex_exit(&mpt->m_mutex);
5884 5955 flags = topo_node->flags;
5885 5956
5886 5957 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
5887 - phymask = ptgt->m_phymask;
5958 + phymask = ptgt->m_addr.mta_phymask;
5888 5959 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5889 5960 (void) sprintf(phy_mask_name, "%x", phymask);
5890 5961 parent = scsi_hba_iport_find(mpt->m_dip,
5891 5962 phy_mask_name);
5892 5963 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5893 5964 if (parent == NULL) {
5894 5965 mptsas_log(mpt, CE_WARN, "Failed to find a "
5895 5966 "iport for PD, should not happen!");
5896 5967 mutex_enter(&mpt->m_mutex);
5897 5968 break;
5898 5969 }
5899 5970 }
5900 5971
5901 5972 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
5902 5973 ndi_devi_enter(parent, &circ1);
5903 5974 (void) mptsas_config_raid(parent, topo_node->devhdl,
5904 5975 &lundip);
5905 5976 ndi_devi_exit(parent, circ1);
5906 5977 } else {
5907 5978 /*
5908 5979 * hold nexus for bus configure
5909 5980 */
5910 5981 ndi_devi_enter(scsi_vhci_dip, &circ);
5911 5982 ndi_devi_enter(parent, &circ1);
5912 5983 rval = mptsas_config_target(parent, ptgt);
5913 5984 /*
5914 5985 * release nexus for bus configure
5915 5986 */
18 lines elided
5916 5987 ndi_devi_exit(parent, circ1);
5917 5988 ndi_devi_exit(scsi_vhci_dip, circ);
5918 5989
5919 5990 /*
5920 5991 * Add parent's props for SMHBA support
5921 5992 */
5922 5993 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
5923 5994 bzero(attached_wwnstr,
5924 5995 sizeof (attached_wwnstr));
5925 5996 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
5926 - ptgt->m_sas_wwn);
5997 + ptgt->m_addr.mta_wwn);
5927 5998 if (ddi_prop_update_string(DDI_DEV_T_NONE,
5928 5999 parent,
5929 6000 SCSI_ADDR_PROP_ATTACHED_PORT,
5930 6001 attached_wwnstr)
5931 6002 != DDI_PROP_SUCCESS) {
5932 6003 (void) ddi_prop_remove(DDI_DEV_T_NONE,
5933 6004 parent,
5934 6005 SCSI_ADDR_PROP_ATTACHED_PORT);
5935 6006 mptsas_log(mpt, CE_WARN, "Failed to"
5936 6007 "attached-port props");
5937 6008 return;
5938 6009 }
5939 6010 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
5940 6011 MPTSAS_NUM_PHYS, 1) !=
5941 6012 DDI_PROP_SUCCESS) {
5942 6013 (void) ddi_prop_remove(DDI_DEV_T_NONE,
5943 6014 parent, MPTSAS_NUM_PHYS);
5944 6015 mptsas_log(mpt, CE_WARN, "Failed to"
5945 6016 " create num-phys props");
5946 6017 return;
5947 6018 }
5948 6019
5949 6020 /*
5950 6021 * Update PHY info for smhba
5951 6022 */
5952 6023 mutex_enter(&mpt->m_mutex);
5953 6024 if (mptsas_smhba_phy_init(mpt)) {
5954 6025 mutex_exit(&mpt->m_mutex);
5955 6026 mptsas_log(mpt, CE_WARN, "mptsas phy"
5956 6027 " update failed");
5957 6028 return;
5958 6029 }
5959 6030 mutex_exit(&mpt->m_mutex);
5960 6031
5961 6032 /*
5962 6033 * topo_node->un.physport is really the PHY#
5963 6034 * for direct attached devices
5964 6035 */
5965 6036 mptsas_smhba_set_one_phy_props(mpt, parent,
5966 6037 topo_node->un.physport, &attached_devhdl);
5967 6038
5968 6039 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
5969 6040 MPTSAS_VIRTUAL_PORT, 0) !=
5970 6041 DDI_PROP_SUCCESS) {
5971 6042 (void) ddi_prop_remove(DDI_DEV_T_NONE,
5972 6043 parent, MPTSAS_VIRTUAL_PORT);
5973 6044 mptsas_log(mpt, CE_WARN,
37 lines elided
5974 6045 "mptsas virtual-port"
5975 6046 "port prop update failed");
5976 6047 return;
5977 6048 }
5978 6049 }
5979 6050 }
5980 6051 mutex_enter(&mpt->m_mutex);
5981 6052
5982 6053 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
5983 6054 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
5984 - ptgt->m_phymask));
6055 + ptgt->m_addr.mta_phymask));
5985 6056 break;
5986 6057 }
5987 6058 case MPTSAS_DR_EVENT_OFFLINE_TARGET:
5988 6059 {
5989 - mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
5990 6060 devhdl = topo_node->devhdl;
5991 - ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
6061 + ptgt = refhash_linear_search(mpt->m_targets,
6062 + mptsas_target_eval_devhdl, &devhdl);
5992 6063 if (ptgt == NULL)
5993 6064 break;
5994 6065
5995 - sas_wwn = ptgt->m_sas_wwn;
6066 + sas_wwn = ptgt->m_addr.mta_wwn;
5996 6067 phy = ptgt->m_phynum;
5997 6068
5998 6069 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
5999 6070
6000 6071 if (sas_wwn) {
6001 6072 (void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6002 6073 } else {
6003 6074 (void) sprintf(addr, "p%x", phy);
6004 6075 }
6005 6076 ASSERT(ptgt->m_devhdl == devhdl);
6006 6077
6007 6078 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6008 6079 (topo_node->flags ==
6009 6080 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6010 6081 /*
6011 6082 * Get latest RAID info if RAID volume status changes
6012 6083 * or Phys Disk status changes
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
6013 6084 */
6014 6085 (void) mptsas_get_raid_info(mpt);
6015 6086 }
6016 6087 /*
6017 6088 * Abort all outstanding command on the device
6018 6089 */
6019 6090 rval = mptsas_do_scsi_reset(mpt, devhdl);
6020 6091 if (rval) {
6021 6092 NDBG20(("mptsas%d handle_topo_change to reset target "
6022 6093 "before offline devhdl:%x, phymask:%x, rval:%x",
6023 - mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
6024 - rval));
6094 + mpt->m_instance, ptgt->m_devhdl,
6095 + ptgt->m_addr.mta_phymask, rval));
6025 6096 }
6026 6097
6027 6098 mutex_exit(&mpt->m_mutex);
6028 6099
6029 6100 ndi_devi_enter(scsi_vhci_dip, &circ);
6030 6101 ndi_devi_enter(parent, &circ1);
6031 6102 rval = mptsas_offline_target(parent, addr);
6032 6103 ndi_devi_exit(parent, circ1);
6033 6104 ndi_devi_exit(scsi_vhci_dip, circ);
6034 6105 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6035 6106 "phymask:%x, rval:%x", mpt->m_instance,
6036 - ptgt->m_devhdl, ptgt->m_phymask, rval));
6107 + ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6037 6108
6038 6109 kmem_free(addr, SCSI_MAXNAMELEN);
6039 6110
6040 6111 /*
6041 6112 * Clear parent's props for SMHBA support
6042 6113 */
6043 6114 flags = topo_node->flags;
6044 6115 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6045 6116 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6046 6117 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6047 6118 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6048 6119 DDI_PROP_SUCCESS) {
6049 6120 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6050 6121 SCSI_ADDR_PROP_ATTACHED_PORT);
6051 6122 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6052 6123 "prop update failed");
6053 6124 break;
6054 6125 }
6055 6126 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6056 6127 MPTSAS_NUM_PHYS, 0) !=
6057 6128 DDI_PROP_SUCCESS) {
6058 6129 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6059 6130 MPTSAS_NUM_PHYS);
6060 6131 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6061 6132 "prop update failed");
6062 6133 break;
6063 6134 }
6064 6135 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6065 6136 MPTSAS_VIRTUAL_PORT, 1) !=
6066 6137 DDI_PROP_SUCCESS) {
6067 6138 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6068 6139 MPTSAS_VIRTUAL_PORT);
22 lines elided
6069 6140 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6070 6141 "prop update failed");
6071 6142 break;
6072 6143 }
6073 6144 }
6074 6145
6075 6146 mutex_enter(&mpt->m_mutex);
6076 6147 ptgt->m_led_status = 0;
6077 6148 (void) mptsas_flush_led_status(mpt, ptgt);
6078 6149 if (rval == DDI_SUCCESS) {
6079 - mptsas_tgt_free(&mpt->m_active->m_tgttbl,
6080 - ptgt->m_sas_wwn, ptgt->m_phymask);
6150 + refhash_remove(mpt->m_targets, ptgt);
6081 6151 ptgt = NULL;
6082 6152 } else {
6083 6153 /*
6084 6154 * clean DR_INTRANSITION flag to allow I/O down to
6085 6155 * PHCI driver since failover finished.
6086 6156 * Invalidate the devhdl
6087 6157 */
6088 6158 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6089 6159 ptgt->m_tgt_unconfigured = 0;
6090 6160 mutex_enter(&mpt->m_tx_waitq_mutex);
6091 6161 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6092 6162 mutex_exit(&mpt->m_tx_waitq_mutex);
6093 6163 }
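
Note: refhash_remove() takes over from the old mptsas_tgt_free()/mptsas_smp_free() helpers. Under the refhash model the table owns its objects: removal drops the table's reference, and the destructor supplied at refhash_create() time runs only once any outstanding holds (for example an in-progress walk) are released, which is what closes the use-after-free described in 4682. A rough outline of the setup, with the bucket count and the hash/cmp/dtor callbacks assumed to be defined elsewhere in this change:

	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link),
	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);
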
6094 6164
6095 6165 /*
6096 6166 * Send SAS IO Unit Control to free the dev handle
6097 6167 */
6098 6168 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6099 6169 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6100 6170 rval = mptsas_free_devhdl(mpt, devhdl);
6101 6171
6102 6172 NDBG20(("mptsas%d handle_topo_change to remove "
6103 6173 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6104 6174 rval));
6105 6175 }
6106 6176
6107 6177 break;
6108 6178 }
6109 6179 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6110 6180 {
6111 6181 devhdl = topo_node->devhdl;
6112 6182 /*
6113 6183 * If this is the remove handle event, do a reset first.
6114 6184 */
6115 6185 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6116 6186 rval = mptsas_do_scsi_reset(mpt, devhdl);
6117 6187 if (rval) {
6118 6188 NDBG20(("mpt%d reset target before remove "
6119 6189 "devhdl:%x, rval:%x", mpt->m_instance,
6120 6190 devhdl, rval));
6121 6191 }
6122 6192 }
6123 6193
6124 6194 /*
6125 6195 * Send SAS IO Unit Control to free the dev handle
6126 6196 */
36 lines elided
6127 6197 rval = mptsas_free_devhdl(mpt, devhdl);
6128 6198 NDBG20(("mptsas%d handle_topo_change to remove "
6129 6199 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6130 6200 rval));
6131 6201 break;
6132 6202 }
6133 6203 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6134 6204 {
6135 6205 mptsas_smp_t smp;
6136 6206 dev_info_t *smpdip;
6137 - mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
6138 6207
6139 6208 devhdl = topo_node->devhdl;
6140 6209
6141 6210 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6142 6211 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6143 6212 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6144 6213 if (rval != DDI_SUCCESS) {
6145 6214 mptsas_log(mpt, CE_WARN, "failed to online smp, "
6146 6215 "handle %x", devhdl);
6147 6216 return;
6148 6217 }
6149 6218
6150 - psmp = mptsas_smp_alloc(smptbl, &smp);
6219 + psmp = mptsas_smp_alloc(mpt, &smp);
6151 6220 if (psmp == NULL) {
6152 6221 return;
6153 6222 }
6154 6223
6155 6224 mutex_exit(&mpt->m_mutex);
6156 6225 ndi_devi_enter(parent, &circ1);
6157 6226 (void) mptsas_online_smp(parent, psmp, &smpdip);
6158 6227 ndi_devi_exit(parent, circ1);
6159 6228
6160 6229 mutex_enter(&mpt->m_mutex);
6161 6230 break;
6162 6231 }
6163 6232 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6164 6233 {
6165 - mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
6166 6234 devhdl = topo_node->devhdl;
6167 6235 uint32_t dev_info;
6168 6236
6169 - psmp = mptsas_search_by_devhdl(smptbl, devhdl);
6237 + psmp = refhash_linear_search(mpt->m_smp_targets,
6238 + mptsas_smp_eval_devhdl, &devhdl);
6170 6239 if (psmp == NULL)
6171 6240 break;
6172 6241 /*
6173 6242 * The mptsas_smp_t data is released only if the dip is offlined
6174 6243 * successfully.
6175 6244 */
6176 6245 mutex_exit(&mpt->m_mutex);
6177 6246
6178 6247 ndi_devi_enter(parent, &circ1);
6179 6248 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6180 6249 ndi_devi_exit(parent, circ1);
6181 6250
6182 6251 dev_info = psmp->m_deviceinfo;
6183 6252 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6184 6253 DEVINFO_DIRECT_ATTACHED) {
6185 6254 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6186 6255 MPTSAS_VIRTUAL_PORT, 1) !=
6187 6256 DDI_PROP_SUCCESS) {
6188 6257 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6189 6258 MPTSAS_VIRTUAL_PORT);
6190 6259 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6191 6260 "prop update failed");
6192 6261 return;
6193 6262 }
6194 6263 /*
6195 6264 			 * Check whether the SMP is connected to the iport.
6196 6265 */
6197 6266 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6198 6267 MPTSAS_NUM_PHYS, 0) !=
6199 6268 DDI_PROP_SUCCESS) {
6200 6269 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6201 6270 MPTSAS_NUM_PHYS);
6202 6271 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6203 6272 "prop update failed");
6204 6273 return;
6205 6274 }
6206 6275 /*
6207 6276 * Clear parent's attached-port props
6208 6277 */
6209 6278 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6210 6279 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6211 6280 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6212 6281 DDI_PROP_SUCCESS) {
6213 6282 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6214 6283 SCSI_ADDR_PROP_ATTACHED_PORT);
35 lines elided
6215 6284 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6216 6285 "prop update failed");
6217 6286 return;
6218 6287 }
6219 6288 }
6220 6289
6221 6290 mutex_enter(&mpt->m_mutex);
6222 6291 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6223 6292 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6224 6293 if (rval == DDI_SUCCESS) {
6225 - mptsas_smp_free(smptbl, psmp->m_sasaddr,
6226 - psmp->m_phymask);
6294 + refhash_remove(mpt->m_smp_targets, psmp);
6227 6295 } else {
6228 6296 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6229 6297 }
6230 6298
6231 6299 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6232 6300
6233 6301 break;
6234 6302 }
6235 6303 default:
6236 6304 return;
6237 6305 }
6238 6306 }
6239 6307
6240 6308 /*
6241 6309 * Record the event if its type is enabled in mpt instance by ioctl.
6242 6310 */
6243 6311 static void
6244 6312 mptsas_record_event(void *args)
6245 6313 {
6246 6314 m_replyh_arg_t *replyh_arg;
6247 6315 pMpi2EventNotificationReply_t eventreply;
6248 6316 uint32_t event, rfm;
6249 6317 mptsas_t *mpt;
6250 6318 int i, j;
6251 6319 uint16_t event_data_len;
6252 6320 boolean_t sendAEN = FALSE;
6253 6321
6254 6322 replyh_arg = (m_replyh_arg_t *)args;
6255 6323 rfm = replyh_arg->rfm;
6256 6324 mpt = replyh_arg->mpt;
6257 6325
6258 6326 eventreply = (pMpi2EventNotificationReply_t)
6259 6327 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6260 6328 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6261 6329
6262 6330
6263 6331 /*
6264 6332 * Generate a system event to let anyone who cares know that a
6265 6333 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6266 6334 * event mask is set to.
6267 6335 */
6268 6336 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6269 6337 sendAEN = TRUE;
6270 6338 }
6271 6339
6272 6340 /*
6273 6341 * Record the event only if it is not masked. Determine which dword
6274 6342 * and bit of event mask to test.
6275 6343 */
6276 6344 i = (uint8_t)(event / 32);
6277 6345 j = (uint8_t)(event % 32);
6278 6346 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6279 6347 i = mpt->m_event_index;
6280 6348 mpt->m_events[i].Type = event;
6281 6349 mpt->m_events[i].Number = ++mpt->m_event_number;
6282 6350 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6283 6351 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6284 6352 &eventreply->EventDataLength);
6285 6353
6286 6354 if (event_data_len > 0) {
6287 6355 /*
6288 6356 * Limit data to size in m_event entry
6289 6357 */
6290 6358 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6291 6359 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6292 6360 }
6293 6361 for (j = 0; j < event_data_len; j++) {
6294 6362 mpt->m_events[i].Data[j] =
6295 6363 ddi_get32(mpt->m_acc_reply_frame_hdl,
6296 6364 &(eventreply->EventData[j]));
6297 6365 }
6298 6366
6299 6367 /*
6300 6368 * check for index wrap-around
6301 6369 */
6302 6370 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6303 6371 i = 0;
6304 6372 }
6305 6373 mpt->m_event_index = (uint8_t)i;
6306 6374
6307 6375 /*
6308 6376 * Set flag to send the event.
6309 6377 */
6310 6378 sendAEN = TRUE;
6311 6379 }
6312 6380 }
6313 6381
6314 6382 /*
6315 6383 * Generate a system event if flag is set to let anyone who cares know
6316 6384 * that an event has occurred.
6317 6385 */
6318 6386 if (sendAEN) {
6319 6387 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6320 6388 "SAS", NULL, NULL, DDI_NOSLEEP);
6321 6389 }
6322 6390 }
6323 6391
6324 6392 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6325 6393 /*
6326 6394 * handle sync events from ioc in interrupt
6327 6395 * return value:
6328 6396 * DDI_SUCCESS: The event is handled by this func
6329 6397 * DDI_FAILURE: Event is not handled
6330 6398 */
6331 6399 static int
6332 6400 mptsas_handle_event_sync(void *args)
6333 6401 {
6334 6402 m_replyh_arg_t *replyh_arg;
6335 6403 pMpi2EventNotificationReply_t eventreply;
6336 6404 uint32_t event, rfm;
6337 6405 mptsas_t *mpt;
6338 6406 uint_t iocstatus;
6339 6407
6340 6408 replyh_arg = (m_replyh_arg_t *)args;
6341 6409 rfm = replyh_arg->rfm;
6342 6410 mpt = replyh_arg->mpt;
6343 6411
6344 6412 ASSERT(mutex_owned(&mpt->m_mutex));
6345 6413
6346 6414 eventreply = (pMpi2EventNotificationReply_t)
6347 6415 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6348 6416 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6349 6417
6350 6418 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6351 6419 &eventreply->IOCStatus)) {
6352 6420 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6353 6421 mptsas_log(mpt, CE_WARN,
6354 6422 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6355 6423 "IOCLogInfo=0x%x", iocstatus,
6356 6424 ddi_get32(mpt->m_acc_reply_frame_hdl,
6357 6425 &eventreply->IOCLogInfo));
6358 6426 } else {
6359 6427 mptsas_log(mpt, CE_WARN,
6360 6428 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6361 6429 "IOCLogInfo=0x%x", iocstatus,
6362 6430 ddi_get32(mpt->m_acc_reply_frame_hdl,
6363 6431 &eventreply->IOCLogInfo));
6364 6432 }
6365 6433 }
6366 6434
6367 6435 /*
6368 6436 * figure out what kind of event we got and handle accordingly
6369 6437 */
6370 6438 switch (event) {
6371 6439 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6372 6440 {
6373 6441 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6374 6442 uint8_t num_entries, expstatus, phy;
6375 6443 uint8_t phystatus, physport, state, i;
6376 6444 uint8_t start_phy_num, link_rate;
6377 6445 uint16_t dev_handle, reason_code;
6378 6446 uint16_t enc_handle, expd_handle;
6379 6447 char string[80], curr[80], prev[80];
6380 6448 mptsas_topo_change_list_t *topo_head = NULL;
6381 6449 mptsas_topo_change_list_t *topo_tail = NULL;
6382 6450 mptsas_topo_change_list_t *topo_node = NULL;
6383 6451 mptsas_target_t *ptgt;
6384 6452 mptsas_smp_t *psmp;
6385 - mptsas_hash_table_t *tgttbl, *smptbl;
6386 6453 uint8_t flags = 0, exp_flag;
6387 6454 smhba_info_t *pSmhba = NULL;
6388 6455
6389 6456 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6390 6457
6391 - tgttbl = &mpt->m_active->m_tgttbl;
6392 - smptbl = &mpt->m_active->m_smptbl;
6393 -
6394 6458 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6395 6459 eventreply->EventData;
6396 6460
6397 6461 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6398 6462 &sas_topo_change_list->EnclosureHandle);
6399 6463 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6400 6464 &sas_topo_change_list->ExpanderDevHandle);
6401 6465 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6402 6466 &sas_topo_change_list->NumEntries);
6403 6467 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6404 6468 &sas_topo_change_list->StartPhyNum);
6405 6469 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6406 6470 &sas_topo_change_list->ExpStatus);
6407 6471 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6408 6472 &sas_topo_change_list->PhysicalPort);
6409 6473
6410 6474 string[0] = 0;
6411 6475 if (expd_handle) {
6412 6476 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6413 6477 switch (expstatus) {
6414 6478 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6415 6479 (void) sprintf(string, " added");
6416 6480 /*
6417 6481 * New expander device added
6418 6482 */
6419 6483 mpt->m_port_chng = 1;
6420 6484 topo_node = kmem_zalloc(
6421 6485 sizeof (mptsas_topo_change_list_t),
6422 6486 KM_SLEEP);
6423 6487 topo_node->mpt = mpt;
6424 6488 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6425 6489 topo_node->un.physport = physport;
6426 6490 topo_node->devhdl = expd_handle;
6427 6491 topo_node->flags = flags;
6428 6492 topo_node->object = NULL;
6429 6493 if (topo_head == NULL) {
6430 6494 topo_head = topo_tail = topo_node;
6431 6495 } else {
6432 6496 topo_tail->next = topo_node;
6433 6497 topo_tail = topo_node;
6434 6498 }
6435 6499 break;
6436 6500 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6437 6501 (void) sprintf(string, " not responding, "
6438 6502 "removed");
6439 - psmp = mptsas_search_by_devhdl(smptbl,
6440 - expd_handle);
6503 + psmp = refhash_linear_search(mpt->m_smp_targets,
6504 + mptsas_smp_eval_devhdl, &expd_handle);
6441 6505 if (psmp == NULL)
6442 6506 break;
6443 6507
6444 6508 topo_node = kmem_zalloc(
6445 6509 sizeof (mptsas_topo_change_list_t),
6446 6510 KM_SLEEP);
6447 6511 topo_node->mpt = mpt;
6448 - topo_node->un.phymask = psmp->m_phymask;
6512 + topo_node->un.phymask =
6513 + psmp->m_addr.mta_phymask;
6449 6514 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6450 6515 topo_node->devhdl = expd_handle;
6451 6516 topo_node->flags = flags;
6452 6517 topo_node->object = NULL;
6453 6518 if (topo_head == NULL) {
6454 6519 topo_head = topo_tail = topo_node;
6455 6520 } else {
6456 6521 topo_tail->next = topo_node;
6457 6522 topo_tail = topo_node;
6458 6523 }
6459 6524 break;
6460 6525 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6461 6526 break;
6462 6527 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6463 6528 (void) sprintf(string, " not responding, "
6464 6529 "delaying removal");
6465 6530 break;
6466 6531 default:
6467 6532 break;
6468 6533 }
6469 6534 } else {
6470 6535 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6471 6536 }
6472 6537
6473 6538 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6474 6539 enc_handle, expd_handle, string));
6475 6540 for (i = 0; i < num_entries; i++) {
6476 6541 phy = i + start_phy_num;
6477 6542 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6478 6543 &sas_topo_change_list->PHY[i].PhyStatus);
6479 6544 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6480 6545 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6481 6546 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6482 6547 /*
6483 6548 * Filter out processing of Phy Vacant Status unless
6484 6549 * the reason code is "Not Responding". Process all
6485 6550 * other combinations of Phy Status and Reason Codes.
6486 6551 */
6487 6552 if ((phystatus &
6488 6553 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6489 6554 (reason_code !=
6490 6555 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6491 6556 continue;
6492 6557 }
6493 6558 curr[0] = 0;
6494 6559 prev[0] = 0;
6495 6560 string[0] = 0;
6496 6561 switch (reason_code) {
6497 6562 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6498 6563 {
6499 6564 NDBG20(("mptsas%d phy %d physical_port %d "
6500 6565 "dev_handle %d added", mpt->m_instance, phy,
6501 6566 physport, dev_handle));
6502 6567 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6503 6568 &sas_topo_change_list->PHY[i].LinkRate);
6504 6569 state = (link_rate &
6505 6570 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6506 6571 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6507 6572 switch (state) {
6508 6573 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6509 6574 (void) sprintf(curr, "is disabled");
6510 6575 break;
6511 6576 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6512 6577 (void) sprintf(curr, "is offline, "
6513 6578 "failed speed negotiation");
6514 6579 break;
6515 6580 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6516 6581 (void) sprintf(curr, "SATA OOB "
6517 6582 "complete");
6518 6583 break;
6519 6584 case SMP_RESET_IN_PROGRESS:
6520 6585 (void) sprintf(curr, "SMP reset in "
6521 6586 "progress");
6522 6587 break;
6523 6588 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6524 6589 (void) sprintf(curr, "is online at "
6525 6590 "1.5 Gbps");
6526 6591 break;
6527 6592 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6528 6593 (void) sprintf(curr, "is online at 3.0 "
6529 6594 "Gbps");
6530 6595 break;
6531 6596 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6532 6597 (void) sprintf(curr, "is online at 6.0 "
6533 6598 "Gbps");
6534 6599 break;
6535 6600 default:
6536 6601 (void) sprintf(curr, "state is "
6537 6602 "unknown");
6538 6603 break;
6539 6604 }
6540 6605 /*
6541 6606 * New target device added into the system.
6542 6607 * Set association flag according to if an
6543 6608 * expander is used or not.
6544 6609 */
6545 6610 exp_flag =
6546 6611 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6547 6612 if (flags ==
6548 6613 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6549 6614 flags = exp_flag;
6550 6615 }
6551 6616 topo_node = kmem_zalloc(
6552 6617 sizeof (mptsas_topo_change_list_t),
6553 6618 KM_SLEEP);
6554 6619 topo_node->mpt = mpt;
6555 6620 topo_node->event =
6556 6621 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6557 6622 if (expd_handle == 0) {
6558 6623 /*
6559 6624 * Per MPI 2, if expander dev handle
6560 6625 * is 0, it's a directly attached
6561 6626 * device. So driver use PHY to decide
6562 6627 * which iport is associated
6563 6628 */
6564 6629 physport = phy;
6565 6630 mpt->m_port_chng = 1;
6566 6631 }
6567 6632 topo_node->un.physport = physport;
6568 6633 topo_node->devhdl = dev_handle;
6569 6634 topo_node->flags = flags;
6570 6635 topo_node->object = NULL;
6571 6636 if (topo_head == NULL) {
6572 6637 topo_head = topo_tail = topo_node;
6573 6638 } else {
6574 6639 topo_tail->next = topo_node;
6575 6640 topo_tail = topo_node;
6576 6641 }
6577 6642 break;
6578 6643 }
6579 6644 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6580 6645 {
6581 6646 NDBG20(("mptsas%d phy %d physical_port %d "
6582 6647 "dev_handle %d removed", mpt->m_instance,
6583 6648 phy, physport, dev_handle));
6584 6649 /*
6585 6650 * Set association flag according to if an
6586 6651 * expander is used or not.
6587 6652 */
6588 6653 exp_flag =
6589 6654 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6590 6655 if (flags ==
6591 6656 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6592 6657 flags = exp_flag;
6593 6658 }
6594 6659 /*
6595 6660 * Target device is removed from the system
6596 6661 * Before the device is really offline from
6597 6662 * from system.
6598 6663 */
6599 - ptgt = mptsas_search_by_devhdl(tgttbl,
6600 - dev_handle);
6664 + ptgt = refhash_linear_search(mpt->m_targets,
6665 + mptsas_target_eval_devhdl, &dev_handle);
6601 6666 /*
6602 6667 * If ptgt is NULL here, it means that the
6603 6668 * DevHandle is not in the hash table. This is
6604 6669 * reasonable sometimes. For example, if a
6605 6670 * disk was pulled, then added, then pulled
6606 6671 * again, the disk will not have been put into
6607 6672 * the hash table because the add event will
6608 6673 * have an invalid phymask. BUT, this does not
6609 6674 * mean that the DevHandle is invalid. The
6610 6675 * controller will still have a valid DevHandle
6611 6676 * that must be removed. To do this, use the
6612 6677 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6613 6678 */
6614 6679 if (ptgt == NULL) {
6615 6680 topo_node = kmem_zalloc(
6616 6681 sizeof (mptsas_topo_change_list_t),
6617 6682 KM_SLEEP);
6618 6683 topo_node->mpt = mpt;
6619 6684 topo_node->un.phymask = 0;
6620 6685 topo_node->event =
6621 6686 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
6622 6687 topo_node->devhdl = dev_handle;
6623 6688 topo_node->flags = flags;
6624 6689 topo_node->object = NULL;
6625 6690 if (topo_head == NULL) {
6626 6691 topo_head = topo_tail =
6627 6692 topo_node;
6628 6693 } else {
6629 6694 topo_tail->next = topo_node;
6630 6695 topo_tail = topo_node;
6631 6696 }
6632 6697 break;
6633 6698 }
6634 6699
6635 6700 /*
6636 6701 * Update DR flag immediately avoid I/O failure
6637 6702 * before failover finish. Pay attention to the
6638 6703 * mutex protect, we need grab m_tx_waitq_mutex
6639 6704 * during set m_dr_flag because we won't add
6640 6705 * the following command into waitq, instead,
6641 6706 * we need return TRAN_BUSY in the tran_start
6642 6707 * context.
6643 6708 */
6644 6709 mutex_enter(&mpt->m_tx_waitq_mutex);
6645 6710 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6646 6711 mutex_exit(&mpt->m_tx_waitq_mutex);
6647 6712
6648 6713 topo_node = kmem_zalloc(
6649 6714 sizeof (mptsas_topo_change_list_t),
6650 6715 KM_SLEEP);
6651 6716 topo_node->mpt = mpt;
6652 - topo_node->un.phymask = ptgt->m_phymask;
6717 + topo_node->un.phymask =
6718 + ptgt->m_addr.mta_phymask;
6653 6719 topo_node->event =
6654 6720 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6655 6721 topo_node->devhdl = dev_handle;
6656 6722 topo_node->flags = flags;
6657 6723 topo_node->object = NULL;
6658 6724 if (topo_head == NULL) {
6659 6725 topo_head = topo_tail = topo_node;
6660 6726 } else {
6661 6727 topo_tail->next = topo_node;
6662 6728 topo_tail = topo_node;
6663 6729 }
6664 6730 break;
6665 6731 }
6666 6732 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6667 6733 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6668 6734 &sas_topo_change_list->PHY[i].LinkRate);
6669 6735 state = (link_rate &
6670 6736 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6671 6737 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6672 6738 pSmhba = &mpt->m_phy_info[i].smhba_info;
6673 6739 pSmhba->negotiated_link_rate = state;
6674 6740 switch (state) {
6675 6741 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6676 6742 (void) sprintf(curr, "is disabled");
6677 6743 mptsas_smhba_log_sysevent(mpt,
6678 6744 ESC_SAS_PHY_EVENT,
6679 6745 SAS_PHY_REMOVE,
6680 6746 &mpt->m_phy_info[i].smhba_info);
6681 6747 mpt->m_phy_info[i].smhba_info.
6682 6748 negotiated_link_rate
6683 6749 = 0x1;
6684 6750 break;
6685 6751 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6686 6752 (void) sprintf(curr, "is offline, "
6687 6753 "failed speed negotiation");
6688 6754 mptsas_smhba_log_sysevent(mpt,
6689 6755 ESC_SAS_PHY_EVENT,
6690 6756 SAS_PHY_OFFLINE,
6691 6757 &mpt->m_phy_info[i].smhba_info);
6692 6758 break;
6693 6759 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6694 6760 (void) sprintf(curr, "SATA OOB "
6695 6761 "complete");
6696 6762 break;
6697 6763 case SMP_RESET_IN_PROGRESS:
6698 6764 (void) sprintf(curr, "SMP reset in "
6699 6765 "progress");
6700 6766 break;
6701 6767 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6702 6768 (void) sprintf(curr, "is online at "
6703 6769 "1.5 Gbps");
6704 6770 if ((expd_handle == 0) &&
6705 6771 (enc_handle == 1)) {
6706 6772 mpt->m_port_chng = 1;
6707 6773 }
6708 6774 mptsas_smhba_log_sysevent(mpt,
6709 6775 ESC_SAS_PHY_EVENT,
6710 6776 SAS_PHY_ONLINE,
6711 6777 &mpt->m_phy_info[i].smhba_info);
6712 6778 break;
6713 6779 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6714 6780 (void) sprintf(curr, "is online at 3.0 "
6715 6781 "Gbps");
6716 6782 if ((expd_handle == 0) &&
6717 6783 (enc_handle == 1)) {
6718 6784 mpt->m_port_chng = 1;
6719 6785 }
6720 6786 mptsas_smhba_log_sysevent(mpt,
6721 6787 ESC_SAS_PHY_EVENT,
6722 6788 SAS_PHY_ONLINE,
6723 6789 &mpt->m_phy_info[i].smhba_info);
6724 6790 break;
6725 6791 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6726 6792 (void) sprintf(curr, "is online at "
6727 6793 "6.0 Gbps");
6728 6794 if ((expd_handle == 0) &&
6729 6795 (enc_handle == 1)) {
6730 6796 mpt->m_port_chng = 1;
6731 6797 }
6732 6798 mptsas_smhba_log_sysevent(mpt,
6733 6799 ESC_SAS_PHY_EVENT,
6734 6800 SAS_PHY_ONLINE,
6735 6801 &mpt->m_phy_info[i].smhba_info);
6736 6802 break;
6737 6803 default:
6738 6804 (void) sprintf(curr, "state is "
6739 6805 "unknown");
6740 6806 break;
6741 6807 }
6742 6808
6743 6809 state = (link_rate &
6744 6810 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
6745 6811 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
6746 6812 switch (state) {
6747 6813 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6748 6814 (void) sprintf(prev, ", was disabled");
6749 6815 break;
6750 6816 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6751 6817 (void) sprintf(prev, ", was offline, "
6752 6818 "failed speed negotiation");
6753 6819 break;
6754 6820 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6755 6821 (void) sprintf(prev, ", was SATA OOB "
6756 6822 "complete");
6757 6823 break;
6758 6824 case SMP_RESET_IN_PROGRESS:
6759 6825 (void) sprintf(prev, ", was SMP reset "
6760 6826 "in progress");
6761 6827 break;
6762 6828 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6763 6829 (void) sprintf(prev, ", was online at "
6764 6830 "1.5 Gbps");
6765 6831 break;
6766 6832 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6767 6833 (void) sprintf(prev, ", was online at "
6768 6834 "3.0 Gbps");
6769 6835 break;
6770 6836 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6771 6837 (void) sprintf(prev, ", was online at "
6772 6838 "6.0 Gbps");
6773 6839 break;
6774 6840 default:
6775 6841 break;
6776 6842 }
6777 6843 (void) sprintf(&string[strlen(string)], "link "
6778 6844 "changed, ");
6779 6845 break;
6780 6846 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6781 6847 continue;
6782 6848 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6783 6849 (void) sprintf(&string[strlen(string)],
6784 6850 "target not responding, delaying "
6785 6851 "removal");
6786 6852 break;
6787 6853 }
6788 6854 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
6789 6855 mpt->m_instance, phy, dev_handle, string, curr,
6790 6856 prev));
6791 6857 }
6792 6858 if (topo_head != NULL) {
6793 6859 /*
6794 6860 * Launch DR taskq to handle topology change
6795 6861 */
6796 6862 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6797 6863 mptsas_handle_dr, (void *)topo_head,
6798 6864 DDI_NOSLEEP)) != DDI_SUCCESS) {
6799 6865 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6800 6866 "for handle SAS DR event failed. \n");
6801 6867 }
6802 6868 }
6803 6869 break;
6804 6870 }
6805 6871 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6806 6872 {
6807 6873 Mpi2EventDataIrConfigChangeList_t *irChangeList;
6808 6874 mptsas_topo_change_list_t *topo_head = NULL;
6809 6875 mptsas_topo_change_list_t *topo_tail = NULL;
6810 6876 mptsas_topo_change_list_t *topo_node = NULL;
6811 6877 mptsas_target_t *ptgt;
6812 - mptsas_hash_table_t *tgttbl;
6813 6878 uint8_t num_entries, i, reason;
6814 6879 uint16_t volhandle, diskhandle;
6815 6880
6816 6881 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
6817 6882 eventreply->EventData;
6818 6883 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6819 6884 &irChangeList->NumElements);
6820 6885
6821 - tgttbl = &mpt->m_active->m_tgttbl;
6822 -
6823 6886 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
6824 6887 mpt->m_instance));
6825 6888
6826 6889 for (i = 0; i < num_entries; i++) {
6827 6890 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
6828 6891 &irChangeList->ConfigElement[i].ReasonCode);
6829 6892 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6830 6893 &irChangeList->ConfigElement[i].VolDevHandle);
6831 6894 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6832 6895 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
6833 6896
6834 6897 switch (reason) {
6835 6898 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
6836 6899 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
6837 6900 {
6838 6901 NDBG20(("mptsas %d volume added\n",
6839 6902 mpt->m_instance));
6840 6903
6841 6904 topo_node = kmem_zalloc(
6842 6905 sizeof (mptsas_topo_change_list_t),
6843 6906 KM_SLEEP);
6844 6907
6845 6908 topo_node->mpt = mpt;
6846 6909 topo_node->event =
6847 6910 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6848 6911 topo_node->un.physport = 0xff;
6849 6912 topo_node->devhdl = volhandle;
6850 6913 topo_node->flags =
6851 6914 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6852 6915 topo_node->object = NULL;
6853 6916 if (topo_head == NULL) {
6854 6917 topo_head = topo_tail = topo_node;
6855 6918 } else {
6856 6919 topo_tail->next = topo_node;
6857 6920 topo_tail = topo_node;
6858 6921 }
6859 6922 break;
6860 6923 }
6861 6924 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
6862 6925 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
6863 6926 {
6864 6927 NDBG20(("mptsas %d volume deleted\n",
6865 6928 mpt->m_instance));
6866 - ptgt = mptsas_search_by_devhdl(tgttbl,
6867 - volhandle);
6929 + ptgt = refhash_linear_search(mpt->m_targets,
6930 + mptsas_target_eval_devhdl, &volhandle);
6868 6931 if (ptgt == NULL)
6869 6932 break;
6870 6933
6871 6934 /*
6872 6935 * Clear any flags related to volume
6873 6936 */
6874 6937 (void) mptsas_delete_volume(mpt, volhandle);
6875 6938
6876 6939 /*
6877 6940 * Update DR flag immediately avoid I/O failure
6878 6941 */
6879 6942 mutex_enter(&mpt->m_tx_waitq_mutex);
6880 6943 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6881 6944 mutex_exit(&mpt->m_tx_waitq_mutex);
6882 6945
6883 6946 topo_node = kmem_zalloc(
6884 6947 sizeof (mptsas_topo_change_list_t),
6885 6948 KM_SLEEP);
6886 6949 topo_node->mpt = mpt;
6887 - topo_node->un.phymask = ptgt->m_phymask;
6950 + topo_node->un.phymask =
6951 + ptgt->m_addr.mta_phymask;
6888 6952 topo_node->event =
6889 6953 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6890 6954 topo_node->devhdl = volhandle;
6891 6955 topo_node->flags =
6892 6956 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6893 6957 topo_node->object = (void *)ptgt;
6894 6958 if (topo_head == NULL) {
6895 6959 topo_head = topo_tail = topo_node;
6896 6960 } else {
6897 6961 topo_tail->next = topo_node;
6898 6962 topo_tail = topo_node;
6899 6963 }
6900 6964 break;
6901 6965 }
6902 6966 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
6903 6967 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
6904 6968 {
6905 - ptgt = mptsas_search_by_devhdl(tgttbl,
6906 - diskhandle);
6969 + ptgt = refhash_linear_search(mpt->m_targets,
6970 + mptsas_target_eval_devhdl, &diskhandle);
6907 6971 if (ptgt == NULL)
6908 6972 break;
6909 6973
6910 6974 /*
6911 6975 * Update DR flag immediately avoid I/O failure
6912 6976 */
6913 6977 mutex_enter(&mpt->m_tx_waitq_mutex);
6914 6978 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6915 6979 mutex_exit(&mpt->m_tx_waitq_mutex);
6916 6980
6917 6981 topo_node = kmem_zalloc(
6918 6982 sizeof (mptsas_topo_change_list_t),
6919 6983 KM_SLEEP);
6920 6984 topo_node->mpt = mpt;
6921 - topo_node->un.phymask = ptgt->m_phymask;
6985 + topo_node->un.phymask =
6986 + ptgt->m_addr.mta_phymask;
6922 6987 topo_node->event =
6923 6988 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6924 6989 topo_node->devhdl = diskhandle;
6925 6990 topo_node->flags =
6926 6991 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6927 6992 topo_node->object = (void *)ptgt;
6928 6993 if (topo_head == NULL) {
6929 6994 topo_head = topo_tail = topo_node;
6930 6995 } else {
6931 6996 topo_tail->next = topo_node;
6932 6997 topo_tail = topo_node;
6933 6998 }
6934 6999 break;
6935 7000 }
6936 7001 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
6937 7002 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
6938 7003 {
6939 7004 /*
6940 7005 * The physical drive is released by a IR
6941 7006 * volume. But we cannot get the the physport
6942 7007 * or phynum from the event data, so we only
6943 7008 * can get the physport/phynum after SAS
6944 7009 * Device Page0 request for the devhdl.
6945 7010 */
6946 7011 topo_node = kmem_zalloc(
6947 7012 sizeof (mptsas_topo_change_list_t),
6948 7013 KM_SLEEP);
6949 7014 topo_node->mpt = mpt;
6950 7015 topo_node->un.phymask = 0;
6951 7016 topo_node->event =
6952 7017 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6953 7018 topo_node->devhdl = diskhandle;
6954 7019 topo_node->flags =
6955 7020 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6956 7021 topo_node->object = NULL;
6957 7022 mpt->m_port_chng = 1;
6958 7023 if (topo_head == NULL) {
6959 7024 topo_head = topo_tail = topo_node;
6960 7025 } else {
6961 7026 topo_tail->next = topo_node;
6962 7027 topo_tail = topo_node;
6963 7028 }
6964 7029 break;
6965 7030 }
6966 7031 default:
6967 7032 break;
6968 7033 }
6969 7034 }
6970 7035
6971 7036 if (topo_head != NULL) {
6972 7037 /*
6973 7038 * Launch DR taskq to handle topology change
6974 7039 */
6975 7040 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6976 7041 mptsas_handle_dr, (void *)topo_head,
6977 7042 DDI_NOSLEEP)) != DDI_SUCCESS) {
6978 7043 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6979 7044 "for handle SAS DR event failed. \n");
6980 7045 }
6981 7046 }
6982 7047 break;
6983 7048 }
6984 7049 default:
6985 7050 return (DDI_FAILURE);
6986 7051 }
6987 7052
6988 7053 return (DDI_SUCCESS);
6989 7054 }
6990 7055
6991 7056 /*
6992 7057 * handle events from ioc
6993 7058 */
6994 7059 static void
6995 7060 mptsas_handle_event(void *args)
6996 7061 {
6997 7062 m_replyh_arg_t *replyh_arg;
6998 7063 pMpi2EventNotificationReply_t eventreply;
6999 7064 uint32_t event, iocloginfo, rfm;
7000 7065 uint32_t status;
7001 7066 uint8_t port;
7002 7067 mptsas_t *mpt;
7003 7068 uint_t iocstatus;
7004 7069
7005 7070 replyh_arg = (m_replyh_arg_t *)args;
7006 7071 rfm = replyh_arg->rfm;
7007 7072 mpt = replyh_arg->mpt;
7008 7073
7009 7074 mutex_enter(&mpt->m_mutex);
7010 7075 /*
7011 7076 * If HBA is being reset, drop incoming event.
7012 7077 */
7013 7078 if (mpt->m_in_reset) {
7014 7079 NDBG20(("dropping event received prior to reset"));
7015 7080 mutex_exit(&mpt->m_mutex);
7016 7081 return;
7017 7082 }
7018 7083
7019 7084 eventreply = (pMpi2EventNotificationReply_t)
7020 7085 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7021 7086 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7022 7087
7023 7088 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7024 7089 &eventreply->IOCStatus)) {
7025 7090 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7026 7091 mptsas_log(mpt, CE_WARN,
7027 7092 "!mptsas_handle_event: IOCStatus=0x%x, "
7028 7093 "IOCLogInfo=0x%x", iocstatus,
7029 7094 ddi_get32(mpt->m_acc_reply_frame_hdl,
7030 7095 &eventreply->IOCLogInfo));
7031 7096 } else {
7032 7097 mptsas_log(mpt, CE_WARN,
7033 7098 "mptsas_handle_event: IOCStatus=0x%x, "
7034 7099 "IOCLogInfo=0x%x", iocstatus,
7035 7100 ddi_get32(mpt->m_acc_reply_frame_hdl,
7036 7101 &eventreply->IOCLogInfo));
7037 7102 }
7038 7103 }
7039 7104
7040 7105 /*
7041 7106 * figure out what kind of event we got and handle accordingly
7042 7107 */
7043 7108 switch (event) {
7044 7109 case MPI2_EVENT_LOG_ENTRY_ADDED:
7045 7110 break;
7046 7111 case MPI2_EVENT_LOG_DATA:
7047 7112 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7048 7113 &eventreply->IOCLogInfo);
7049 7114 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7050 7115 iocloginfo));
7051 7116 break;
7052 7117 case MPI2_EVENT_STATE_CHANGE:
7053 7118 NDBG20(("mptsas%d state change.", mpt->m_instance));
7054 7119 break;
7055 7120 case MPI2_EVENT_HARD_RESET_RECEIVED:
7056 7121 NDBG20(("mptsas%d event change.", mpt->m_instance));
7057 7122 break;
7058 7123 case MPI2_EVENT_SAS_DISCOVERY:
7059 7124 {
7060 7125 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7061 7126 char string[80];
7062 7127 uint8_t rc;
7063 7128
7064 7129 sasdiscovery =
7065 7130 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7066 7131
7067 7132 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7068 7133 &sasdiscovery->ReasonCode);
7069 7134 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7070 7135 &sasdiscovery->PhysicalPort);
7071 7136 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7072 7137 &sasdiscovery->DiscoveryStatus);
7073 7138
7074 7139 string[0] = 0;
7075 7140 switch (rc) {
7076 7141 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7077 7142 (void) sprintf(string, "STARTING");
7078 7143 break;
7079 7144 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7080 7145 (void) sprintf(string, "COMPLETED");
7081 7146 break;
7082 7147 default:
7083 7148 (void) sprintf(string, "UNKNOWN");
7084 7149 break;
7085 7150 }
7086 7151
7087 7152 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7088 7153 port, status));
7089 7154
7090 7155 break;
7091 7156 }
7092 7157 case MPI2_EVENT_EVENT_CHANGE:
7093 7158 NDBG20(("mptsas%d event change.", mpt->m_instance));
7094 7159 break;
7095 7160 case MPI2_EVENT_TASK_SET_FULL:
7096 7161 {
7097 7162 pMpi2EventDataTaskSetFull_t taskfull;
7098 7163
7099 7164 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7100 7165
7101 7166 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7102 7167 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7103 7168 &taskfull->CurrentDepth)));
7104 7169 break;
7105 7170 }
7106 7171 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7107 7172 {
7108 7173 /*
7109 7174 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7110 7175 * in mptsas_handle_event_sync() of interrupt context
7111 7176 */
7112 7177 break;
7113 7178 }
7114 7179 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7115 7180 {
7116 7181 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7117 7182 uint8_t rc;
7118 7183 char string[80];
7119 7184
7120 7185 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7121 7186 eventreply->EventData;
7122 7187
7123 7188 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7124 7189 &encstatus->ReasonCode);
7125 7190 switch (rc) {
7126 7191 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7127 7192 (void) sprintf(string, "added");
7128 7193 break;
7129 7194 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7130 7195 (void) sprintf(string, ", not responding");
7131 7196 break;
7132 7197 default:
7133 7198 break;
7134 7199 }
7135 7200 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7136 7201 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7137 7202 &encstatus->EnclosureHandle), string));
7138 7203 break;
7139 7204 }
7140 7205
7141 7206 /*
7142 7207 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7143 7208 * mptsas_handle_event_sync,in here just send ack message.
7144 7209 */
7145 7210 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7146 7211 {
7147 7212 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7148 7213 uint8_t rc;
7149 7214 uint16_t devhdl;
7150 7215 uint64_t wwn = 0;
7151 7216 uint32_t wwn_lo, wwn_hi;
7152 7217
7153 7218 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7154 7219 eventreply->EventData;
7155 7220 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7156 7221 &statuschange->ReasonCode);
7157 7222 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7158 7223 (uint32_t *)(void *)&statuschange->SASAddress);
7159 7224 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7160 7225 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7161 7226 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7162 7227 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7163 7228 &statuschange->DevHandle);
7164 7229
7165 7230 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7166 7231 wwn));
7167 7232
7168 7233 switch (rc) {
7169 7234 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7170 7235 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7171 7236 ddi_get8(mpt->m_acc_reply_frame_hdl,
7172 7237 &statuschange->ASC),
7173 7238 ddi_get8(mpt->m_acc_reply_frame_hdl,
7174 7239 &statuschange->ASCQ)));
7175 7240 break;
7176 7241
7177 7242 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7178 7243 NDBG20(("Device not supported"));
7179 7244 break;
7180 7245
7181 7246 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7182 7247 NDBG20(("IOC internally generated the Target Reset "
7183 7248 "for devhdl:%x", devhdl));
7184 7249 break;
7185 7250
7186 7251 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7187 7252 NDBG20(("IOC's internally generated Target Reset "
7188 7253 "completed for devhdl:%x", devhdl));
7189 7254 break;
7190 7255
7191 7256 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7192 7257 NDBG20(("IOC internally generated Abort Task"));
7193 7258 break;
7194 7259
7195 7260 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7196 7261 NDBG20(("IOC's internally generated Abort Task "
7197 7262 "completed"));
7198 7263 break;
7199 7264
7200 7265 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7201 7266 NDBG20(("IOC internally generated Abort Task Set"));
7202 7267 break;
7203 7268
7204 7269 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7205 7270 NDBG20(("IOC internally generated Clear Task Set"));
7206 7271 break;
7207 7272
7208 7273 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7209 7274 NDBG20(("IOC internally generated Query Task"));
7210 7275 break;
7211 7276
7212 7277 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7213 7278 NDBG20(("Device sent an Asynchronous Notification"));
7214 7279 break;
7215 7280
7216 7281 default:
7217 7282 break;
7218 7283 }
7219 7284 break;
7220 7285 }
7221 7286 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7222 7287 {
7223 7288 /*
7224 7289 * IR TOPOLOGY CHANGE LIST Event has already been handled
7225 7290 * in mpt_handle_event_sync() of interrupt context
7226 7291 */
7227 7292 break;
7228 7293 }
7229 7294 case MPI2_EVENT_IR_OPERATION_STATUS:
7230 7295 {
7231 7296 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7232 7297 char reason_str[80];
7233 7298 uint8_t rc, percent;
7234 7299 uint16_t handle;
7235 7300
7236 7301 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7237 7302 eventreply->EventData;
7238 7303 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7239 7304 &irOpStatus->RAIDOperation);
7240 7305 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7241 7306 &irOpStatus->PercentComplete);
7242 7307 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7243 7308 &irOpStatus->VolDevHandle);
7244 7309
7245 7310 switch (rc) {
7246 7311 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7247 7312 (void) sprintf(reason_str, "resync");
7248 7313 break;
7249 7314 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7250 7315 (void) sprintf(reason_str, "online capacity "
7251 7316 "expansion");
7252 7317 break;
7253 7318 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7254 7319 (void) sprintf(reason_str, "consistency check");
7255 7320 break;
7256 7321 default:
7257 7322 (void) sprintf(reason_str, "unknown reason %x",
7258 7323 rc);
7259 7324 }
7260 7325
7261 7326 NDBG20(("mptsas%d raid operational status: (%s)"
7262 7327 "\thandle(0x%04x), percent complete(%d)\n",
7263 7328 mpt->m_instance, reason_str, handle, percent));
7264 7329 break;
7265 7330 }
7266 7331 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7267 7332 {
7268 7333 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7269 7334 uint8_t phy_num;
7270 7335 uint8_t primitive;
7271 7336
7272 7337 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7273 7338 eventreply->EventData;
7274 7339
7275 7340 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7276 7341 &sas_broadcast->PhyNum);
7277 7342 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7278 7343 &sas_broadcast->Primitive);
7279 7344
7280 7345 switch (primitive) {
7281 7346 case MPI2_EVENT_PRIMITIVE_CHANGE:
7282 7347 mptsas_smhba_log_sysevent(mpt,
7283 7348 ESC_SAS_HBA_PORT_BROADCAST,
7284 7349 SAS_PORT_BROADCAST_CHANGE,
7285 7350 &mpt->m_phy_info[phy_num].smhba_info);
7286 7351 break;
7287 7352 case MPI2_EVENT_PRIMITIVE_SES:
7288 7353 mptsas_smhba_log_sysevent(mpt,
7289 7354 ESC_SAS_HBA_PORT_BROADCAST,
7290 7355 SAS_PORT_BROADCAST_SES,
7291 7356 &mpt->m_phy_info[phy_num].smhba_info);
7292 7357 break;
7293 7358 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7294 7359 mptsas_smhba_log_sysevent(mpt,
7295 7360 ESC_SAS_HBA_PORT_BROADCAST,
7296 7361 SAS_PORT_BROADCAST_D01_4,
7297 7362 &mpt->m_phy_info[phy_num].smhba_info);
7298 7363 break;
7299 7364 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7300 7365 mptsas_smhba_log_sysevent(mpt,
7301 7366 ESC_SAS_HBA_PORT_BROADCAST,
7302 7367 SAS_PORT_BROADCAST_D04_7,
7303 7368 &mpt->m_phy_info[phy_num].smhba_info);
7304 7369 break;
7305 7370 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7306 7371 mptsas_smhba_log_sysevent(mpt,
7307 7372 ESC_SAS_HBA_PORT_BROADCAST,
7308 7373 SAS_PORT_BROADCAST_D16_7,
7309 7374 &mpt->m_phy_info[phy_num].smhba_info);
7310 7375 break;
7311 7376 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7312 7377 mptsas_smhba_log_sysevent(mpt,
7313 7378 ESC_SAS_HBA_PORT_BROADCAST,
7314 7379 SAS_PORT_BROADCAST_D29_7,
7315 7380 &mpt->m_phy_info[phy_num].smhba_info);
7316 7381 break;
7317 7382 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7318 7383 mptsas_smhba_log_sysevent(mpt,
7319 7384 ESC_SAS_HBA_PORT_BROADCAST,
7320 7385 SAS_PORT_BROADCAST_D24_0,
7321 7386 &mpt->m_phy_info[phy_num].smhba_info);
7322 7387 break;
7323 7388 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7324 7389 mptsas_smhba_log_sysevent(mpt,
7325 7390 ESC_SAS_HBA_PORT_BROADCAST,
7326 7391 SAS_PORT_BROADCAST_D27_4,
7327 7392 &mpt->m_phy_info[phy_num].smhba_info);
7328 7393 break;
7329 7394 default:
7330 7395 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7331 7396 " %x received",
7332 7397 mpt->m_instance, primitive));
7333 7398 break;
7334 7399 }
7335 7400 NDBG20(("mptsas%d sas broadcast primitive: "
7336 7401 "\tprimitive(0x%04x), phy(%d) complete\n",
7337 7402 mpt->m_instance, primitive, phy_num));
7338 7403 break;
7339 7404 }
7340 7405 case MPI2_EVENT_IR_VOLUME:
7341 7406 {
7342 7407 Mpi2EventDataIrVolume_t *irVolume;
7343 7408 uint16_t devhandle;
7344 7409 uint32_t state;
7345 7410 int config, vol;
7346 - mptsas_slots_t *slots = mpt->m_active;
7347 7411 uint8_t found = FALSE;
7348 7412
7349 7413 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7350 7414 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7351 7415 &irVolume->NewValue);
7352 7416 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7353 7417 &irVolume->VolDevHandle);
7354 7418
7355 7419 NDBG20(("EVENT_IR_VOLUME event is received"));
7356 7420
7357 7421 /*
7358 7422 * Get latest RAID info and then find the DevHandle for this
7359 7423 * event in the configuration. If the DevHandle is not found
7360 7424 * just exit the event.
7361 7425 */
7362 7426 (void) mptsas_get_raid_info(mpt);
7363 - for (config = 0; (config < slots->m_num_raid_configs) &&
7427 + for (config = 0; (config < mpt->m_num_raid_configs) &&
7364 7428 (!found); config++) {
7365 7429 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7366 - if (slots->m_raidconfig[config].m_raidvol[vol].
7430 + if (mpt->m_raidconfig[config].m_raidvol[vol].
7367 7431 m_raidhandle == devhandle) {
7368 7432 found = TRUE;
7369 7433 break;
7370 7434 }
7371 7435 }
7372 7436 }
7373 7437 if (!found) {
7374 7438 break;
7375 7439 }
7376 7440
7377 7441 switch (irVolume->ReasonCode) {
7378 7442 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7379 7443 {
7380 7444 uint32_t i;
7381 - slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7445 + mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7382 7446 state;
7383 7447
7384 7448 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7385 7449 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7386 7450 ", auto-config of hot-swap drives is %s"
7387 7451 ", write caching is %s"
7388 7452 ", hot-spare pool mask is %02x\n",
7389 7453 vol, state &
7390 7454 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7391 7455 ? "disabled" : "enabled",
7392 7456 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7393 7457 ? "controlled by member disks" :
7394 7458 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7395 7459 ? "disabled" :
7396 7460 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7397 7461 ? "enabled" :
7398 7462 "incorrectly set",
7399 7463 (state >> 16) & 0xff);
7400 7464 break;
7401 7465 }
7402 7466 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7403 7467 {
7404 - slots->m_raidconfig[config].m_raidvol[vol].m_state =
7468 + mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7405 7469 (uint8_t)state;
7406 7470
7407 7471 mptsas_log(mpt, CE_NOTE,
7408 7472 "Volume %d is now %s\n", vol,
7409 7473 state == MPI2_RAID_VOL_STATE_OPTIMAL
7410 7474 ? "optimal" :
7411 7475 state == MPI2_RAID_VOL_STATE_DEGRADED
7412 7476 ? "degraded" :
7413 7477 state == MPI2_RAID_VOL_STATE_ONLINE
7414 7478 ? "online" :
7415 7479 state == MPI2_RAID_VOL_STATE_INITIALIZING
7416 7480 ? "initializing" :
7417 7481 state == MPI2_RAID_VOL_STATE_FAILED
7418 7482 ? "failed" :
7419 7483 state == MPI2_RAID_VOL_STATE_MISSING
7420 7484 ? "missing" :
7421 7485 "state unknown");
7422 7486 break;
7423 7487 }
7424 7488 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7425 7489 {
7426 - slots->m_raidconfig[config].m_raidvol[vol].
7490 + mpt->m_raidconfig[config].m_raidvol[vol].
7427 7491 m_statusflags = state;
7428 7492
7429 7493 mptsas_log(mpt, CE_NOTE,
7430 7494 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7431 7495 vol,
7432 7496 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7433 7497 ? ", enabled" : ", disabled",
7434 7498 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7435 7499 ? ", quiesced" : "",
7436 7500 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7437 7501 ? ", inactive" : ", active",
7438 7502 state &
7439 7503 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7440 7504 ? ", bad block table is full" : "",
7441 7505 state &
7442 7506 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7443 7507 ? ", resync in progress" : "",
7444 7508 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7445 7509 ? ", background initialization in progress" : "",
7446 7510 state &
7447 7511 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7448 7512 ? ", capacity expansion in progress" : "",
7449 7513 state &
7450 7514 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7451 7515 ? ", consistency check in progress" : "",
7452 7516 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7453 7517 ? ", data scrub in progress" : "");
7454 7518 break;
7455 7519 }
7456 7520 default:
7457 7521 break;
7458 7522 }
7459 7523 break;
7460 7524 }
7461 7525 case MPI2_EVENT_IR_PHYSICAL_DISK:
7462 7526 {
7463 7527 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7464 7528 uint16_t devhandle, enchandle, slot;
7465 7529 uint32_t status, state;
7466 7530 uint8_t physdisknum, reason;
7467 7531
7468 7532 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7469 7533 eventreply->EventData;
7470 7534 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7471 7535 &irPhysDisk->PhysDiskNum);
7472 7536 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7473 7537 &irPhysDisk->PhysDiskDevHandle);
7474 7538 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7475 7539 &irPhysDisk->EnclosureHandle);
7476 7540 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7477 7541 &irPhysDisk->Slot);
7478 7542 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7479 7543 &irPhysDisk->NewValue);
7480 7544 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7481 7545 &irPhysDisk->ReasonCode);
7482 7546
7483 7547 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7484 7548
7485 7549 switch (reason) {
7486 7550 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7487 7551 mptsas_log(mpt, CE_NOTE,
7488 7552 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7489 7553 "for enclosure with handle 0x%x is now in hot "
7490 7554 "spare pool %d",
7491 7555 physdisknum, devhandle, slot, enchandle,
7492 7556 (state >> 16) & 0xff);
7493 7557 break;
7494 7558
7495 7559 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7496 7560 status = state;
7497 7561 mptsas_log(mpt, CE_NOTE,
7498 7562 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7499 7563 "for enclosure with handle 0x%x is now "
7500 7564 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7501 7565 enchandle,
7502 7566 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7503 7567 ? ", inactive" : ", active",
7504 7568 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7505 7569 ? ", out of sync" : "",
7506 7570 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7507 7571 ? ", quiesced" : "",
7508 7572 status &
7509 7573 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7510 7574 ? ", write cache enabled" : "",
7511 7575 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7512 7576 ? ", capacity expansion target" : "");
7513 7577 break;
7514 7578
7515 7579 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7516 7580 mptsas_log(mpt, CE_NOTE,
7517 7581 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7518 7582 "for enclosure with handle 0x%x is now %s\n",
7519 7583 physdisknum, devhandle, slot, enchandle,
7520 7584 state == MPI2_RAID_PD_STATE_OPTIMAL
7521 7585 ? "optimal" :
7522 7586 state == MPI2_RAID_PD_STATE_REBUILDING
7523 7587 ? "rebuilding" :
7524 7588 state == MPI2_RAID_PD_STATE_DEGRADED
7525 7589 ? "degraded" :
7526 7590 state == MPI2_RAID_PD_STATE_HOT_SPARE
7527 7591 ? "a hot spare" :
7528 7592 state == MPI2_RAID_PD_STATE_ONLINE
7529 7593 ? "online" :
7530 7594 state == MPI2_RAID_PD_STATE_OFFLINE
7531 7595 ? "offline" :
7532 7596 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7533 7597 ? "not compatible" :
7534 7598 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7535 7599 ? "not configured" :
7536 7600 "state unknown");
7537 7601 break;
7538 7602 }
7539 7603 break;
7540 7604 }
7541 7605 default:
7542 7606 NDBG20(("mptsas%d: unknown event %x received",
7543 7607 mpt->m_instance, event));
7544 7608 break;
7545 7609 }
7546 7610
7547 7611 /*
7548 7612 * Return the reply frame to the free queue.
7549 7613 */
7550 7614 ddi_put32(mpt->m_acc_free_queue_hdl,
7551 7615 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7552 7616 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7553 7617 DDI_DMA_SYNC_FORDEV);
7554 7618 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7555 7619 mpt->m_free_index = 0;
7556 7620 }
7557 7621 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7558 7622 mpt->m_free_index);
7559 7623 mutex_exit(&mpt->m_mutex);
7560 7624 }
7561 7625
7562 7626 /*
7563 7627 * invoked from timeout() to restart qfull cmds with throttle == 0
7564 7628 */
7565 7629 static void
7566 7630 mptsas_restart_cmd(void *arg)
7567 7631 {
7568 7632 mptsas_t *mpt = arg;
7569 7633 mptsas_target_t *ptgt = NULL;
7570 7634
7571 7635 mutex_enter(&mpt->m_mutex);
7572 7636
7573 7637 mpt->m_restart_cmd_timeid = 0;
7574 7638
7575 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7576 - MPTSAS_HASH_FIRST);
7577 - while (ptgt != NULL) {
7639 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
7640 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
7578 7641 if (ptgt->m_reset_delay == 0) {
7579 7642 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7580 7643 mptsas_set_throttle(mpt, ptgt,
7581 7644 MAX_THROTTLE);
7582 7645 }
7583 7646 }
7584 -
7585 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7586 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7587 7647 }
7588 7648 mptsas_restart_hba(mpt);
7589 7649 mutex_exit(&mpt->m_mutex);
7590 7650 }
7591 7651
7592 7652 void
7593 7653 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7594 7654 {
7595 7655 int slot;
7596 7656 mptsas_slots_t *slots = mpt->m_active;
7597 7657 int t;
7598 7658 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7599 7659
7600 7660 ASSERT(cmd != NULL);
7601 7661 ASSERT(cmd->cmd_queued == FALSE);
7602 7662
7603 7663 /*
7604 7664 * Task Management cmds are removed in their own routines. Also,
7605 7665 * we don't want to modify timeout based on TM cmds.
7606 7666 */
7607 7667 if (cmd->cmd_flags & CFLAG_TM_CMD) {
7608 7668 return;
7609 7669 }
7610 7670
7611 7671 t = Tgt(cmd);
7612 7672 slot = cmd->cmd_slot;
7613 7673
7614 7674 /*
7615 7675 * remove the cmd.
7616 7676 */
7617 7677 if (cmd == slots->m_slot[slot]) {
7618 7678 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7619 7679 slots->m_slot[slot] = NULL;
7620 7680 mpt->m_ncmds--;
7621 7681
7622 7682 /*
7623 7683 * only decrement per target ncmds if command
7624 7684 * has a target associated with it.
7625 7685 */
7626 7686 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7627 7687 ptgt->m_t_ncmds--;
7628 7688 /*
7629 7689 * reset throttle if we just ran an untagged command
7630 7690 * to a tagged target
7631 7691 */
7632 7692 if ((ptgt->m_t_ncmds == 0) &&
7633 7693 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7634 7694 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7635 7695 }
7636 7696 }
7637 7697
7638 7698 }
7639 7699
7640 7700 /*
7641 7701 * This is all we need to do for ioc commands.
7642 7702 */
7643 7703 if (cmd->cmd_flags & CFLAG_CMDIOC) {
7644 7704 mptsas_return_to_pool(mpt, cmd);
7645 7705 return;
7646 7706 }
7647 7707
7648 7708 /*
7649 7709 * Figure out what to set tag Q timeout for...
7650 7710 *
7651 7711 * Optimize: If we have duplicate's of same timeout
7652 7712 * we're using, then we'll use it again until we run
7653 7713 * out of duplicates. This should be the normal case
7654 7714 * for block and raw I/O.
7655 7715 * If no duplicates, we have to scan through tag que and
7656 7716 * find the longest timeout value and use it. This is
7657 7717 * going to take a while...
7658 - * Add 1 to m_n_slots to account for TM request.
7718 + * Add 1 to m_n_normal to account for TM request.
7659 7719 */
7660 7720 if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7661 7721 if (--(ptgt->m_dups) == 0) {
7662 7722 if (ptgt->m_t_ncmds) {
7663 7723 mptsas_cmd_t *ssp;
7664 7724 uint_t n = 0;
7665 - ushort_t nslots = (slots->m_n_slots + 1);
7725 + ushort_t nslots = (slots->m_n_normal + 1);
7666 7726 ushort_t i;
7667 7727 /*
7668 7728 * This crude check assumes we don't do
7669 7729 * this too often which seems reasonable
7670 7730 * for block and raw I/O.
7671 7731 */
7672 7732 for (i = 0; i < nslots; i++) {
7673 7733 ssp = slots->m_slot[i];
7674 7734 if (ssp && (Tgt(ssp) == t) &&
7675 7735 (ssp->cmd_pkt->pkt_time > n)) {
7676 7736 n = ssp->cmd_pkt->pkt_time;
7677 7737 ptgt->m_dups = 1;
7678 7738 } else if (ssp && (Tgt(ssp) == t) &&
7679 7739 (ssp->cmd_pkt->pkt_time == n)) {
7680 7740 ptgt->m_dups++;
7681 7741 }
7682 7742 }
7683 7743 ptgt->m_timebase = n;
7684 7744 } else {
7685 7745 ptgt->m_dups = 0;
7686 7746 ptgt->m_timebase = 0;
7687 7747 }
7688 7748 }
7689 7749 }
7690 7750 ptgt->m_timeout = ptgt->m_timebase;
7691 7751
7692 7752 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7693 7753 }
7694 7754
7695 7755 /*
7696 7756 * accept all cmds on the tx_waitq if any and then
7697 7757 * start a fresh request from the top of the device queue.
7698 7758 *
7699 7759 * since there are always cmds queued on the tx_waitq, and rare cmds on
7700 7760 * the instance waitq, so this function should not be invoked in the ISR,
7701 7761 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7702 7762 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7703 7763 */
7704 7764 static void
7705 7765 mptsas_restart_hba(mptsas_t *mpt)
7706 7766 {
7707 7767 ASSERT(mutex_owned(&mpt->m_mutex));
7708 7768
7709 7769 mutex_enter(&mpt->m_tx_waitq_mutex);
7710 7770 if (mpt->m_tx_waitq) {
7711 7771 mptsas_accept_tx_waitq(mpt);
7712 7772 }
7713 7773 mutex_exit(&mpt->m_tx_waitq_mutex);
7714 7774 mptsas_restart_waitq(mpt);
7715 7775 }
7716 7776
7717 7777 /*
7718 7778 * start a fresh request from the top of the device queue
7719 7779 */
7720 7780 static void
7721 7781 mptsas_restart_waitq(mptsas_t *mpt)
7722 7782 {
7723 7783 mptsas_cmd_t *cmd, *next_cmd;
7724 7784 mptsas_target_t *ptgt = NULL;
7725 7785
7726 7786 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7727 7787
7728 7788 ASSERT(mutex_owned(&mpt->m_mutex));
7729 7789
7730 7790 /*
7731 7791 * If there is a reset delay, don't start any cmds. Otherwise, start
7732 7792 * as many cmds as possible.
7733 7793 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7734 7794 * commands is m_max_requests - 2.
7735 7795 */
7736 7796 cmd = mpt->m_waitq;
7737 7797
7738 7798 while (cmd != NULL) {
7739 7799 next_cmd = cmd->cmd_linkp;
7740 7800 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7741 7801 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7742 7802 /*
7743 7803 * passthru command get slot need
7744 7804 * set CFLAG_PREPARED.
7745 7805 */
7746 7806 cmd->cmd_flags |= CFLAG_PREPARED;
7747 7807 mptsas_waitq_delete(mpt, cmd);
7748 7808 mptsas_start_passthru(mpt, cmd);
7749 7809 }
7750 7810 cmd = next_cmd;
7751 7811 continue;
7752 7812 }
7753 7813 if (cmd->cmd_flags & CFLAG_CONFIG) {
7754 7814 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7755 7815 /*
7756 7816 * Send the config page request and delete it
7757 7817 * from the waitq.
7758 7818 */
7759 7819 cmd->cmd_flags |= CFLAG_PREPARED;
7760 7820 mptsas_waitq_delete(mpt, cmd);
7761 7821 mptsas_start_config_page_access(mpt, cmd);
7762 7822 }
7763 7823 cmd = next_cmd;
7764 7824 continue;
7765 7825 }
7766 7826 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7767 7827 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7768 7828 /*
7769 7829 * Send the FW Diag request and delete if from
7770 7830 * the waitq.
7771 7831 */
7772 7832 cmd->cmd_flags |= CFLAG_PREPARED;
7773 7833 mptsas_waitq_delete(mpt, cmd);
7774 7834 mptsas_start_diag(mpt, cmd);
7775 7835 }
7776 7836 cmd = next_cmd;
7777 7837 continue;
7778 7838 }
7779 7839
7780 7840 ptgt = cmd->cmd_tgt_addr;
7781 7841 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7782 7842 (ptgt->m_t_ncmds == 0)) {
7783 7843 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7784 7844 }
7785 7845 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7786 7846 (ptgt && (ptgt->m_reset_delay == 0)) &&
7787 7847 (ptgt && (ptgt->m_t_ncmds <
7788 7848 ptgt->m_t_throttle))) {
7789 7849 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7790 7850 mptsas_waitq_delete(mpt, cmd);
7791 7851 (void) mptsas_start_cmd(mpt, cmd);
7792 7852 }
7793 7853 }
7794 7854 cmd = next_cmd;
7795 7855 }
7796 7856 }
7797 7857 /*
7798 7858 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7799 7859 * Accept all those queued cmds before new cmd is accept so that the
7800 7860 * cmds are sent in order.
7801 7861 */
7802 7862 static void
7803 7863 mptsas_accept_tx_waitq(mptsas_t *mpt)
7804 7864 {
7805 7865 mptsas_cmd_t *cmd;
7806 7866
7807 7867 ASSERT(mutex_owned(&mpt->m_mutex));
7808 7868 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7809 7869
7810 7870 /*
7811 7871 * A Bus Reset could occur at any time and flush the tx_waitq,
7812 7872 * so we cannot count on the tx_waitq to contain even one cmd.
7813 7873 * And when the m_tx_waitq_mutex is released and run
7814 7874 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7815 7875 */
7816 7876 cmd = mpt->m_tx_waitq;
7817 7877 for (;;) {
7818 7878 if ((cmd = mpt->m_tx_waitq) == NULL) {
7819 7879 mpt->m_tx_draining = 0;
7820 7880 break;
7821 7881 }
7822 7882 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7823 7883 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7824 7884 }
7825 7885 cmd->cmd_linkp = NULL;
7826 7886 mutex_exit(&mpt->m_tx_waitq_mutex);
7827 7887 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7828 7888 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7829 7889 "to accept cmd on queue\n");
7830 7890 mutex_enter(&mpt->m_tx_waitq_mutex);
7831 7891 }
7832 7892 }
7833 7893
7834 7894
7835 7895 /*
7836 7896 * mpt tag type lookup
7837 7897 */
7838 7898 static char mptsas_tag_lookup[] =
7839 7899 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7840 7900
7841 7901 static int
7842 7902 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7843 7903 {
7844 7904 struct scsi_pkt *pkt = CMD2PKT(cmd);
7845 7905 uint32_t control = 0;
7846 7906 int n;
7847 7907 caddr_t mem;
7848 7908 pMpi2SCSIIORequest_t io_request;
7849 7909 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
7850 7910 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
7851 7911 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7852 7912 uint16_t SMID, io_flags = 0;
7853 7913 uint32_t request_desc_low, request_desc_high;
7854 7914
7855 7915 NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));
7856 7916
7857 7917 /*
7858 7918 * Set SMID and increment index. Rollover to 1 instead of 0 if index
7859 7919 * is at the max. 0 is an invalid SMID, so we call the first index 1.
7860 7920 */
7861 7921 SMID = cmd->cmd_slot;
7862 7922
7863 7923 /*
7864 7924 * It is possible for back to back device reset to
7865 7925 * happen before the reset delay has expired. That's
7866 7926 * ok, just let the device reset go out on the bus.
7867 7927 */
7868 7928 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7869 7929 ASSERT(ptgt->m_reset_delay == 0);
7870 7930 }
7871 7931
7872 7932 /*
7873 7933 * if a non-tagged cmd is submitted to an active tagged target
7874 7934 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
7875 7935 * to be untagged
7876 7936 */
7877 7937 if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
7878 7938 (ptgt->m_t_ncmds > 1) &&
7879 7939 ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
7880 7940 (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
7881 7941 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7882 7942 NDBG23(("target=%d, untagged cmd, start draining\n",
7883 7943 ptgt->m_devhdl));
7884 7944
7885 7945 if (ptgt->m_reset_delay == 0) {
7886 7946 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
7887 7947 }
7888 7948
7889 7949 mptsas_remove_cmd(mpt, cmd);
7890 7950 cmd->cmd_pkt_flags |= FLAG_HEAD;
7891 7951 mptsas_waitq_add(mpt, cmd);
7892 7952 }
7893 7953 return (DDI_FAILURE);
7894 7954 }
7895 7955
7896 7956 /*
7897 7957 * Set correct tag bits.
7898 7958 */
7899 7959 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
7900 7960 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
7901 7961 FLAG_TAGMASK) >> 12)]) {
7902 7962 case MSG_SIMPLE_QTAG:
7903 7963 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7904 7964 break;
7905 7965 case MSG_HEAD_QTAG:
7906 7966 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
7907 7967 break;
7908 7968 case MSG_ORDERED_QTAG:
7909 7969 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
7910 7970 break;
7911 7971 default:
7912 7972 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
7913 7973 break;
7914 7974 }
7915 7975 } else {
7916 7976 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
7917 7977 ptgt->m_t_throttle = 1;
7918 7978 }
7919 7979 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7920 7980 }
7921 7981
7922 7982 if (cmd->cmd_pkt_flags & FLAG_TLR) {
7923 7983 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
7924 7984 }
7925 7985
7926 7986 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
7927 7987 io_request = (pMpi2SCSIIORequest_t)mem;
7928 7988
7929 7989 bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
7930 7990 ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
7931 7991 (MPI2_SCSI_IO_REQUEST, SGL) / 4);
7932 7992 mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
7933 7993 MPI2_FUNCTION_SCSI_IO_REQUEST);
7934 7994
7935 7995 (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
7936 7996 io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
7937 7997
7938 7998 io_flags = cmd->cmd_cdblen;
7939 7999 ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
7940 8000 /*
7941 8001 * setup the Scatter/Gather DMA list for this request
7942 8002 */
7943 8003 if (cmd->cmd_cookiec > 0) {
7944 8004 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
7945 8005 } else {
7946 8006 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
7947 8007 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
7948 8008 MPI2_SGE_FLAGS_END_OF_BUFFER |
7949 8009 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
7950 8010 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
7951 8011 }
7952 8012
7953 8013 /*
7954 8014 * save ARQ information
7955 8015 */
7956 8016 ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
7957 8017 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
7958 8018 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
7959 8019 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
7960 8020 cmd->cmd_ext_arqcookie.dmac_address);
7961 8021 } else {
7962 8022 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
7963 8023 cmd->cmd_arqcookie.dmac_address);
7964 8024 }
7965 8025
7966 8026 ddi_put32(acc_hdl, &io_request->Control, control);
7967 8027
7968 8028 NDBG31(("starting message=0x%p, with cmd=0x%p",
7969 8029 (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
7970 8030
7971 8031 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
7972 8032
7973 8033 /*
7974 8034 * Build request descriptor and write it to the request desc post reg.
7975 8035 */
7976 8036 request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
7977 8037 request_desc_high = ptgt->m_devhdl << 16;
7978 8038 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
7979 8039
7980 8040 /*
7981 8041 * Start timeout.
7982 8042 */
7983 8043 #ifdef MPTSAS_TEST
7984 8044 /*
7985 8045 * Temporarily set timebase = 0; needed for
7986 8046 * timeout torture test.
7987 8047 */
7988 8048 if (mptsas_test_timeouts) {
7989 8049 ptgt->m_timebase = 0;
7990 8050 }
7991 8051 #endif
7992 8052 n = pkt->pkt_time - ptgt->m_timebase;
7993 8053
7994 8054 if (n == 0) {
7995 8055 (ptgt->m_dups)++;
7996 8056 ptgt->m_timeout = ptgt->m_timebase;
7997 8057 } else if (n > 0) {
7998 8058 ptgt->m_timeout =
7999 8059 ptgt->m_timebase = pkt->pkt_time;
8000 8060 ptgt->m_dups = 1;
8001 8061 } else if (n < 0) {
8002 8062 ptgt->m_timeout = ptgt->m_timebase;
8003 8063 }
8004 8064 #ifdef MPTSAS_TEST
8005 8065 /*
8006 8066 * Set back to a number higher than
8007 8067 * mptsas_scsi_watchdog_tick
8008 8068 * so timeouts will happen in mptsas_watchsubr
8009 8069 */
8010 8070 if (mptsas_test_timeouts) {
8011 8071 ptgt->m_timebase = 60;
8012 8072 }
8013 8073 #endif
8014 8074
8015 8075 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8016 8076 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8017 8077 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8018 8078 return (DDI_FAILURE);
8019 8079 }
8020 8080 return (DDI_SUCCESS);
8021 8081 }
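/*
 * Illustrative sketch, not part of the driver source: how the request
 * descriptor words posted by MPTSAS_START_CMD() above are laid out.  The
 * SMID and DevHandle values below are hypothetical; only the shifts and the
 * MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO flag come from mptsas_start_cmd() itself.
 */
#if 0	/* example only */
	uint16_t example_smid = 0x002d;		/* hypothetical slot SMID */
	uint16_t example_devhdl = 0x0009;	/* hypothetical DevHandle */
	uint32_t desc_low, desc_high;

	/* SMID occupies bits 31:16 of the low word, the flag the low bits. */
	desc_low = (example_smid << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	/* The target's DevHandle occupies bits 31:16 of the high word. */
	desc_high = example_devhdl << 16;	/* 0x00090000 */
#endif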
8022 8082
8023 8083 /*
8024 8084 * Select a helper thread to handle current doneq
8025 8085 */
8026 8086 static void
8027 8087 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8028 8088 {
8029 8089 uint64_t t, i;
8030 8090 uint32_t min = 0xffffffff;
8031 8091 mptsas_doneq_thread_list_t *item;
8032 8092
8033 8093 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8034 8094 item = &mpt->m_doneq_thread_id[i];
8035 8095 /*
8036 8096 		 * If the number of completed commands on helper thread[i]
8037 8097 		 * is less than doneq_thread_threshold, pick thread[i].
8038 8098 		 * Otherwise pick the thread with the fewest completed commands.
8039 8099 */
8040 8100
8041 8101 mutex_enter(&item->mutex);
8042 8102 if (item->len < mpt->m_doneq_thread_threshold) {
8043 8103 t = i;
8044 8104 mutex_exit(&item->mutex);
8045 8105 break;
8046 8106 }
8047 8107 if (item->len < min) {
8048 8108 min = item->len;
8049 8109 t = i;
8050 8110 }
8051 8111 mutex_exit(&item->mutex);
8052 8112 }
8053 8113 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8054 8114 mptsas_doneq_mv(mpt, t);
8055 8115 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8056 8116 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8057 8117 }
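/*
 * Illustrative note, not part of the driver source: the selection policy in
 * mptsas_deliver_doneq_thread() above takes the first helper thread whose
 * queue length is under m_doneq_thread_threshold and otherwise falls back to
 * the thread with the shortest queue.  For a hypothetical threshold of 3,
 * lengths {5, 2, 7} pick thread 1 immediately (2 < 3), while lengths
 * {5, 4, 7} have no thread under 3, so thread 1 (length 4, the minimum) is
 * chosen.  A standalone sketch of the same policy:
 */
#if 0	/* example only */
static uint64_t
pick_doneq_thread(const uint32_t *len, uint64_t nthreads, uint32_t threshold)
{
	uint64_t i, t = 0;
	uint32_t min = 0xffffffff;

	for (i = 0; i < nthreads; i++) {
		if (len[i] < threshold)
			return (i);	/* lightly loaded thread: take it */
		if (len[i] < min) {
			min = len[i];
			t = i;
		}
	}
	return (t);			/* otherwise the least-loaded thread */
}
#endif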
8058 8118
8059 8119 /*
8060 8120  * move the current global doneq to the doneq of thread[t]
8061 8121 */
8062 8122 static void
8063 8123 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8064 8124 {
8065 8125 mptsas_cmd_t *cmd;
8066 8126 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8067 8127
8068 8128 ASSERT(mutex_owned(&item->mutex));
8069 8129 while ((cmd = mpt->m_doneq) != NULL) {
8070 8130 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8071 8131 mpt->m_donetail = &mpt->m_doneq;
8072 8132 }
8073 8133 cmd->cmd_linkp = NULL;
8074 8134 *item->donetail = cmd;
8075 8135 item->donetail = &cmd->cmd_linkp;
8076 8136 mpt->m_doneq_len--;
8077 8137 item->len++;
8078 8138 }
8079 8139 }
8080 8140
8081 8141 void
8082 8142 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8083 8143 {
8084 8144 struct scsi_pkt *pkt = CMD2PKT(cmd);
8085 8145
8086 8146 /* Check all acc and dma handles */
8087 8147 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8088 8148 DDI_SUCCESS) ||
8089 8149 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8090 8150 DDI_SUCCESS) ||
8091 8151 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8092 8152 DDI_SUCCESS) ||
8093 8153 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8094 8154 DDI_SUCCESS) ||
8095 8155 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8096 8156 DDI_SUCCESS) ||
8097 8157 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8098 8158 DDI_SUCCESS) ||
8099 8159 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8100 8160 DDI_SUCCESS)) {
8101 8161 ddi_fm_service_impact(mpt->m_dip,
8102 8162 DDI_SERVICE_UNAFFECTED);
8103 8163 ddi_fm_acc_err_clear(mpt->m_config_handle,
8104 8164 DDI_FME_VER0);
8105 8165 pkt->pkt_reason = CMD_TRAN_ERR;
8106 8166 pkt->pkt_statistics = 0;
8107 8167 }
8108 8168 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8109 8169 DDI_SUCCESS) ||
8110 8170 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8111 8171 DDI_SUCCESS) ||
8112 8172 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8113 8173 DDI_SUCCESS) ||
8114 8174 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8115 8175 DDI_SUCCESS) ||
8116 8176 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8117 8177 DDI_SUCCESS)) {
8118 8178 ddi_fm_service_impact(mpt->m_dip,
8119 8179 DDI_SERVICE_UNAFFECTED);
8120 8180 pkt->pkt_reason = CMD_TRAN_ERR;
8121 8181 pkt->pkt_statistics = 0;
8122 8182 }
8123 8183 if (cmd->cmd_dmahandle &&
8124 8184 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8125 8185 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8126 8186 pkt->pkt_reason = CMD_TRAN_ERR;
8127 8187 pkt->pkt_statistics = 0;
8128 8188 }
8129 8189 if ((cmd->cmd_extra_frames &&
8130 8190 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8131 8191 DDI_SUCCESS) ||
8132 8192 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8133 8193 DDI_SUCCESS)))) {
8134 8194 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8135 8195 pkt->pkt_reason = CMD_TRAN_ERR;
8136 8196 pkt->pkt_statistics = 0;
8137 8197 }
8138 8198 if (cmd->cmd_arqhandle &&
8139 8199 (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8140 8200 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8141 8201 pkt->pkt_reason = CMD_TRAN_ERR;
8142 8202 pkt->pkt_statistics = 0;
8143 8203 }
8144 8204 if (cmd->cmd_ext_arqhandle &&
8145 8205 (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8146 8206 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8147 8207 pkt->pkt_reason = CMD_TRAN_ERR;
8148 8208 pkt->pkt_statistics = 0;
8149 8209 }
8150 8210 }
8151 8211
8152 8212 /*
8153 8213 * These routines manipulate the queue of commands that
8154 8214 * are waiting for their completion routines to be called.
8155 8215 * The queue is usually in FIFO order but on an MP system
8156 8216 * it's possible for the completion routines to get out
8157 8217 * of order. If that's a problem you need to add a global
8158 8218 * mutex around the code that calls the completion routine
8159 8219 * in the interrupt handler.
8160 8220 */
8161 8221 static void
8162 8222 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8163 8223 {
8164 8224 struct scsi_pkt *pkt = CMD2PKT(cmd);
8165 8225
8166 8226 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8167 8227
8168 8228 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8169 8229 cmd->cmd_linkp = NULL;
8170 8230 cmd->cmd_flags |= CFLAG_FINISHED;
8171 8231 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8172 8232
8173 8233 mptsas_fma_check(mpt, cmd);
8174 8234
8175 8235 /*
8176 8236 * only add scsi pkts that have completion routines to
8177 8237 	 * the doneq; no-intr cmds do not have callbacks.
8178 8238 */
8179 8239 if (pkt && (pkt->pkt_comp)) {
8180 8240 *mpt->m_donetail = cmd;
8181 8241 mpt->m_donetail = &cmd->cmd_linkp;
8182 8242 mpt->m_doneq_len++;
8183 8243 }
8184 8244 }
8185 8245
8186 8246 static mptsas_cmd_t *
8187 8247 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8188 8248 {
8189 8249 mptsas_cmd_t *cmd;
8190 8250 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8191 8251
8192 8252 /* pop one off the done queue */
8193 8253 if ((cmd = item->doneq) != NULL) {
8194 8254 /* if the queue is now empty fix the tail pointer */
8195 8255 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8196 8256 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8197 8257 item->donetail = &item->doneq;
8198 8258 }
8199 8259 cmd->cmd_linkp = NULL;
8200 8260 item->len--;
8201 8261 }
8202 8262 return (cmd);
8203 8263 }
8204 8264
8205 8265 static void
8206 8266 mptsas_doneq_empty(mptsas_t *mpt)
8207 8267 {
8208 8268 if (mpt->m_doneq && !mpt->m_in_callback) {
8209 8269 mptsas_cmd_t *cmd, *next;
8210 8270 struct scsi_pkt *pkt;
8211 8271
8212 8272 mpt->m_in_callback = 1;
8213 8273 cmd = mpt->m_doneq;
8214 8274 mpt->m_doneq = NULL;
8215 8275 mpt->m_donetail = &mpt->m_doneq;
8216 8276 mpt->m_doneq_len = 0;
8217 8277
8218 8278 mutex_exit(&mpt->m_mutex);
8219 8279 /*
8220 8280 * run the completion routines of all the
8221 8281 * completed commands
8222 8282 */
8223 8283 while (cmd != NULL) {
8224 8284 next = cmd->cmd_linkp;
8225 8285 cmd->cmd_linkp = NULL;
8226 8286 /* run this command's completion routine */
8227 8287 cmd->cmd_flags |= CFLAG_COMPLETED;
8228 8288 pkt = CMD2PKT(cmd);
8229 8289 mptsas_pkt_comp(pkt, cmd);
8230 8290 cmd = next;
8231 8291 }
8232 8292 mutex_enter(&mpt->m_mutex);
8233 8293 mpt->m_in_callback = 0;
8234 8294 }
8235 8295 }
8236 8296
8237 8297 /*
8238 8298 * These routines manipulate the target's queue of pending requests
8239 8299 */
8240 8300 void
8241 8301 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8242 8302 {
8243 8303 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8244 8304 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8245 8305 cmd->cmd_queued = TRUE;
8246 8306 if (ptgt)
8247 8307 ptgt->m_t_nwait++;
8248 8308 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8249 8309 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8250 8310 mpt->m_waitqtail = &cmd->cmd_linkp;
8251 8311 }
8252 8312 mpt->m_waitq = cmd;
8253 8313 } else {
8254 8314 cmd->cmd_linkp = NULL;
8255 8315 *(mpt->m_waitqtail) = cmd;
8256 8316 mpt->m_waitqtail = &cmd->cmd_linkp;
8257 8317 }
8258 8318 }
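/*
 * Illustrative sketch, not part of the driver source: the wait queue above
 * uses a "pointer to the last link" tail, i.e. m_waitqtail points at m_waitq
 * while the queue is empty and at the last command's cmd_linkp otherwise, so
 * a tail append is one store plus a tail update.  A generic version of the
 * same idiom, with hypothetical names:
 */
#if 0	/* example only */
struct node {
	struct node	*next;
};

struct tailq {
	struct node	*head;
	struct node	**tailp;	/* &head when empty, &last->next else */
};

static void
tailq_init(struct tailq *q)
{
	q->head = NULL;
	q->tailp = &q->head;
}

static void
tailq_append(struct tailq *q, struct node *n)
{
	n->next = NULL;
	*q->tailp = n;		/* link after the current last element */
	q->tailp = &n->next;	/* the new element is now the tail */
}
#endif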
8259 8319
8260 8320 static mptsas_cmd_t *
8261 8321 mptsas_waitq_rm(mptsas_t *mpt)
8262 8322 {
8263 8323 mptsas_cmd_t *cmd;
8264 8324 mptsas_target_t *ptgt;
8265 8325 NDBG7(("mptsas_waitq_rm"));
8266 8326
8267 8327 MPTSAS_WAITQ_RM(mpt, cmd);
8268 8328
8269 8329 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8270 8330 if (cmd) {
8271 8331 ptgt = cmd->cmd_tgt_addr;
8272 8332 if (ptgt) {
8273 8333 ptgt->m_t_nwait--;
8274 8334 ASSERT(ptgt->m_t_nwait >= 0);
8275 8335 }
8276 8336 }
8277 8337 return (cmd);
8278 8338 }
8279 8339
8280 8340 /*
8281 8341 * remove specified cmd from the middle of the wait queue.
8282 8342 */
8283 8343 static void
8284 8344 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8285 8345 {
8286 8346 mptsas_cmd_t *prevp = mpt->m_waitq;
8287 8347 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8288 8348
8289 8349 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8290 8350 (void *)mpt, (void *)cmd));
8291 8351 if (ptgt) {
8292 8352 ptgt->m_t_nwait--;
8293 8353 ASSERT(ptgt->m_t_nwait >= 0);
8294 8354 }
8295 8355
8296 8356 if (prevp == cmd) {
8297 8357 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8298 8358 mpt->m_waitqtail = &mpt->m_waitq;
8299 8359
8300 8360 cmd->cmd_linkp = NULL;
8301 8361 cmd->cmd_queued = FALSE;
8302 8362 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8303 8363 (void *)mpt, (void *)cmd));
8304 8364 return;
8305 8365 }
8306 8366
8307 8367 while (prevp != NULL) {
8308 8368 if (prevp->cmd_linkp == cmd) {
8309 8369 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8310 8370 mpt->m_waitqtail = &prevp->cmd_linkp;
8311 8371
8312 8372 cmd->cmd_linkp = NULL;
8313 8373 cmd->cmd_queued = FALSE;
8314 8374 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8315 8375 (void *)mpt, (void *)cmd));
8316 8376 return;
8317 8377 }
8318 8378 prevp = prevp->cmd_linkp;
8319 8379 }
8320 8380 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8321 8381 }
8322 8382
8323 8383 static mptsas_cmd_t *
8324 8384 mptsas_tx_waitq_rm(mptsas_t *mpt)
8325 8385 {
8326 8386 mptsas_cmd_t *cmd;
8327 8387 NDBG7(("mptsas_tx_waitq_rm"));
8328 8388
8329 8389 MPTSAS_TX_WAITQ_RM(mpt, cmd);
8330 8390
8331 8391 NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8332 8392
8333 8393 return (cmd);
8334 8394 }
8335 8395
8336 8396 /*
8337 8397 * remove specified cmd from the middle of the tx_waitq.
8338 8398 */
8339 8399 static void
8340 8400 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8341 8401 {
8342 8402 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8343 8403
8344 8404 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8345 8405 (void *)mpt, (void *)cmd));
8346 8406
8347 8407 if (prevp == cmd) {
8348 8408 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8349 8409 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8350 8410
8351 8411 cmd->cmd_linkp = NULL;
8352 8412 cmd->cmd_queued = FALSE;
8353 8413 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8354 8414 (void *)mpt, (void *)cmd));
8355 8415 return;
8356 8416 }
8357 8417
8358 8418 while (prevp != NULL) {
8359 8419 if (prevp->cmd_linkp == cmd) {
8360 8420 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8361 8421 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8362 8422
8363 8423 cmd->cmd_linkp = NULL;
8364 8424 cmd->cmd_queued = FALSE;
8365 8425 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8366 8426 (void *)mpt, (void *)cmd));
8367 8427 return;
8368 8428 }
8369 8429 prevp = prevp->cmd_linkp;
8370 8430 }
8371 8431 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8372 8432 }
8373 8433
8374 8434 /*
8375 8435 * device and bus reset handling
8376 8436 *
8377 8437 * Notes:
8378 8438 * - RESET_ALL: reset the controller
8379 8439 * - RESET_TARGET: reset the target specified in scsi_address
8380 8440 */
8381 8441 static int
8382 8442 mptsas_scsi_reset(struct scsi_address *ap, int level)
8383 8443 {
8384 8444 mptsas_t *mpt = ADDR2MPT(ap);
8385 8445 int rval;
8386 8446 mptsas_tgt_private_t *tgt_private;
8387 8447 mptsas_target_t *ptgt = NULL;
8388 8448
8389 8449 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8390 8450 ptgt = tgt_private->t_private;
8391 8451 if (ptgt == NULL) {
8392 8452 return (FALSE);
8393 8453 }
8394 8454 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8395 8455 level));
8396 8456
8397 8457 mutex_enter(&mpt->m_mutex);
8398 8458 /*
8399 8459 * if we are not in panic set up a reset delay for this target
8400 8460 */
8401 8461 if (!ddi_in_panic()) {
8402 8462 mptsas_setup_bus_reset_delay(mpt);
8403 8463 } else {
8404 8464 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8405 8465 }
8406 8466 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8407 8467 mutex_exit(&mpt->m_mutex);
8408 8468
8409 8469 /*
8410 8470 	 * The transport layer expects to see only TRUE and
8411 8471 	 * FALSE. Therefore, we will adjust the return value
8412 8472 * if mptsas_do_scsi_reset returns FAILED.
8413 8473 */
8414 8474 if (rval == FAILED)
8415 8475 rval = FALSE;
8416 8476 return (rval);
8417 8477 }
8418 8478
8419 8479 static int
8420 8480 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8421 8481 {
8422 8482 int rval = FALSE;
8423 8483 uint8_t config, disk;
8424 - mptsas_slots_t *slots = mpt->m_active;
8425 8484
8426 8485 ASSERT(mutex_owned(&mpt->m_mutex));
8427 8486
8428 8487 if (mptsas_debug_resets) {
8429 8488 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8430 8489 devhdl);
8431 8490 }
8432 8491
8433 8492 /*
8434 8493 * Issue a Target Reset message to the target specified but not to a
8435 8494 * disk making up a raid volume. Just look through the RAID config
8436 8495 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8437 8496 * list, then don't reset this target.
8438 8497 */
8439 - for (config = 0; config < slots->m_num_raid_configs; config++) {
8498 + for (config = 0; config < mpt->m_num_raid_configs; config++) {
8440 8499 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8441 - if (devhdl == slots->m_raidconfig[config].
8500 + if (devhdl == mpt->m_raidconfig[config].
8442 8501 m_physdisk_devhdl[disk]) {
8443 8502 return (TRUE);
8444 8503 }
8445 8504 }
8446 8505 }
8447 8506
8448 8507 rval = mptsas_ioc_task_management(mpt,
8449 8508 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8450 8509
8451 8510 mptsas_doneq_empty(mpt);
8452 8511 return (rval);
8453 8512 }
8454 8513
8455 8514 static int
8456 8515 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8457 8516 void (*callback)(caddr_t), caddr_t arg)
8458 8517 {
8459 8518 mptsas_t *mpt = ADDR2MPT(ap);
8460 8519
8461 8520 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8462 8521
8463 8522 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8464 8523 &mpt->m_mutex, &mpt->m_reset_notify_listf));
8465 8524 }
8466 8525
8467 8526 static int
8468 8527 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8469 8528 {
8470 8529 dev_info_t *lun_dip = NULL;
8471 8530
8472 8531 ASSERT(sd != NULL);
8473 8532 ASSERT(name != NULL);
8474 8533 lun_dip = sd->sd_dev;
8475 8534 ASSERT(lun_dip != NULL);
8476 8535
8477 8536 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8478 8537 return (1);
8479 8538 } else {
8480 8539 return (0);
8481 8540 }
8482 8541 }
8483 8542
8484 8543 static int
8485 8544 mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
8486 8545 {
8487 8546 return (mptsas_get_name(sd, name, len));
8488 8547 }
8489 8548
8490 8549 void
8491 8550 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8492 8551 {
8493 8552
8494 8553 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8495 8554
8496 8555 /*
8497 8556 * if the bus is draining/quiesced, no changes to the throttles
8498 8557 * are allowed. Not allowing change of throttles during draining
8499 8558 * limits error recovery but will reduce draining time
8500 8559 *
8501 8560 * all throttles should have been set to HOLD_THROTTLE
8502 8561 */
8503 8562 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8504 8563 return;
8505 8564 }
8506 8565
8507 8566 if (what == HOLD_THROTTLE) {
8508 8567 ptgt->m_t_throttle = HOLD_THROTTLE;
8509 8568 } else if (ptgt->m_reset_delay == 0) {
8510 8569 ptgt->m_t_throttle = what;
8511 8570 }
8512 8571 }
8513 8572
8514 8573 /*
8515 8574 * Clean up from a device reset.
8516 8575 * For the case of target reset, this function clears the waitq of all
8517 8576 * commands for a particular target. For the case of abort task set, this
8518 8577  * function clears the waitq of all commands for a particular target/lun.
8519 8578 */
8520 8579 static void
8521 8580 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
8522 8581 {
8523 8582 mptsas_slots_t *slots = mpt->m_active;
8524 8583 mptsas_cmd_t *cmd, *next_cmd;
8525 8584 int slot;
8526 8585 uchar_t reason;
8527 8586 uint_t stat;
8528 8587
8529 8588 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
8530 8589
8531 8590 /*
8532 8591 * Make sure the I/O Controller has flushed all cmds
8533 8592 * that are associated with this target for a target reset
8534 8593 * and target/lun for abort task set.
8535 8594 * Account for TM requests, which use the last SMID.
8536 8595 */
8537 - for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8596 + for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
8538 8597 if ((cmd = slots->m_slot[slot]) == NULL)
8539 8598 continue;
8540 8599 reason = CMD_RESET;
8541 8600 stat = STAT_DEV_RESET;
8542 8601 switch (tasktype) {
8543 8602 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8544 8603 if (Tgt(cmd) == target) {
8545 8604 NDBG25(("mptsas_flush_target discovered non-"
8546 8605 "NULL cmd in slot %d, tasktype 0x%x", slot,
8547 8606 tasktype));
8548 8607 mptsas_dump_cmd(mpt, cmd);
8549 8608 mptsas_remove_cmd(mpt, cmd);
8550 8609 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
8551 8610 mptsas_doneq_add(mpt, cmd);
8552 8611 }
8553 8612 break;
8554 8613 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8555 8614 reason = CMD_ABORTED;
8556 8615 stat = STAT_ABORTED;
8557 8616 /*FALLTHROUGH*/
8558 8617 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8559 8618 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8560 8619
8561 8620 NDBG25(("mptsas_flush_target discovered non-"
8562 8621 "NULL cmd in slot %d, tasktype 0x%x", slot,
8563 8622 tasktype));
8564 8623 mptsas_dump_cmd(mpt, cmd);
8565 8624 mptsas_remove_cmd(mpt, cmd);
8566 8625 mptsas_set_pkt_reason(mpt, cmd, reason,
8567 8626 stat);
8568 8627 mptsas_doneq_add(mpt, cmd);
8569 8628 }
8570 8629 break;
8571 8630 default:
8572 8631 break;
8573 8632 }
8574 8633 }
8575 8634
8576 8635 /*
8577 8636 * Flush the waitq and tx_waitq of this target's cmds
8578 8637 */
8579 8638 cmd = mpt->m_waitq;
8580 8639
8581 8640 reason = CMD_RESET;
8582 8641 stat = STAT_DEV_RESET;
8583 8642
8584 8643 switch (tasktype) {
8585 8644 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8586 8645 while (cmd != NULL) {
8587 8646 next_cmd = cmd->cmd_linkp;
8588 8647 if (Tgt(cmd) == target) {
8589 8648 mptsas_waitq_delete(mpt, cmd);
8590 8649 mptsas_set_pkt_reason(mpt, cmd,
8591 8650 reason, stat);
8592 8651 mptsas_doneq_add(mpt, cmd);
8593 8652 }
8594 8653 cmd = next_cmd;
8595 8654 }
8596 8655 mutex_enter(&mpt->m_tx_waitq_mutex);
8597 8656 cmd = mpt->m_tx_waitq;
8598 8657 while (cmd != NULL) {
8599 8658 next_cmd = cmd->cmd_linkp;
8600 8659 if (Tgt(cmd) == target) {
8601 8660 mptsas_tx_waitq_delete(mpt, cmd);
8602 8661 mutex_exit(&mpt->m_tx_waitq_mutex);
8603 8662 mptsas_set_pkt_reason(mpt, cmd,
8604 8663 reason, stat);
8605 8664 mptsas_doneq_add(mpt, cmd);
8606 8665 mutex_enter(&mpt->m_tx_waitq_mutex);
8607 8666 }
8608 8667 cmd = next_cmd;
8609 8668 }
8610 8669 mutex_exit(&mpt->m_tx_waitq_mutex);
8611 8670 break;
8612 8671 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8613 8672 reason = CMD_ABORTED;
8614 8673 stat = STAT_ABORTED;
8615 8674 /*FALLTHROUGH*/
8616 8675 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8617 8676 while (cmd != NULL) {
8618 8677 next_cmd = cmd->cmd_linkp;
8619 8678 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8620 8679 mptsas_waitq_delete(mpt, cmd);
8621 8680 mptsas_set_pkt_reason(mpt, cmd,
8622 8681 reason, stat);
8623 8682 mptsas_doneq_add(mpt, cmd);
8624 8683 }
8625 8684 cmd = next_cmd;
8626 8685 }
8627 8686 mutex_enter(&mpt->m_tx_waitq_mutex);
8628 8687 cmd = mpt->m_tx_waitq;
8629 8688 while (cmd != NULL) {
8630 8689 next_cmd = cmd->cmd_linkp;
8631 8690 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8632 8691 mptsas_tx_waitq_delete(mpt, cmd);
8633 8692 mutex_exit(&mpt->m_tx_waitq_mutex);
8634 8693 mptsas_set_pkt_reason(mpt, cmd,
8635 8694 reason, stat);
8636 8695 mptsas_doneq_add(mpt, cmd);
8637 8696 mutex_enter(&mpt->m_tx_waitq_mutex);
8638 8697 }
8639 8698 cmd = next_cmd;
8640 8699 }
8641 8700 mutex_exit(&mpt->m_tx_waitq_mutex);
8642 8701 break;
8643 8702 default:
8644 8703 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
8645 8704 tasktype);
8646 8705 break;
8647 8706 }
8648 8707 }
8649 8708
8650 8709 /*
8651 8710  * Clean up hba state, abort all outstanding commands and commands in the
8652 8711  * waitq, and reset the timeout of all targets.
8653 8712 */
8654 8713 static void
8655 8714 mptsas_flush_hba(mptsas_t *mpt)
8656 8715 {
8657 8716 mptsas_slots_t *slots = mpt->m_active;
8658 8717 mptsas_cmd_t *cmd;
8659 8718 int slot;
8660 8719
8661 8720 NDBG25(("mptsas_flush_hba"));
8662 8721
8663 8722 /*
8664 8723 * The I/O Controller should have already sent back
8665 8724 * all commands via the scsi I/O reply frame. Make
8666 8725 * sure all commands have been flushed.
8667 8726 * Account for TM request, which use the last SMID.
8668 8727 */
8669 - for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8728 + for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
8670 8729 if ((cmd = slots->m_slot[slot]) == NULL)
8671 8730 continue;
8672 8731
8673 8732 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8674 8733 /*
8675 8734 * Need to make sure to tell everyone that might be
8676 8735 * waiting on this command that it's going to fail. If
8677 8736 * we get here, this command will never timeout because
8678 8737 * the active command table is going to be re-allocated,
8679 8738 * so there will be nothing to check against a time out.
8680 8739 * Instead, mark the command as failed due to reset.
8681 8740 */
8682 8741 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
8683 8742 STAT_BUS_RESET);
8684 8743 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8685 8744 (cmd->cmd_flags & CFLAG_CONFIG) ||
8686 8745 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8687 8746 cmd->cmd_flags |= CFLAG_FINISHED;
8688 8747 cv_broadcast(&mpt->m_passthru_cv);
8689 8748 cv_broadcast(&mpt->m_config_cv);
8690 8749 cv_broadcast(&mpt->m_fw_diag_cv);
8691 8750 }
8692 8751 continue;
8693 8752 }
8694 8753
8695 8754 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
8696 8755 slot));
8697 8756 mptsas_dump_cmd(mpt, cmd);
8698 8757
8699 8758 mptsas_remove_cmd(mpt, cmd);
8700 8759 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8701 8760 mptsas_doneq_add(mpt, cmd);
8702 8761 }
8703 8762
8704 8763 /*
8705 8764 * Flush the waitq.
8706 8765 */
8707 8766 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
8708 8767 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8709 8768 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8710 8769 (cmd->cmd_flags & CFLAG_CONFIG) ||
8711 8770 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8712 8771 cmd->cmd_flags |= CFLAG_FINISHED;
8713 8772 cv_broadcast(&mpt->m_passthru_cv);
8714 8773 cv_broadcast(&mpt->m_config_cv);
8715 8774 cv_broadcast(&mpt->m_fw_diag_cv);
8716 8775 } else {
8717 8776 mptsas_doneq_add(mpt, cmd);
8718 8777 }
8719 8778 }
8720 8779
8721 8780 /*
8722 8781 * Flush the tx_waitq
8723 8782 */
8724 8783 mutex_enter(&mpt->m_tx_waitq_mutex);
8725 8784 while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
8726 8785 mutex_exit(&mpt->m_tx_waitq_mutex);
8727 8786 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8728 8787 mptsas_doneq_add(mpt, cmd);
8729 8788 mutex_enter(&mpt->m_tx_waitq_mutex);
8730 8789 }
8731 8790 mutex_exit(&mpt->m_tx_waitq_mutex);
8732 8791
8733 8792 /*
8734 8793 * Drain the taskqs prior to reallocating resources.
8735 8794 */
8736 8795 mutex_exit(&mpt->m_mutex);
8737 8796 ddi_taskq_wait(mpt->m_event_taskq);
8738 8797 ddi_taskq_wait(mpt->m_dr_taskq);
8739 8798 mutex_enter(&mpt->m_mutex);
8740 8799 }
8741 8800
8742 8801 /*
8743 8802 * set pkt_reason and OR in pkt_statistics flag
8744 8803 */
8745 8804 static void
8746 8805 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8747 8806 uint_t stat)
8748 8807 {
8749 8808 #ifndef __lock_lint
8750 8809 _NOTE(ARGUNUSED(mpt))
8751 8810 #endif
8752 8811
8753 8812 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8754 8813 (void *)cmd, reason, stat));
8755 8814
8756 8815 if (cmd) {
8757 8816 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8758 8817 cmd->cmd_pkt->pkt_reason = reason;
8759 8818 }
8760 8819 cmd->cmd_pkt->pkt_statistics |= stat;
8761 8820 }
8762 8821 }
8763 8822
8764 8823 static void
8765 8824 mptsas_start_watch_reset_delay()
8766 8825 {
8767 8826 NDBG22(("mptsas_start_watch_reset_delay"));
8768 8827
8769 8828 mutex_enter(&mptsas_global_mutex);
8770 8829 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8771 8830 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8772 8831 drv_usectohz((clock_t)
8773 8832 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8774 8833 ASSERT(mptsas_reset_watch != NULL);
8775 8834 }
8776 8835 mutex_exit(&mptsas_global_mutex);
8777 8836 }
8778 8837
8779 8838 static void
8780 8839 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8781 8840 {
8782 8841 mptsas_target_t *ptgt = NULL;
8783 8842
8843 + ASSERT(MUTEX_HELD(&mpt->m_mutex));
8844 +
8784 8845 NDBG22(("mptsas_setup_bus_reset_delay"));
8785 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8786 - MPTSAS_HASH_FIRST);
8787 - while (ptgt != NULL) {
8846 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8847 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
8788 8848 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8789 8849 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8790 -
8791 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8792 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8793 8850 }
8794 8851
8795 8852 mptsas_start_watch_reset_delay();
8796 8853 }
8797 8854
8798 8855 /*
8799 8856 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8800 8857 * mpt instance for active reset delays
8801 8858 */
8802 8859 static void
8803 8860 mptsas_watch_reset_delay(void *arg)
8804 8861 {
8805 8862 #ifndef __lock_lint
8806 8863 _NOTE(ARGUNUSED(arg))
8807 8864 #endif
8808 8865
8809 8866 mptsas_t *mpt;
8810 8867 int not_done = 0;
8811 8868
8812 8869 NDBG22(("mptsas_watch_reset_delay"));
8813 8870
8814 8871 mutex_enter(&mptsas_global_mutex);
8815 8872 mptsas_reset_watch = 0;
8816 8873 mutex_exit(&mptsas_global_mutex);
8817 8874 rw_enter(&mptsas_global_rwlock, RW_READER);
8818 8875 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
8819 8876 if (mpt->m_tran == 0) {
8820 8877 continue;
8821 8878 }
8822 8879 mutex_enter(&mpt->m_mutex);
8823 8880 not_done += mptsas_watch_reset_delay_subr(mpt);
8824 8881 mutex_exit(&mpt->m_mutex);
8825 8882 }
8826 8883 rw_exit(&mptsas_global_rwlock);
8827 8884
8828 8885 if (not_done) {
8829 8886 mptsas_start_watch_reset_delay();
8830 8887 }
8831 8888 }
8832 8889
8833 8890 static int
8834 8891 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
8835 8892 {
8836 8893 int done = 0;
8837 8894 int restart = 0;
8838 8895 mptsas_target_t *ptgt = NULL;
8839 8896
8840 8897 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
8841 8898
8842 8899 ASSERT(mutex_owned(&mpt->m_mutex));
8843 8900
8844 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8845 - MPTSAS_HASH_FIRST);
8846 - while (ptgt != NULL) {
8901 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8902 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
8847 8903 if (ptgt->m_reset_delay != 0) {
8848 8904 ptgt->m_reset_delay -=
8849 8905 MPTSAS_WATCH_RESET_DELAY_TICK;
8850 8906 if (ptgt->m_reset_delay <= 0) {
8851 8907 ptgt->m_reset_delay = 0;
8852 8908 mptsas_set_throttle(mpt, ptgt,
8853 8909 MAX_THROTTLE);
8854 8910 restart++;
8855 8911 } else {
8856 8912 done = -1;
8857 8913 }
8858 8914 }
8859 -
8860 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8861 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8862 8915 }
8863 8916
8864 8917 if (restart > 0) {
8865 8918 mptsas_restart_hba(mpt);
8866 8919 }
8867 8920 return (done);
8868 8921 }
8869 8922
8870 8923 #ifdef MPTSAS_TEST
8871 8924 static void
8872 8925 mptsas_test_reset(mptsas_t *mpt, int target)
8873 8926 {
8874 8927 mptsas_target_t *ptgt = NULL;
8875 8928
8876 8929 if (mptsas_rtest == target) {
8877 8930 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8878 8931 mptsas_rtest = -1;
8879 8932 }
8880 8933 if (mptsas_rtest == -1) {
8881 8934 NDBG22(("mptsas_test_reset success"));
8882 8935 }
8883 8936 }
8884 8937 }
8885 8938 #endif
8886 8939
8887 8940 /*
8888 8941 * abort handling:
8889 8942 *
8890 8943 * Notes:
8891 8944 * - if pkt is not NULL, abort just that command
8892 8945 * - if pkt is NULL, abort all outstanding commands for target
8893 8946 */
8894 8947 static int
8895 8948 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
8896 8949 {
8897 8950 mptsas_t *mpt = ADDR2MPT(ap);
8898 8951 int rval;
8899 8952 mptsas_tgt_private_t *tgt_private;
8900 8953 int target, lun;
8901 8954
8902 8955 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
8903 8956 tran_tgt_private;
8904 8957 ASSERT(tgt_private != NULL);
8905 8958 target = tgt_private->t_private->m_devhdl;
8906 8959 lun = tgt_private->t_lun;
8907 8960
8908 8961 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
8909 8962
8910 8963 mutex_enter(&mpt->m_mutex);
8911 8964 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
8912 8965 mutex_exit(&mpt->m_mutex);
8913 8966 return (rval);
8914 8967 }
8915 8968
8916 8969 static int
8917 8970 mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
8918 8971 {
8919 8972 mptsas_cmd_t *sp = NULL;
8920 8973 mptsas_slots_t *slots = mpt->m_active;
8921 8974 int rval = FALSE;
8922 8975
8923 8976 ASSERT(mutex_owned(&mpt->m_mutex));
8924 8977
8925 8978 /*
8926 8979 * Abort the command pkt on the target/lun in ap. If pkt is
8927 8980 * NULL, abort all outstanding commands on that target/lun.
8928 8981 * If you can abort them, return 1, else return 0.
8929 8982 * Each packet that's aborted should be sent back to the target
8930 8983 * driver through the callback routine, with pkt_reason set to
8931 8984 * CMD_ABORTED.
8932 8985 *
8933 8986 * abort cmd pkt on HBA hardware; clean out of outstanding
8934 8987 * command lists, etc.
8935 8988 */
8936 8989 if (pkt != NULL) {
8937 8990 /* abort the specified packet */
8938 8991 sp = PKT2CMD(pkt);
8939 8992
8940 8993 if (sp->cmd_queued) {
8941 8994 NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
8942 8995 (void *)sp));
8943 8996 mptsas_waitq_delete(mpt, sp);
8944 8997 mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
8945 8998 STAT_ABORTED);
8946 8999 mptsas_doneq_add(mpt, sp);
8947 9000 rval = TRUE;
8948 9001 goto done;
8949 9002 }
8950 9003
8951 9004 /*
8952 9005 * Have mpt firmware abort this command
8953 9006 */
8954 9007
8955 9008 if (slots->m_slot[sp->cmd_slot] != NULL) {
8956 9009 rval = mptsas_ioc_task_management(mpt,
8957 9010 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
8958 9011 lun, NULL, 0, 0);
8959 9012
8960 9013 /*
8961 9014 * The transport layer expects only TRUE and FALSE.
8962 9015 * Therefore, if mptsas_ioc_task_management returns
8963 9016 * FAILED we will return FALSE.
8964 9017 */
8965 9018 if (rval == FAILED)
8966 9019 rval = FALSE;
8967 9020 goto done;
8968 9021 }
8969 9022 }
8970 9023
8971 9024 /*
8972 9025 * If pkt is NULL then abort task set
8973 9026 */
8974 9027 rval = mptsas_ioc_task_management(mpt,
8975 9028 MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);
8976 9029
8977 9030 /*
8978 9031 * The transport layer expects only TRUE and FALSE.
8979 9032 * Therefore, if mptsas_ioc_task_management returns
8980 9033 * FAILED we will return FALSE.
8981 9034 */
8982 9035 if (rval == FAILED)
8983 9036 rval = FALSE;
8984 9037
8985 9038 #ifdef MPTSAS_TEST
8986 9039 if (rval && mptsas_test_stop) {
8987 9040 debug_enter("mptsas_do_scsi_abort");
8988 9041 }
8989 9042 #endif
8990 9043
8991 9044 done:
8992 9045 mptsas_doneq_empty(mpt);
8993 9046 return (rval);
8994 9047 }
8995 9048
8996 9049 /*
8997 9050 * capability handling:
8998 9051 * (*tran_getcap). Get the capability named, and return its value.
8999 9052 */
9000 9053 static int
9001 9054 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9002 9055 {
9003 9056 mptsas_t *mpt = ADDR2MPT(ap);
9004 9057 int ckey;
9005 9058 int rval = FALSE;
9006 9059
9007 9060 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9008 9061 ap->a_target, cap, tgtonly));
9009 9062
9010 9063 mutex_enter(&mpt->m_mutex);
9011 9064
9012 9065 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9013 9066 mutex_exit(&mpt->m_mutex);
9014 9067 return (UNDEFINED);
9015 9068 }
9016 9069
9017 9070 switch (ckey) {
9018 9071 case SCSI_CAP_DMA_MAX:
9019 9072 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9020 9073 break;
9021 9074 case SCSI_CAP_ARQ:
9022 9075 rval = TRUE;
9023 9076 break;
9024 9077 case SCSI_CAP_MSG_OUT:
9025 9078 case SCSI_CAP_PARITY:
9026 9079 case SCSI_CAP_UNTAGGED_QING:
9027 9080 rval = TRUE;
9028 9081 break;
9029 9082 case SCSI_CAP_TAGGED_QING:
9030 9083 rval = TRUE;
9031 9084 break;
9032 9085 case SCSI_CAP_RESET_NOTIFICATION:
9033 9086 rval = TRUE;
9034 9087 break;
9035 9088 case SCSI_CAP_LINKED_CMDS:
9036 9089 rval = FALSE;
9037 9090 break;
9038 9091 case SCSI_CAP_QFULL_RETRIES:
9039 9092 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9040 9093 tran_tgt_private))->t_private->m_qfull_retries;
9041 9094 break;
9042 9095 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9043 9096 rval = drv_hztousec(((mptsas_tgt_private_t *)
9044 9097 (ap->a_hba_tran->tran_tgt_private))->
9045 9098 t_private->m_qfull_retry_interval) / 1000;
9046 9099 break;
9047 9100 case SCSI_CAP_CDB_LEN:
9048 9101 rval = CDB_GROUP4;
9049 9102 break;
9050 9103 case SCSI_CAP_INTERCONNECT_TYPE:
9051 9104 rval = INTERCONNECT_SAS;
9052 9105 break;
9053 9106 case SCSI_CAP_TRAN_LAYER_RETRIES:
9054 9107 if (mpt->m_ioc_capabilities &
9055 9108 MPI2_IOCFACTS_CAPABILITY_TLR)
9056 9109 rval = TRUE;
9057 9110 else
9058 9111 rval = FALSE;
9059 9112 break;
9060 9113 default:
9061 9114 rval = UNDEFINED;
9062 9115 break;
9063 9116 }
9064 9117
9065 9118 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9066 9119
9067 9120 mutex_exit(&mpt->m_mutex);
9068 9121 return (rval);
9069 9122 }
9070 9123
9071 9124 /*
9072 9125 * (*tran_setcap). Set the capability named to the value given.
9073 9126 */
9074 9127 static int
9075 9128 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9076 9129 {
9077 9130 mptsas_t *mpt = ADDR2MPT(ap);
9078 9131 int ckey;
9079 9132 int rval = FALSE;
9080 9133
9081 9134 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9082 9135 ap->a_target, cap, value, tgtonly));
9083 9136
9084 9137 if (!tgtonly) {
9085 9138 return (rval);
9086 9139 }
9087 9140
9088 9141 mutex_enter(&mpt->m_mutex);
9089 9142
9090 9143 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9091 9144 mutex_exit(&mpt->m_mutex);
9092 9145 return (UNDEFINED);
9093 9146 }
9094 9147
9095 9148 switch (ckey) {
9096 9149 case SCSI_CAP_DMA_MAX:
9097 9150 case SCSI_CAP_MSG_OUT:
9098 9151 case SCSI_CAP_PARITY:
9099 9152 case SCSI_CAP_INITIATOR_ID:
9100 9153 case SCSI_CAP_LINKED_CMDS:
9101 9154 case SCSI_CAP_UNTAGGED_QING:
9102 9155 case SCSI_CAP_RESET_NOTIFICATION:
9103 9156 /*
9104 9157 * None of these are settable via
9105 9158 * the capability interface.
9106 9159 */
9107 9160 break;
9108 9161 case SCSI_CAP_ARQ:
9109 9162 /*
9110 9163 * We cannot turn off arq so return false if asked to
9111 9164 */
9112 9165 if (value) {
9113 9166 rval = TRUE;
9114 9167 } else {
9115 9168 rval = FALSE;
9116 9169 }
9117 9170 break;
9118 9171 case SCSI_CAP_TAGGED_QING:
9119 9172 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9120 9173 (ap->a_hba_tran->tran_tgt_private))->t_private,
9121 9174 MAX_THROTTLE);
9122 9175 rval = TRUE;
9123 9176 break;
9124 9177 case SCSI_CAP_QFULL_RETRIES:
9125 9178 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9126 9179 t_private->m_qfull_retries = (uchar_t)value;
9127 9180 rval = TRUE;
9128 9181 break;
9129 9182 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9130 9183 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9131 9184 t_private->m_qfull_retry_interval =
9132 9185 drv_usectohz(value * 1000);
9133 9186 rval = TRUE;
9134 9187 break;
9135 9188 default:
9136 9189 rval = UNDEFINED;
9137 9190 break;
9138 9191 }
9139 9192 mutex_exit(&mpt->m_mutex);
9140 9193 return (rval);
9141 9194 }
9142 9195
9143 9196 /*
9144 9197 * Utility routine for mptsas_ifsetcap/ifgetcap
9145 9198 */
9146 9199 /*ARGSUSED*/
9147 9200 static int
9148 9201 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9149 9202 {
9150 9203 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9151 9204
9152 9205 if (!cap)
9153 9206 return (FALSE);
9154 9207
9155 9208 *cidxp = scsi_hba_lookup_capstr(cap);
9156 9209 return (TRUE);
9157 9210 }
9158 9211
9159 9212 static int
9160 9213 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9161 9214 {
9162 9215 mptsas_slots_t *old_active = mpt->m_active;
9163 9216 mptsas_slots_t *new_active;
9164 9217 size_t size;
9165 - int rval = -1, i;
9166 9218
9167 9219 /*
9168 9220 * if there are active commands, then we cannot
9169 9221 * change size of active slots array.
9170 9222 */
9171 9223 ASSERT(mpt->m_ncmds == 0);
9172 9224
9173 9225 size = MPTSAS_SLOTS_SIZE(mpt);
9174 9226 new_active = kmem_zalloc(size, flag);
9175 9227 if (new_active == NULL) {
9176 9228 NDBG1(("new active alloc failed"));
9177 - return (rval);
9229 + return (-1);
9178 9230 }
9179 9231 /*
9180 9232 * Since SMID 0 is reserved and the TM slot is reserved, the
9181 9233 * number of slots that can be used at any one time is
9182 9234 * m_max_requests - 2.
9183 9235 */
9184 - new_active->m_n_slots = (mpt->m_max_requests - 2);
9236 + new_active->m_n_normal = (mpt->m_max_requests - 2);
9185 9237 new_active->m_size = size;
9186 - new_active->m_tags = 1;
9187 - if (old_active) {
9188 - new_active->m_tgttbl = old_active->m_tgttbl;
9189 - new_active->m_smptbl = old_active->m_smptbl;
9190 - new_active->m_num_raid_configs =
9191 - old_active->m_num_raid_configs;
9192 - for (i = 0; i < new_active->m_num_raid_configs; i++) {
9193 - new_active->m_raidconfig[i] =
9194 - old_active->m_raidconfig[i];
9195 - }
9238 + new_active->m_rotor = 1;
9239 + if (old_active)
9196 9240 mptsas_free_active_slots(mpt);
9197 - }
9198 9241 mpt->m_active = new_active;
9199 - rval = 0;
9200 9242
9201 - return (rval);
9243 + return (0);
9202 9244 }
9203 9245
9204 9246 static void
9205 9247 mptsas_free_active_slots(mptsas_t *mpt)
9206 9248 {
9207 9249 mptsas_slots_t *active = mpt->m_active;
9208 9250 size_t size;
9209 9251
9210 9252 if (active == NULL)
9211 9253 return;
9212 9254 size = active->m_size;
9213 9255 kmem_free(active, size);
9214 9256 mpt->m_active = NULL;
9215 9257 }
9216 9258
9217 9259 /*
9218 9260 * Error logging, printing, and debug print routines.
9219 9261 */
9220 9262 static char *mptsas_label = "mpt_sas";
9221 9263
9222 9264 /*PRINTFLIKE3*/
9223 9265 void
9224 9266 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9225 9267 {
9226 9268 dev_info_t *dev;
9227 9269 va_list ap;
9228 9270
9229 9271 if (mpt) {
9230 9272 dev = mpt->m_dip;
9231 9273 } else {
9232 9274 dev = 0;
9233 9275 }
9234 9276
9235 9277 mutex_enter(&mptsas_log_mutex);
9236 9278
9237 9279 va_start(ap, fmt);
9238 9280 (void) vsprintf(mptsas_log_buf, fmt, ap);
9239 9281 va_end(ap);
9240 9282
9241 9283 if (level == CE_CONT) {
9242 9284 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9243 9285 } else {
9244 9286 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9245 9287 }
9246 9288
9247 9289 mutex_exit(&mptsas_log_mutex);
9248 9290 }
9249 9291
9250 9292 #ifdef MPTSAS_DEBUG
9251 9293 /*PRINTFLIKE1*/
9252 9294 void
9253 9295 mptsas_printf(char *fmt, ...)
9254 9296 {
9255 9297 dev_info_t *dev = 0;
9256 9298 va_list ap;
9257 9299
9258 9300 mutex_enter(&mptsas_log_mutex);
9259 9301
9260 9302 va_start(ap, fmt);
9261 9303 (void) vsprintf(mptsas_log_buf, fmt, ap);
9262 9304 va_end(ap);
9263 9305
9264 9306 #ifdef PROM_PRINTF
9265 9307 prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
9266 9308 #else
9267 9309 scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
9268 9310 #endif
9269 9311 mutex_exit(&mptsas_log_mutex);
9270 9312 }
9271 9313 #endif
9272 9314
9273 9315 /*
9274 9316 * timeout handling
9275 9317 */
9276 9318 static void
9277 9319 mptsas_watch(void *arg)
9278 9320 {
9279 9321 #ifndef __lock_lint
9280 9322 _NOTE(ARGUNUSED(arg))
9281 9323 #endif
9282 9324
9283 9325 mptsas_t *mpt;
9284 9326 uint32_t doorbell;
9285 9327
9286 9328 NDBG30(("mptsas_watch"));
9287 9329
9288 9330 rw_enter(&mptsas_global_rwlock, RW_READER);
9289 9331 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9290 9332
9291 9333 mutex_enter(&mpt->m_mutex);
9292 9334
9293 9335 /* Skip device if not powered on */
9294 9336 if (mpt->m_options & MPTSAS_OPT_PM) {
9295 9337 if (mpt->m_power_level == PM_LEVEL_D0) {
9296 9338 (void) pm_busy_component(mpt->m_dip, 0);
9297 9339 mpt->m_busy = 1;
9298 9340 } else {
9299 9341 mutex_exit(&mpt->m_mutex);
9300 9342 continue;
9301 9343 }
9302 9344 }
9303 9345
9304 9346 /*
9305 9347 * Check if controller is in a FAULT state. If so, reset it.
9306 9348 */
9307 9349 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9308 9350 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9309 9351 doorbell &= MPI2_DOORBELL_DATA_MASK;
9310 9352 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9311 9353 "code: %04x", doorbell);
9312 9354 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9313 9355 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9314 9356 mptsas_log(mpt, CE_WARN, "Reset failed"
9315 9357 "after fault was detected");
9316 9358 }
9317 9359 }
9318 9360
9319 9361 /*
9320 9362 * For now, always call mptsas_watchsubr.
9321 9363 */
9322 9364 mptsas_watchsubr(mpt);
9323 9365
9324 9366 if (mpt->m_options & MPTSAS_OPT_PM) {
9325 9367 mpt->m_busy = 0;
9326 9368 (void) pm_idle_component(mpt->m_dip, 0);
9327 9369 }
9328 9370
9329 9371 mutex_exit(&mpt->m_mutex);
9330 9372 }
9331 9373 rw_exit(&mptsas_global_rwlock);
9332 9374
9333 9375 mutex_enter(&mptsas_global_mutex);
9334 9376 if (mptsas_timeouts_enabled)
9335 9377 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9336 9378 mutex_exit(&mptsas_global_mutex);
9337 9379 }
9338 9380
9339 9381 static void
9340 9382 mptsas_watchsubr(mptsas_t *mpt)
9341 9383 {
9342 9384 int i;
9343 9385 mptsas_cmd_t *cmd;
9344 9386 mptsas_target_t *ptgt = NULL;
9345 9387
9388 + ASSERT(MUTEX_HELD(&mpt->m_mutex));
9389 +
9346 9390 NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));
9347 9391
9348 9392 #ifdef MPTSAS_TEST
9349 9393 if (mptsas_enable_untagged) {
9350 9394 mptsas_test_untagged++;
9351 9395 }
9352 9396 #endif
9353 9397
9354 9398 /*
9355 9399 * Check for commands stuck in active slot
9356 9400 * Account for TM requests, which use the last SMID.
9357 9401 */
9358 - for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
9402 + for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
9359 9403 if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
9360 9404 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
9361 9405 cmd->cmd_active_timeout -=
9362 9406 mptsas_scsi_watchdog_tick;
9363 9407 if (cmd->cmd_active_timeout <= 0) {
9364 9408 /*
9365 9409 * There seems to be a command stuck
9366 9410 * in the active slot. Drain throttle.
9367 9411 */
9368 9412 mptsas_set_throttle(mpt,
9369 9413 cmd->cmd_tgt_addr,
9370 9414 DRAIN_THROTTLE);
9371 9415 }
9372 9416 }
9373 9417 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9374 9418 (cmd->cmd_flags & CFLAG_CONFIG) ||
9375 9419 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9376 9420 cmd->cmd_active_timeout -=
9377 9421 mptsas_scsi_watchdog_tick;
9378 9422 if (cmd->cmd_active_timeout <= 0) {
9379 9423 /*
9380 9424 * passthrough command timeout
9381 9425 */
9382 9426 cmd->cmd_flags |= (CFLAG_FINISHED |
9383 9427 CFLAG_TIMEOUT);
9384 9428 cv_broadcast(&mpt->m_passthru_cv);
9385 9429 cv_broadcast(&mpt->m_config_cv);
9386 9430 cv_broadcast(&mpt->m_fw_diag_cv);
9387 9431 }
9388 9432 }
9389 9433 }
9390 9434 }
9391 9435
9392 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9393 - MPTSAS_HASH_FIRST);
9394 - while (ptgt != NULL) {
9436 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9437 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
9395 9438 /*
9396 9439 * If we were draining due to a qfull condition,
9397 9440 * go back to full throttle.
9398 9441 */
9399 9442 if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
9400 9443 (ptgt->m_t_throttle > HOLD_THROTTLE) &&
9401 9444 (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
9402 9445 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9403 9446 mptsas_restart_hba(mpt);
9404 9447 }
9405 9448
9406 9449 if ((ptgt->m_t_ncmds > 0) &&
9407 9450 (ptgt->m_timebase)) {
9408 9451
9409 9452 if (ptgt->m_timebase <=
9410 9453 mptsas_scsi_watchdog_tick) {
9411 9454 ptgt->m_timebase +=
9412 9455 mptsas_scsi_watchdog_tick;
9413 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9414 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9415 9456 continue;
9416 9457 }
9417 9458
9418 9459 ptgt->m_timeout -= mptsas_scsi_watchdog_tick;
9419 9460
9420 9461 if (ptgt->m_timeout < 0) {
9421 9462 mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
9422 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9423 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9424 9463 continue;
9425 9464 }
9426 9465
9427 9466 if ((ptgt->m_timeout) <=
9428 9467 mptsas_scsi_watchdog_tick) {
9429 9468 NDBG23(("pending timeout"));
9430 9469 mptsas_set_throttle(mpt, ptgt,
9431 9470 DRAIN_THROTTLE);
9432 9471 }
9433 9472 }
9434 -
9435 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9436 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9437 9473 }
9438 9474 }
9439 9475
9440 9476 /*
9441 9477 * timeout recovery
9442 9478 */
9443 9479 static void
9444 9480 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9445 9481 {
9446 9482
9447 9483 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9448 9484 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9449 9485 "Target %d", devhdl);
9450 9486
9451 9487 /*
9452 9488 * If the current target is not the target passed in,
9453 9489 * try to reset that target.
9454 9490 */
9455 9491 NDBG29(("mptsas_cmd_timeout: device reset"));
9456 9492 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9457 9493 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9458 9494 "recovery failed!", devhdl);
9459 9495 }
9460 9496 }
9461 9497
9462 9498 /*
9463 9499 * Device / Hotplug control
9464 9500 */
9465 9501 static int
9466 9502 mptsas_scsi_quiesce(dev_info_t *dip)
9467 9503 {
9468 9504 mptsas_t *mpt;
9469 9505 scsi_hba_tran_t *tran;
9470 9506
9471 9507 tran = ddi_get_driver_private(dip);
9472 9508 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9473 9509 return (-1);
9474 9510
9475 9511 return (mptsas_quiesce_bus(mpt));
9476 9512 }
9477 9513
9478 9514 static int
9479 9515 mptsas_scsi_unquiesce(dev_info_t *dip)
9480 9516 {
9481 9517 mptsas_t *mpt;
9482 9518 scsi_hba_tran_t *tran;
9483 9519
9484 9520 tran = ddi_get_driver_private(dip);
9485 9521 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9486 9522 return (-1);
9487 9523
9488 9524 return (mptsas_unquiesce_bus(mpt));
9489 9525 }
9490 9526
9491 9527 static int
9492 9528 mptsas_quiesce_bus(mptsas_t *mpt)
9493 9529 {
9494 9530 mptsas_target_t *ptgt = NULL;
9495 9531
9496 9532 NDBG28(("mptsas_quiesce_bus"));
9497 9533 mutex_enter(&mpt->m_mutex);
9498 9534
9499 9535 /* Set all the throttles to zero */
9500 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9501 - MPTSAS_HASH_FIRST);
9502 - while (ptgt != NULL) {
9536 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9537 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
9503 9538 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9504 -
9505 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9506 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9507 9539 }
9508 9540
9509 9541 /* If there are any outstanding commands in the queue */
9510 9542 if (mpt->m_ncmds) {
9511 9543 mpt->m_softstate |= MPTSAS_SS_DRAINING;
9512 9544 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9513 9545 mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
9514 9546 if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
9515 9547 /*
9516 9548 * Quiesce has been interrupted
9517 9549 */
9518 9550 mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9519 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9520 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9521 - while (ptgt != NULL) {
9551 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9552 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
9522 9553 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9523 -
9524 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9525 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9526 9554 }
9527 9555 mptsas_restart_hba(mpt);
9528 9556 if (mpt->m_quiesce_timeid != 0) {
9529 9557 timeout_id_t tid = mpt->m_quiesce_timeid;
9530 9558 mpt->m_quiesce_timeid = 0;
9531 9559 mutex_exit(&mpt->m_mutex);
9532 9560 (void) untimeout(tid);
9533 9561 return (-1);
9534 9562 }
9535 9563 mutex_exit(&mpt->m_mutex);
9536 9564 return (-1);
9537 9565 } else {
9538 9566 /* Bus has been quiesced */
9539 9567 ASSERT(mpt->m_quiesce_timeid == 0);
9540 9568 mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9541 9569 mpt->m_softstate |= MPTSAS_SS_QUIESCED;
9542 9570 mutex_exit(&mpt->m_mutex);
9543 9571 return (0);
9544 9572 }
9545 9573 }
9546 9574 /* Bus was not busy - QUIESCED */
9547 9575 mutex_exit(&mpt->m_mutex);
9548 9576
9549 9577 return (0);
9550 9578 }
9551 9579
9552 9580 static int
9553 9581 mptsas_unquiesce_bus(mptsas_t *mpt)
9554 9582 {
9555 9583 mptsas_target_t *ptgt = NULL;
9556 9584
9557 9585 NDBG28(("mptsas_unquiesce_bus"));
9558 9586 mutex_enter(&mpt->m_mutex);
9559 9587 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9560 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9561 - MPTSAS_HASH_FIRST);
9562 - while (ptgt != NULL) {
9588 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9589 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
9563 9590 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9564 -
9565 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9566 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9567 9591 }
9568 9592 mptsas_restart_hba(mpt);
9569 9593 mutex_exit(&mpt->m_mutex);
9570 9594 return (0);
9571 9595 }
9572 9596
9573 9597 static void
9574 9598 mptsas_ncmds_checkdrain(void *arg)
9575 9599 {
9576 9600 mptsas_t *mpt = arg;
9577 9601 mptsas_target_t *ptgt = NULL;
9578 9602
9579 9603 mutex_enter(&mpt->m_mutex);
9580 9604 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
9581 9605 mpt->m_quiesce_timeid = 0;
9582 9606 if (mpt->m_ncmds == 0) {
9583 9607 /* Command queue has been drained */
9584 9608 cv_signal(&mpt->m_cv);
9585 9609 } else {
9586 9610 /*
9587 9611 * The throttle may have been reset because
9588 9612 * of a SCSI bus reset
9589 9613 */
9590 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9591 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9592 - while (ptgt != NULL) {
9614 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9615 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
9593 9616 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9594 -
9595 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9596 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9597 9617 }
9598 9618
9599 9619 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9600 9620 mpt, (MPTSAS_QUIESCE_TIMEOUT *
9601 9621 drv_usectohz(1000000)));
9602 9622 }
9603 9623 }
9604 9624 mutex_exit(&mpt->m_mutex);
9605 9625 }
9606 9626
9607 9627 /*ARGSUSED*/
9608 9628 static void
9609 9629 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9610 9630 {
9611 9631 int i;
9612 9632 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9613 9633 char buf[128];
9614 9634
9615 9635 buf[0] = '\0';
9616 9636 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9617 9637 Tgt(cmd), Lun(cmd)));
9618 9638 (void) sprintf(&buf[0], "\tcdb=[");
9619 9639 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9620 9640 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9621 9641 }
9622 9642 (void) sprintf(&buf[strlen(buf)], " ]");
9623 9643 NDBG25(("?%s\n", buf));
9624 9644 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9625 9645 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9626 9646 cmd->cmd_pkt->pkt_state));
9627 9647 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
9628 9648 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
9629 9649 }
9630 9650
9631 9651 static void
9632 9652 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9633 9653 {
9634 9654 caddr_t memp;
9635 9655 pMPI2RequestHeader_t request_hdrp;
9636 9656 struct scsi_pkt *pkt = cmd->cmd_pkt;
9637 9657 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
9638 9658 uint32_t request_size, data_size, dataout_size;
9639 9659 uint32_t direction;
9640 9660 ddi_dma_cookie_t data_cookie;
9641 9661 ddi_dma_cookie_t dataout_cookie;
9642 9662 uint32_t request_desc_low, request_desc_high = 0;
9643 9663 uint32_t i, sense_bufp;
9644 9664 uint8_t desc_type;
9645 9665 uint8_t *request, function;
9646 9666 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
9647 9667 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
9648 9668
9649 9669 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9650 9670
9651 9671 request = pt->request;
9652 9672 direction = pt->direction;
9653 9673 request_size = pt->request_size;
9654 9674 data_size = pt->data_size;
9655 9675 dataout_size = pt->dataout_size;
9656 9676 data_cookie = pt->data_cookie;
9657 9677 dataout_cookie = pt->dataout_cookie;
9658 9678
9659 9679 /*
9660 9680 * Store the passthrough message in memory location
9661 9681 * corresponding to our slot number
9662 9682 */
9663 9683 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9664 9684 request_hdrp = (pMPI2RequestHeader_t)memp;
9665 9685 bzero(memp, mpt->m_req_frame_size);
9666 9686
9667 9687 for (i = 0; i < request_size; i++) {
9668 9688 bcopy(request + i, memp + i, 1);
9669 9689 }
9670 9690
9671 9691 if (data_size || dataout_size) {
9672 9692 pMpi2SGESimple64_t sgep;
9673 9693 uint32_t sge_flags;
9674 9694
9675 9695 sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9676 9696 request_size);
9677 9697 if (dataout_size) {
9678 9698
9679 9699 sge_flags = dataout_size |
9680 9700 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9681 9701 MPI2_SGE_FLAGS_END_OF_BUFFER |
9682 9702 MPI2_SGE_FLAGS_HOST_TO_IOC |
9683 9703 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9684 9704 MPI2_SGE_FLAGS_SHIFT);
9685 9705 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9686 9706 ddi_put32(acc_hdl, &sgep->Address.Low,
9687 9707 (uint32_t)(dataout_cookie.dmac_laddress &
9688 9708 0xffffffffull));
9689 9709 ddi_put32(acc_hdl, &sgep->Address.High,
9690 9710 (uint32_t)(dataout_cookie.dmac_laddress
9691 9711 >> 32));
9692 9712 sgep++;
9693 9713 }
9694 9714 sge_flags = data_size;
9695 9715 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9696 9716 MPI2_SGE_FLAGS_LAST_ELEMENT |
9697 9717 MPI2_SGE_FLAGS_END_OF_BUFFER |
9698 9718 MPI2_SGE_FLAGS_END_OF_LIST |
9699 9719 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9700 9720 MPI2_SGE_FLAGS_SHIFT);
9701 9721 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9702 9722 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9703 9723 MPI2_SGE_FLAGS_SHIFT);
9704 9724 } else {
9705 9725 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9706 9726 MPI2_SGE_FLAGS_SHIFT);
9707 9727 }
9708 9728 ddi_put32(acc_hdl, &sgep->FlagsLength,
9709 9729 sge_flags);
9710 9730 ddi_put32(acc_hdl, &sgep->Address.Low,
9711 9731 (uint32_t)(data_cookie.dmac_laddress &
9712 9732 0xffffffffull));
9713 9733 ddi_put32(acc_hdl, &sgep->Address.High,
9714 9734 (uint32_t)(data_cookie.dmac_laddress >> 32));
9715 9735 }
9716 9736
9717 9737 function = request_hdrp->Function;
9718 9738 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9719 9739 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9720 9740 pMpi2SCSIIORequest_t scsi_io_req;
9721 9741
9722 9742 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9723 9743 /*
 9724 9744 		 * Put the SGEs for the data and data_out buffers at the
 9725 9745 		 * end of the scsi_io_request message header (64 bytes in
 9726 9746 		 * total). The residual space following those SGEs is used
 9727 9747 		 * for sense data.
9728 9748 */
9729 9749 ddi_put8(acc_hdl,
9730 9750 &scsi_io_req->SenseBufferLength,
9731 9751 (uint8_t)(request_size - 64));
9732 9752
9733 9753 sense_bufp = mpt->m_req_frame_dma_addr +
9734 9754 (mpt->m_req_frame_size * cmd->cmd_slot);
9735 9755 sense_bufp += 64;
9736 9756 ddi_put32(acc_hdl,
9737 9757 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9738 9758
9739 9759 /*
9740 9760 * Set SGLOffset0 value
9741 9761 */
9742 9762 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9743 9763 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9744 9764
9745 9765 /*
9746 9766 * Setup descriptor info. RAID passthrough must use the
9747 9767 * default request descriptor which is already set, so if this
9748 9768 * is a SCSI IO request, change the descriptor to SCSI IO.
9749 9769 */
9750 9770 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9751 9771 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9752 9772 request_desc_high = (ddi_get16(acc_hdl,
9753 9773 &scsi_io_req->DevHandle) << 16);
9754 9774 }
9755 9775 }
9756 9776
9757 9777 /*
9758 9778 * We must wait till the message has been completed before
9759 9779 * beginning the next message so we wait for this one to
9760 9780 * finish.
9761 9781 */
9762 9782 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9763 9783 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9764 9784 cmd->cmd_rfm = NULL;
9765 9785 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9766 9786 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9767 9787 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9768 9788 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9769 9789 }
9770 9790 }
9771 9791
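mptsas_start_passthru() above builds each simple SGE by packing the transfer length and a flag byte into a single FlagsLength word and by splitting the 64-bit DMA cookie address across the Low/High fields. A minimal sketch of that packing follows; the flag macros are hypothetical stand-ins for the MPI2 definitions, whose real values and shift come from the MPI2 headers.

#include <stdint.h>

/* Hypothetical stand-ins for the MPI2 SGE flag macros used above. */
#define	SGE_FLAGS_SHIFT		24
#define	SGE_FLAGS_SIMPLE	0x10
#define	SGE_FLAGS_64BIT		0x02

/* Pack the transfer length (low bits) and flags (high bits) into FlagsLength. */
uint32_t
sge_flagslength(uint32_t len, uint32_t flags)
{
	return (len | (flags << SGE_FLAGS_SHIFT));
}

/* Split a 64-bit DMA cookie address into an SGE's Low/High address words. */
void
sge_address(uint64_t laddr, uint32_t *low, uint32_t *high)
{
	*low = (uint32_t)(laddr & 0xffffffffull);
	*high = (uint32_t)(laddr >> 32);
}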
9772 9792
9773 9793
9774 9794 static int
9775 9795 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
9776 9796 uint8_t *data, uint32_t request_size, uint32_t reply_size,
9777 9797 uint32_t data_size, uint32_t direction, uint8_t *dataout,
9778 9798 uint32_t dataout_size, short timeout, int mode)
9779 9799 {
9780 9800 mptsas_pt_request_t pt;
9781 9801 mptsas_dma_alloc_state_t data_dma_state;
9782 9802 mptsas_dma_alloc_state_t dataout_dma_state;
9783 9803 caddr_t memp;
9784 9804 mptsas_cmd_t *cmd = NULL;
9785 9805 struct scsi_pkt *pkt;
9786 9806 uint32_t reply_len = 0, sense_len = 0;
9787 9807 pMPI2RequestHeader_t request_hdrp;
9788 9808 pMPI2RequestHeader_t request_msg;
9789 9809 pMPI2DefaultReply_t reply_msg;
9790 9810 Mpi2SCSIIOReply_t rep_msg;
9791 9811 int i, status = 0, pt_flags = 0, rv = 0;
9792 9812 int rvalue;
9793 9813 uint8_t function;
9794 9814
9795 9815 ASSERT(mutex_owned(&mpt->m_mutex));
9796 9816
9797 9817 reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
9798 9818 bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
9799 9819 request_msg = kmem_zalloc(request_size, KM_SLEEP);
9800 9820
9801 9821 mutex_exit(&mpt->m_mutex);
9802 9822 /*
 9803 9823 	 * copy in the request buffer, since it could be used by
 9804 9824 	 * another thread once the passthrough request is put on the waitq
9805 9825 */
9806 9826 if (ddi_copyin(request, request_msg, request_size, mode)) {
9807 9827 mutex_enter(&mpt->m_mutex);
9808 9828 status = EFAULT;
9809 9829 mptsas_log(mpt, CE_WARN, "failed to copy request data");
9810 9830 goto out;
9811 9831 }
9812 9832 mutex_enter(&mpt->m_mutex);
9813 9833
9814 9834 function = request_msg->Function;
9815 9835 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
9816 9836 pMpi2SCSITaskManagementRequest_t task;
9817 9837 task = (pMpi2SCSITaskManagementRequest_t)request_msg;
9818 9838 mptsas_setup_bus_reset_delay(mpt);
9819 9839 rv = mptsas_ioc_task_management(mpt, task->TaskType,
9820 9840 task->DevHandle, (int)task->LUN[1], reply, reply_size,
9821 9841 mode);
9822 9842
9823 9843 if (rv != TRUE) {
9824 9844 status = EIO;
9825 9845 mptsas_log(mpt, CE_WARN, "task management failed");
9826 9846 }
9827 9847 goto out;
9828 9848 }
9829 9849
9830 9850 if (data_size != 0) {
9831 9851 data_dma_state.size = data_size;
9832 9852 if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
9833 9853 status = ENOMEM;
9834 9854 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9835 9855 "resource");
9836 9856 goto out;
9837 9857 }
9838 9858 pt_flags |= MPTSAS_DATA_ALLOCATED;
9839 9859 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9840 9860 mutex_exit(&mpt->m_mutex);
9841 9861 for (i = 0; i < data_size; i++) {
9842 9862 if (ddi_copyin(data + i, (uint8_t *)
9843 9863 data_dma_state.memp + i, 1, mode)) {
9844 9864 mutex_enter(&mpt->m_mutex);
9845 9865 status = EFAULT;
9846 9866 mptsas_log(mpt, CE_WARN, "failed to "
9847 9867 "copy read data");
9848 9868 goto out;
9849 9869 }
9850 9870 }
9851 9871 mutex_enter(&mpt->m_mutex);
9852 9872 }
9853 9873 }
9854 9874
9855 9875 if (dataout_size != 0) {
9856 9876 dataout_dma_state.size = dataout_size;
9857 9877 if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
9858 9878 status = ENOMEM;
9859 9879 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
9860 9880 "resource");
9861 9881 goto out;
9862 9882 }
9863 9883 pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
9864 9884 mutex_exit(&mpt->m_mutex);
9865 9885 for (i = 0; i < dataout_size; i++) {
9866 9886 if (ddi_copyin(dataout + i, (uint8_t *)
9867 9887 dataout_dma_state.memp + i, 1, mode)) {
9868 9888 mutex_enter(&mpt->m_mutex);
9869 9889 mptsas_log(mpt, CE_WARN, "failed to copy out"
9870 9890 " data");
9871 9891 status = EFAULT;
9872 9892 goto out;
9873 9893 }
9874 9894 }
9875 9895 mutex_enter(&mpt->m_mutex);
9876 9896 }
9877 9897
9878 9898 if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
9879 9899 status = EAGAIN;
9880 9900 mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
9881 9901 goto out;
9882 9902 }
9883 9903 pt_flags |= MPTSAS_REQUEST_POOL_CMD;
9884 9904
9885 9905 bzero((caddr_t)cmd, sizeof (*cmd));
9886 9906 bzero((caddr_t)pkt, scsi_pkt_size());
9887 9907 bzero((caddr_t)&pt, sizeof (pt));
9888 9908
9889 9909 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
9890 9910
9891 9911 pt.request = (uint8_t *)request_msg;
9892 9912 pt.direction = direction;
9893 9913 pt.request_size = request_size;
9894 9914 pt.data_size = data_size;
9895 9915 pt.dataout_size = dataout_size;
9896 9916 pt.data_cookie = data_dma_state.cookie;
9897 9917 pt.dataout_cookie = dataout_dma_state.cookie;
9898 9918
9899 9919 /*
9900 9920 * Form a blank cmd/pkt to store the acknowledgement message
9901 9921 */
9902 9922 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
9903 9923 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
9904 9924 pkt->pkt_ha_private = (opaque_t)&pt;
9905 9925 pkt->pkt_flags = FLAG_HEAD;
9906 9926 pkt->pkt_time = timeout;
9907 9927 cmd->cmd_pkt = pkt;
9908 9928 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
9909 9929
9910 9930 /*
9911 9931 * Save the command in a slot
9912 9932 */
9913 9933 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
9914 9934 /*
9915 9935 * Once passthru command get slot, set cmd_flags
9916 9936 * CFLAG_PREPARED.
9917 9937 */
9918 9938 cmd->cmd_flags |= CFLAG_PREPARED;
9919 9939 mptsas_start_passthru(mpt, cmd);
9920 9940 } else {
9921 9941 mptsas_waitq_add(mpt, cmd);
9922 9942 }
9923 9943
9924 9944 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
9925 9945 cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
9926 9946 }
9927 9947
9928 9948 if (cmd->cmd_flags & CFLAG_PREPARED) {
9929 9949 memp = mpt->m_req_frame + (mpt->m_req_frame_size *
9930 9950 cmd->cmd_slot);
9931 9951 request_hdrp = (pMPI2RequestHeader_t)memp;
9932 9952 }
9933 9953
9934 9954 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
9935 9955 status = ETIMEDOUT;
9936 9956 mptsas_log(mpt, CE_WARN, "passthrough command timeout");
9937 9957 pt_flags |= MPTSAS_CMD_TIMEOUT;
9938 9958 goto out;
9939 9959 }
9940 9960
9941 9961 if (cmd->cmd_rfm) {
9942 9962 /*
 9943 9963 		 * A zero cmd_rfm means the command reply is a CONTEXT
 9944 9964 		 * reply, so no PCI write is needed to post the free reply
 9945 9965 		 * SMFA because no reply message frame is used.
 9946 9966 		 * A non-zero cmd_rfm means the reply is an ADDRESS
 9947 9967 		 * reply and a reply message frame is used.
9948 9968 */
9949 9969 pt_flags |= MPTSAS_ADDRESS_REPLY;
9950 9970 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
9951 9971 DDI_DMA_SYNC_FORCPU);
9952 9972 reply_msg = (pMPI2DefaultReply_t)
9953 9973 (mpt->m_reply_frame + (cmd->cmd_rfm -
9954 9974 mpt->m_reply_frame_dma_addr));
9955 9975 }
9956 9976
9957 9977 mptsas_fma_check(mpt, cmd);
9958 9978 if (pkt->pkt_reason == CMD_TRAN_ERR) {
9959 9979 status = EAGAIN;
9960 9980 mptsas_log(mpt, CE_WARN, "passthru fma error");
9961 9981 goto out;
9962 9982 }
9963 9983 if (pkt->pkt_reason == CMD_RESET) {
9964 9984 status = EAGAIN;
9965 9985 mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
9966 9986 goto out;
9967 9987 }
9968 9988
9969 9989 if (pkt->pkt_reason == CMD_INCOMPLETE) {
9970 9990 status = EIO;
9971 9991 mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
9972 9992 goto out;
9973 9993 }
9974 9994
9975 9995 mutex_exit(&mpt->m_mutex);
9976 9996 if (cmd->cmd_flags & CFLAG_PREPARED) {
9977 9997 function = request_hdrp->Function;
9978 9998 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9979 9999 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9980 10000 reply_len = sizeof (MPI2_SCSI_IO_REPLY);
9981 10001 sense_len = reply_size - reply_len;
9982 10002 } else {
9983 10003 reply_len = reply_size;
9984 10004 sense_len = 0;
9985 10005 }
9986 10006
9987 10007 for (i = 0; i < reply_len; i++) {
9988 10008 if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
9989 10009 mode)) {
9990 10010 mutex_enter(&mpt->m_mutex);
9991 10011 status = EFAULT;
9992 10012 mptsas_log(mpt, CE_WARN, "failed to copy out "
9993 10013 "reply data");
9994 10014 goto out;
9995 10015 }
9996 10016 }
9997 10017 for (i = 0; i < sense_len; i++) {
9998 10018 if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
9999 10019 reply + reply_len + i, 1, mode)) {
10000 10020 mutex_enter(&mpt->m_mutex);
10001 10021 status = EFAULT;
10002 10022 mptsas_log(mpt, CE_WARN, "failed to copy out "
10003 10023 "sense data");
10004 10024 goto out;
10005 10025 }
10006 10026 }
10007 10027 }
10008 10028
10009 10029 if (data_size) {
10010 10030 if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10011 10031 (void) ddi_dma_sync(data_dma_state.handle, 0, 0,
10012 10032 DDI_DMA_SYNC_FORCPU);
10013 10033 for (i = 0; i < data_size; i++) {
10014 10034 if (ddi_copyout((uint8_t *)(
10015 10035 data_dma_state.memp + i), data + i, 1,
10016 10036 mode)) {
10017 10037 mutex_enter(&mpt->m_mutex);
10018 10038 status = EFAULT;
10019 10039 mptsas_log(mpt, CE_WARN, "failed to "
10020 10040 "copy out the reply data");
10021 10041 goto out;
10022 10042 }
10023 10043 }
10024 10044 }
10025 10045 }
10026 10046 mutex_enter(&mpt->m_mutex);
10027 10047 out:
10028 10048 /*
10029 10049 * Put the reply frame back on the free queue, increment the free
10030 10050 * index, and write the new index to the free index register. But only
10031 10051 * if this reply is an ADDRESS reply.
10032 10052 */
10033 10053 if (pt_flags & MPTSAS_ADDRESS_REPLY) {
10034 10054 ddi_put32(mpt->m_acc_free_queue_hdl,
10035 10055 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10036 10056 cmd->cmd_rfm);
10037 10057 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10038 10058 DDI_DMA_SYNC_FORDEV);
10039 10059 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10040 10060 mpt->m_free_index = 0;
10041 10061 }
10042 10062 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10043 10063 mpt->m_free_index);
10044 10064 }
10045 10065 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10046 10066 mptsas_remove_cmd(mpt, cmd);
10047 10067 pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10048 10068 }
10049 10069 if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
10050 10070 mptsas_return_to_pool(mpt, cmd);
10051 10071 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
10052 10072 if (mptsas_check_dma_handle(data_dma_state.handle) !=
10053 10073 DDI_SUCCESS) {
10054 10074 ddi_fm_service_impact(mpt->m_dip,
10055 10075 DDI_SERVICE_UNAFFECTED);
10056 10076 status = EFAULT;
10057 10077 }
10058 10078 mptsas_dma_free(&data_dma_state);
10059 10079 }
10060 10080 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
10061 10081 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
10062 10082 DDI_SUCCESS) {
10063 10083 ddi_fm_service_impact(mpt->m_dip,
10064 10084 DDI_SERVICE_UNAFFECTED);
10065 10085 status = EFAULT;
10066 10086 }
10067 10087 mptsas_dma_free(&dataout_dma_state);
10068 10088 }
10069 10089 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
10070 10090 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10071 10091 mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
10072 10092 }
10073 10093 }
10074 10094 if (request_msg)
10075 10095 kmem_free(request_msg, request_size);
10076 10096
10077 10097 return (status);
10078 10098 }
10079 10099
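The out: path in mptsas_do_passthru() above returns an ADDRESS reply's frame to the free queue: the frame address is stored at the current free index, the index is advanced with wraparound at the queue depth, and the new index is written to the ReplyFreeHostIndex register. A minimal sketch of the index handling follows, using a hypothetical ring structure; the real code also syncs the DMA handle and writes the hardware register.

#include <stdint.h>

/* Hypothetical free-queue ring; models only the index arithmetic. */
typedef struct free_queue {
	uint32_t	*slots;		/* free reply frame addresses */
	uint32_t	depth;		/* number of slots in the ring */
	uint32_t	index;		/* next slot to fill */
} free_queue_t;

void
free_queue_put(free_queue_t *q, uint32_t reply_frame_addr)
{
	q->slots[q->index] = reply_frame_addr;
	if (++q->index == q->depth)
		q->index = 0;		/* wrap, as the driver does */
	/* The driver then writes the new index to ReplyFreeHostIndex. */
}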
10080 10100 static int
10081 10101 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10082 10102 {
10083 10103 /*
10084 10104 * If timeout is 0, set timeout to default of 60 seconds.
10085 10105 */
10086 10106 if (data->Timeout == 0) {
10087 10107 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10088 10108 }
10089 10109
10090 10110 if (((data->DataSize == 0) &&
10091 10111 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10092 10112 ((data->DataSize != 0) &&
10093 10113 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10094 10114 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10095 10115 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10096 10116 (data->DataOutSize != 0))))) {
10097 10117 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10098 10118 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10099 10119 } else {
10100 10120 data->DataOutSize = 0;
10101 10121 }
10102 10122 /*
10103 10123 * Send passthru request messages
10104 10124 */
10105 10125 return (mptsas_do_passthru(mpt,
10106 10126 (uint8_t *)((uintptr_t)data->PtrRequest),
10107 10127 (uint8_t *)((uintptr_t)data->PtrReply),
10108 10128 (uint8_t *)((uintptr_t)data->PtrData),
10109 10129 data->RequestSize, data->ReplySize,
10110 10130 data->DataSize, data->DataDirection,
10111 10131 (uint8_t *)((uintptr_t)data->PtrDataOut),
10112 10132 data->DataOutSize, data->Timeout, mode));
10113 10133 } else {
10114 10134 return (EINVAL);
10115 10135 }
10116 10136 }
10117 10137
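mptsas_pass_thru() above accepts a request only for a consistent combination of data size and direction, then downgrades BOTH to READ and clears DataOutSize for the one-way cases before calling mptsas_do_passthru(). The validity check can be restated as a standalone predicate; the sketch below uses hypothetical direction constants mirroring the MPTSAS_PASS_THRU_DIRECTION_* values.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of the MPTSAS_PASS_THRU_DIRECTION_* constants. */
enum pt_dir { PT_NONE, PT_READ, PT_WRITE, PT_BOTH };

/*
 * No data requires direction NONE; with data, READ and WRITE are always
 * acceptable and BOTH is acceptable only when a data-out size is also given.
 */
bool
pt_request_valid(uint32_t data_size, uint32_t dataout_size, enum pt_dir dir)
{
	if (data_size == 0)
		return (dir == PT_NONE);
	return (dir == PT_READ || dir == PT_WRITE ||
	    (dir == PT_BOTH && dataout_size != 0));
}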
10118 10138 static uint8_t
10119 10139 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10120 10140 {
10121 10141 uint8_t index;
10122 10142
10123 10143 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10124 10144 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10125 10145 return (index);
10126 10146 }
10127 10147 }
10128 10148
10129 10149 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10130 10150 }
10131 10151
10132 10152 static void
10133 10153 mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
10134 10154 {
10135 10155 pMpi2DiagBufferPostRequest_t pDiag_post_msg;
10136 10156 pMpi2DiagReleaseRequest_t pDiag_release_msg;
10137 10157 struct scsi_pkt *pkt = cmd->cmd_pkt;
10138 10158 mptsas_diag_request_t *diag = pkt->pkt_ha_private;
10139 10159 uint32_t request_desc_low, i;
10140 10160
10141 10161 ASSERT(mutex_owned(&mpt->m_mutex));
10142 10162
10143 10163 /*
10144 10164 * Form the diag message depending on the post or release function.
10145 10165 */
10146 10166 if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
10147 10167 pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
10148 10168 (mpt->m_req_frame + (mpt->m_req_frame_size *
10149 10169 cmd->cmd_slot));
10150 10170 bzero(pDiag_post_msg, mpt->m_req_frame_size);
10151 10171 ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
10152 10172 diag->function);
10153 10173 ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
10154 10174 diag->pBuffer->buffer_type);
10155 10175 ddi_put8(mpt->m_acc_req_frame_hdl,
10156 10176 &pDiag_post_msg->ExtendedType,
10157 10177 diag->pBuffer->extended_type);
10158 10178 ddi_put32(mpt->m_acc_req_frame_hdl,
10159 10179 &pDiag_post_msg->BufferLength,
10160 10180 diag->pBuffer->buffer_data.size);
10161 10181 for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
10162 10182 i++) {
10163 10183 ddi_put32(mpt->m_acc_req_frame_hdl,
10164 10184 &pDiag_post_msg->ProductSpecific[i],
10165 10185 diag->pBuffer->product_specific[i]);
10166 10186 }
10167 10187 ddi_put32(mpt->m_acc_req_frame_hdl,
10168 10188 &pDiag_post_msg->BufferAddress.Low,
10169 10189 (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
10170 10190 & 0xffffffffull));
10171 10191 ddi_put32(mpt->m_acc_req_frame_hdl,
10172 10192 &pDiag_post_msg->BufferAddress.High,
10173 10193 (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
10174 10194 >> 32));
10175 10195 } else {
10176 10196 pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
10177 10197 (mpt->m_req_frame + (mpt->m_req_frame_size *
10178 10198 cmd->cmd_slot));
10179 10199 bzero(pDiag_release_msg, mpt->m_req_frame_size);
10180 10200 ddi_put8(mpt->m_acc_req_frame_hdl,
10181 10201 &pDiag_release_msg->Function, diag->function);
10182 10202 ddi_put8(mpt->m_acc_req_frame_hdl,
10183 10203 &pDiag_release_msg->BufferType,
10184 10204 diag->pBuffer->buffer_type);
10185 10205 }
10186 10206
10187 10207 /*
10188 10208 * Send the message
10189 10209 */
10190 10210 (void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
10191 10211 DDI_DMA_SYNC_FORDEV);
10192 10212 request_desc_low = (cmd->cmd_slot << 16) +
10193 10213 MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10194 10214 cmd->cmd_rfm = NULL;
10195 10215 MPTSAS_START_CMD(mpt, request_desc_low, 0);
10196 10216 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
10197 10217 DDI_SUCCESS) ||
10198 10218 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
10199 10219 DDI_SUCCESS)) {
10200 10220 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10201 10221 }
10202 10222 }
10203 10223
10204 10224 static int
10205 10225 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10206 10226 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10207 10227 {
10208 10228 mptsas_diag_request_t diag;
10209 10229 int status, slot_num, post_flags = 0;
10210 10230 mptsas_cmd_t *cmd = NULL;
10211 10231 struct scsi_pkt *pkt;
10212 10232 pMpi2DiagBufferPostReply_t reply;
10213 10233 uint16_t iocstatus;
10214 10234 uint32_t iocloginfo, transfer_length;
10215 10235
10216 10236 /*
10217 10237 * If buffer is not enabled, just leave.
10218 10238 */
10219 10239 *return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
10220 10240 if (!pBuffer->enabled) {
10221 10241 status = DDI_FAILURE;
10222 10242 goto out;
10223 10243 }
10224 10244
10225 10245 /*
10226 10246 * Clear some flags initially.
10227 10247 */
10228 10248 pBuffer->force_release = FALSE;
10229 10249 pBuffer->valid_data = FALSE;
10230 10250 pBuffer->owned_by_firmware = FALSE;
10231 10251
10232 10252 /*
10233 10253 * Get a cmd buffer from the cmd buffer pool
10234 10254 */
10235 10255 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10236 10256 status = DDI_FAILURE;
10237 10257 mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
10238 10258 goto out;
10239 10259 }
10240 10260 post_flags |= MPTSAS_REQUEST_POOL_CMD;
10241 10261
10242 10262 bzero((caddr_t)cmd, sizeof (*cmd));
10243 10263 bzero((caddr_t)pkt, scsi_pkt_size());
10244 10264
10245 10265 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10246 10266
10247 10267 diag.pBuffer = pBuffer;
10248 10268 diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
10249 10269
10250 10270 /*
10251 10271 * Form a blank cmd/pkt to store the acknowledgement message
10252 10272 */
10253 10273 pkt->pkt_ha_private = (opaque_t)&diag;
10254 10274 pkt->pkt_flags = FLAG_HEAD;
10255 10275 pkt->pkt_time = 60;
10256 10276 cmd->cmd_pkt = pkt;
10257 10277 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10258 10278
10259 10279 /*
10260 10280 * Save the command in a slot
10261 10281 */
10262 10282 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10263 10283 /*
10264 10284 * Once passthru command get slot, set cmd_flags
10265 10285 * CFLAG_PREPARED.
10266 10286 */
10267 10287 cmd->cmd_flags |= CFLAG_PREPARED;
10268 10288 mptsas_start_diag(mpt, cmd);
10269 10289 } else {
10270 10290 mptsas_waitq_add(mpt, cmd);
10271 10291 }
10272 10292
10273 10293 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10274 10294 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10275 10295 }
10276 10296
10277 10297 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10278 10298 status = DDI_FAILURE;
10279 10299 mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
10280 10300 goto out;
10281 10301 }
10282 10302
10283 10303 /*
10284 10304 * cmd_rfm points to the reply message if a reply was given. Check the
10285 10305 * IOCStatus to make sure everything went OK with the FW diag request
10286 10306 * and set buffer flags.
10287 10307 */
10288 10308 if (cmd->cmd_rfm) {
10289 10309 post_flags |= MPTSAS_ADDRESS_REPLY;
10290 10310 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10291 10311 DDI_DMA_SYNC_FORCPU);
10292 10312 reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
10293 10313 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10294 10314
10295 10315 /*
10296 10316 * Get the reply message data
10297 10317 */
10298 10318 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10299 10319 &reply->IOCStatus);
10300 10320 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10301 10321 &reply->IOCLogInfo);
10302 10322 transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
10303 10323 &reply->TransferLength);
10304 10324
10305 10325 /*
10306 10326 * If post failed quit.
10307 10327 */
10308 10328 if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
10309 10329 status = DDI_FAILURE;
10310 10330 NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
10311 10331 "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
10312 10332 iocloginfo, transfer_length));
10313 10333 goto out;
10314 10334 }
10315 10335
10316 10336 /*
10317 10337 * Post was successful.
10318 10338 */
10319 10339 pBuffer->valid_data = TRUE;
10320 10340 pBuffer->owned_by_firmware = TRUE;
10321 10341 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10322 10342 status = DDI_SUCCESS;
10323 10343 }
10324 10344
10325 10345 out:
10326 10346 /*
10327 10347 * Put the reply frame back on the free queue, increment the free
10328 10348 * index, and write the new index to the free index register. But only
10329 10349 * if this reply is an ADDRESS reply.
10330 10350 */
10331 10351 if (post_flags & MPTSAS_ADDRESS_REPLY) {
10332 10352 ddi_put32(mpt->m_acc_free_queue_hdl,
10333 10353 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10334 10354 cmd->cmd_rfm);
10335 10355 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10336 10356 DDI_DMA_SYNC_FORDEV);
10337 10357 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10338 10358 mpt->m_free_index = 0;
10339 10359 }
10340 10360 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10341 10361 mpt->m_free_index);
10342 10362 }
10343 10363 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10344 10364 mptsas_remove_cmd(mpt, cmd);
10345 10365 post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10346 10366 }
10347 10367 if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
10348 10368 mptsas_return_to_pool(mpt, cmd);
10349 10369 }
10350 10370
10351 10371 return (status);
10352 10372 }
10353 10373
10354 10374 static int
10355 10375 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
10356 10376 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
10357 10377 uint32_t diag_type)
10358 10378 {
10359 10379 mptsas_diag_request_t diag;
10360 10380 int status, slot_num, rel_flags = 0;
10361 10381 mptsas_cmd_t *cmd = NULL;
10362 10382 struct scsi_pkt *pkt;
10363 10383 pMpi2DiagReleaseReply_t reply;
10364 10384 uint16_t iocstatus;
10365 10385 uint32_t iocloginfo;
10366 10386
10367 10387 /*
10368 10388 * If buffer is not enabled, just leave.
10369 10389 */
10370 10390 *return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
10371 10391 if (!pBuffer->enabled) {
10372 10392 mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
10373 10393 "by the IOC");
10374 10394 status = DDI_FAILURE;
10375 10395 goto out;
10376 10396 }
10377 10397
10378 10398 /*
10379 10399 * Clear some flags initially.
10380 10400 */
10381 10401 pBuffer->force_release = FALSE;
10382 10402 pBuffer->valid_data = FALSE;
10383 10403 pBuffer->owned_by_firmware = FALSE;
10384 10404
10385 10405 /*
10386 10406 * Get a cmd buffer from the cmd buffer pool
10387 10407 */
10388 10408 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10389 10409 status = DDI_FAILURE;
10390 10410 mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
10391 10411 "Diag");
10392 10412 goto out;
10393 10413 }
10394 10414 rel_flags |= MPTSAS_REQUEST_POOL_CMD;
10395 10415
10396 10416 bzero((caddr_t)cmd, sizeof (*cmd));
10397 10417 bzero((caddr_t)pkt, scsi_pkt_size());
10398 10418
10399 10419 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10400 10420
10401 10421 diag.pBuffer = pBuffer;
10402 10422 diag.function = MPI2_FUNCTION_DIAG_RELEASE;
10403 10423
10404 10424 /*
10405 10425 * Form a blank cmd/pkt to store the acknowledgement message
10406 10426 */
10407 10427 pkt->pkt_ha_private = (opaque_t)&diag;
10408 10428 pkt->pkt_flags = FLAG_HEAD;
10409 10429 pkt->pkt_time = 60;
10410 10430 cmd->cmd_pkt = pkt;
10411 10431 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10412 10432
10413 10433 /*
10414 10434 * Save the command in a slot
10415 10435 */
10416 10436 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10417 10437 /*
10418 10438 * Once passthru command get slot, set cmd_flags
10419 10439 * CFLAG_PREPARED.
10420 10440 */
10421 10441 cmd->cmd_flags |= CFLAG_PREPARED;
10422 10442 mptsas_start_diag(mpt, cmd);
10423 10443 } else {
10424 10444 mptsas_waitq_add(mpt, cmd);
10425 10445 }
10426 10446
10427 10447 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10428 10448 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10429 10449 }
10430 10450
10431 10451 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10432 10452 status = DDI_FAILURE;
10433 10453 mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
10434 10454 goto out;
10435 10455 }
10436 10456
10437 10457 /*
10438 10458 * cmd_rfm points to the reply message if a reply was given. Check the
10439 10459 * IOCStatus to make sure everything went OK with the FW diag request
10440 10460 * and set buffer flags.
10441 10461 */
10442 10462 if (cmd->cmd_rfm) {
10443 10463 rel_flags |= MPTSAS_ADDRESS_REPLY;
10444 10464 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10445 10465 DDI_DMA_SYNC_FORCPU);
10446 10466 reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
10447 10467 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10448 10468
10449 10469 /*
10450 10470 * Get the reply message data
10451 10471 */
10452 10472 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10453 10473 &reply->IOCStatus);
10454 10474 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10455 10475 &reply->IOCLogInfo);
10456 10476
10457 10477 /*
10458 10478 * If release failed quit.
10459 10479 */
10460 10480 if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
10461 10481 pBuffer->owned_by_firmware) {
10462 10482 status = DDI_FAILURE;
10463 10483 NDBG13(("release FW Diag Buffer failed: "
10464 10484 "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
10465 10485 iocloginfo));
10466 10486 goto out;
10467 10487 }
10468 10488
10469 10489 /*
10470 10490 * Release was successful.
10471 10491 */
10472 10492 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10473 10493 status = DDI_SUCCESS;
10474 10494
10475 10495 /*
10476 10496 * If this was for an UNREGISTER diag type command, clear the
10477 10497 * unique ID.
10478 10498 */
10479 10499 if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
10480 10500 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10481 10501 }
10482 10502 }
10483 10503
10484 10504 out:
10485 10505 /*
10486 10506 * Put the reply frame back on the free queue, increment the free
10487 10507 * index, and write the new index to the free index register. But only
10488 10508 * if this reply is an ADDRESS reply.
10489 10509 */
10490 10510 if (rel_flags & MPTSAS_ADDRESS_REPLY) {
10491 10511 ddi_put32(mpt->m_acc_free_queue_hdl,
10492 10512 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10493 10513 cmd->cmd_rfm);
10494 10514 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10495 10515 DDI_DMA_SYNC_FORDEV);
10496 10516 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10497 10517 mpt->m_free_index = 0;
10498 10518 }
10499 10519 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10500 10520 mpt->m_free_index);
10501 10521 }
10502 10522 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10503 10523 mptsas_remove_cmd(mpt, cmd);
10504 10524 rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10505 10525 }
10506 10526 if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
10507 10527 mptsas_return_to_pool(mpt, cmd);
10508 10528 }
10509 10529
10510 10530 return (status);
10511 10531 }
10512 10532
10513 10533 static int
10514 10534 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
10515 10535 uint32_t *return_code)
10516 10536 {
10517 10537 mptsas_fw_diagnostic_buffer_t *pBuffer;
10518 10538 uint8_t extended_type, buffer_type, i;
10519 10539 uint32_t buffer_size;
10520 10540 uint32_t unique_id;
10521 10541 int status;
10522 10542
10523 10543 ASSERT(mutex_owned(&mpt->m_mutex));
10524 10544
10525 10545 extended_type = diag_register->ExtendedType;
10526 10546 buffer_type = diag_register->BufferType;
10527 10547 buffer_size = diag_register->RequestedBufferSize;
10528 10548 unique_id = diag_register->UniqueId;
10529 10549
10530 10550 /*
10531 10551 * Check for valid buffer type
10532 10552 */
10533 10553 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
10534 10554 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10535 10555 return (DDI_FAILURE);
10536 10556 }
10537 10557
10538 10558 /*
10539 10559 * Get the current buffer and look up the unique ID. The unique ID
10540 10560 * should not be found. If it is, the ID is already in use.
10541 10561 */
10542 10562 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10543 10563 pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
10544 10564 if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10545 10565 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10546 10566 return (DDI_FAILURE);
10547 10567 }
10548 10568
10549 10569 /*
10550 10570 * The buffer's unique ID should not be registered yet, and the given
10551 10571 * unique ID cannot be 0.
10552 10572 */
10553 10573 if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
10554 10574 (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10555 10575 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10556 10576 return (DDI_FAILURE);
10557 10577 }
10558 10578
10559 10579 /*
10560 10580 * If this buffer is already posted as immediate, just change owner.
10561 10581 */
10562 10582 if (pBuffer->immediate && pBuffer->owned_by_firmware &&
10563 10583 (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10564 10584 pBuffer->immediate = FALSE;
10565 10585 pBuffer->unique_id = unique_id;
10566 10586 return (DDI_SUCCESS);
10567 10587 }
10568 10588
10569 10589 /*
10570 10590 * Post a new buffer after checking if it's enabled. The DMA buffer
10571 10591 * that is allocated will be contiguous (sgl_len = 1).
10572 10592 */
10573 10593 if (!pBuffer->enabled) {
10574 10594 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10575 10595 return (DDI_FAILURE);
10576 10596 }
10577 10597 bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
10578 10598 pBuffer->buffer_data.size = buffer_size;
10579 10599 if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
10580 10600 mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
10581 10601 "diag buffer: size = %d bytes", buffer_size);
10582 10602 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10583 10603 return (DDI_FAILURE);
10584 10604 }
10585 10605
10586 10606 /*
10587 10607 * Copy the given info to the diag buffer and post the buffer.
10588 10608 */
10589 10609 pBuffer->buffer_type = buffer_type;
10590 10610 pBuffer->immediate = FALSE;
10591 10611 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
10592 10612 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
10593 10613 i++) {
10594 10614 pBuffer->product_specific[i] =
10595 10615 diag_register->ProductSpecific[i];
10596 10616 }
10597 10617 }
10598 10618 pBuffer->extended_type = extended_type;
10599 10619 pBuffer->unique_id = unique_id;
10600 10620 status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
10601 10621
10602 10622 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10603 10623 DDI_SUCCESS) {
10604 10624 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
10605 10625 "mptsas_diag_register.");
10606 10626 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10607 10627 status = DDI_FAILURE;
10608 10628 }
10609 10629
10610 10630 /*
10611 10631 * In case there was a failure, free the DMA buffer.
10612 10632 */
10613 10633 if (status == DDI_FAILURE) {
10614 10634 mptsas_dma_free(&pBuffer->buffer_data);
10615 10635 }
10616 10636
10617 10637 return (status);
10618 10638 }
10619 10639
10620 10640 static int
10621 10641 mptsas_diag_unregister(mptsas_t *mpt,
10622 10642 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10623 10643 {
10624 10644 mptsas_fw_diagnostic_buffer_t *pBuffer;
10625 10645 uint8_t i;
10626 10646 uint32_t unique_id;
10627 10647 int status;
10628 10648
10629 10649 ASSERT(mutex_owned(&mpt->m_mutex));
10630 10650
10631 10651 unique_id = diag_unregister->UniqueId;
10632 10652
10633 10653 /*
10634 10654 * Get the current buffer and look up the unique ID. The unique ID
10635 10655 * should be there.
10636 10656 */
10637 10657 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10638 10658 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10639 10659 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10640 10660 return (DDI_FAILURE);
10641 10661 }
10642 10662
10643 10663 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10644 10664
10645 10665 /*
10646 10666 * Try to release the buffer from FW before freeing it. If release
10647 10667 * fails, don't free the DMA buffer in case FW tries to access it
10648 10668 * later. If buffer is not owned by firmware, can't release it.
10649 10669 */
10650 10670 if (!pBuffer->owned_by_firmware) {
10651 10671 status = DDI_SUCCESS;
10652 10672 } else {
10653 10673 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10654 10674 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10655 10675 }
10656 10676
10657 10677 /*
10658 10678 * At this point, return the current status no matter what happens with
10659 10679 * the DMA buffer.
10660 10680 */
10661 10681 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10662 10682 if (status == DDI_SUCCESS) {
10663 10683 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10664 10684 DDI_SUCCESS) {
10665 10685 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10666 10686 "in mptsas_diag_unregister.");
10667 10687 ddi_fm_service_impact(mpt->m_dip,
10668 10688 DDI_SERVICE_UNAFFECTED);
10669 10689 }
10670 10690 mptsas_dma_free(&pBuffer->buffer_data);
10671 10691 }
10672 10692
10673 10693 return (status);
10674 10694 }
10675 10695
10676 10696 static int
10677 10697 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10678 10698 uint32_t *return_code)
10679 10699 {
10680 10700 mptsas_fw_diagnostic_buffer_t *pBuffer;
10681 10701 uint8_t i;
10682 10702 uint32_t unique_id;
10683 10703
10684 10704 ASSERT(mutex_owned(&mpt->m_mutex));
10685 10705
10686 10706 unique_id = diag_query->UniqueId;
10687 10707
10688 10708 /*
10689 10709 * If ID is valid, query on ID.
10690 10710 * If ID is invalid, query on buffer type.
10691 10711 */
10692 10712 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10693 10713 i = diag_query->BufferType;
10694 10714 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10695 10715 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10696 10716 return (DDI_FAILURE);
10697 10717 }
10698 10718 } else {
10699 10719 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10700 10720 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10701 10721 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10702 10722 return (DDI_FAILURE);
10703 10723 }
10704 10724 }
10705 10725
10706 10726 /*
10707 10727 * Fill query structure with the diag buffer info.
10708 10728 */
10709 10729 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10710 10730 diag_query->BufferType = pBuffer->buffer_type;
10711 10731 diag_query->ExtendedType = pBuffer->extended_type;
10712 10732 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10713 10733 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10714 10734 i++) {
10715 10735 diag_query->ProductSpecific[i] =
10716 10736 pBuffer->product_specific[i];
10717 10737 }
10718 10738 }
10719 10739 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10720 10740 diag_query->DriverAddedBufferSize = 0;
10721 10741 diag_query->UniqueId = pBuffer->unique_id;
10722 10742 diag_query->ApplicationFlags = 0;
10723 10743 diag_query->DiagnosticFlags = 0;
10724 10744
10725 10745 /*
10726 10746 * Set/Clear application flags
10727 10747 */
10728 10748 if (pBuffer->immediate) {
10729 10749 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10730 10750 } else {
10731 10751 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10732 10752 }
10733 10753 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10734 10754 diag_query->ApplicationFlags |=
10735 10755 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10736 10756 } else {
10737 10757 diag_query->ApplicationFlags &=
10738 10758 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10739 10759 }
10740 10760 if (pBuffer->owned_by_firmware) {
10741 10761 diag_query->ApplicationFlags |=
10742 10762 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10743 10763 } else {
10744 10764 diag_query->ApplicationFlags &=
10745 10765 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10746 10766 }
10747 10767
10748 10768 return (DDI_SUCCESS);
10749 10769 }
10750 10770
10751 10771 static int
10752 10772 mptsas_diag_read_buffer(mptsas_t *mpt,
10753 10773 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10754 10774 uint32_t *return_code, int ioctl_mode)
10755 10775 {
10756 10776 mptsas_fw_diagnostic_buffer_t *pBuffer;
10757 10777 uint8_t i, *pData;
10758 10778 uint32_t unique_id, byte;
10759 10779 int status;
10760 10780
10761 10781 ASSERT(mutex_owned(&mpt->m_mutex));
10762 10782
10763 10783 unique_id = diag_read_buffer->UniqueId;
10764 10784
10765 10785 /*
10766 10786 * Get the current buffer and look up the unique ID. The unique ID
10767 10787 * should be there.
10768 10788 */
10769 10789 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10770 10790 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10771 10791 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10772 10792 return (DDI_FAILURE);
10773 10793 }
10774 10794
10775 10795 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10776 10796
10777 10797 /*
10778 10798 * Make sure requested read is within limits
10779 10799 */
10780 10800 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10781 10801 pBuffer->buffer_data.size) {
10782 10802 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10783 10803 return (DDI_FAILURE);
10784 10804 }
10785 10805
10786 10806 /*
10787 10807 * Copy the requested data from DMA to the diag_read_buffer. The DMA
10788 10808 * buffer that was allocated is one contiguous buffer.
10789 10809 */
10790 10810 pData = (uint8_t *)(pBuffer->buffer_data.memp +
10791 10811 diag_read_buffer->StartingOffset);
10792 10812 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10793 10813 DDI_DMA_SYNC_FORCPU);
10794 10814 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10795 10815 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10796 10816 != 0) {
10797 10817 return (DDI_FAILURE);
10798 10818 }
10799 10819 }
10800 10820 diag_read_buffer->Status = 0;
10801 10821
10802 10822 /*
10803 10823 * Set or clear the Force Release flag.
10804 10824 */
10805 10825 if (pBuffer->force_release) {
10806 10826 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10807 10827 } else {
10808 10828 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10809 10829 }
10810 10830
10811 10831 /*
10812 10832 * If buffer is to be reregistered, make sure it's not already owned by
10813 10833 * firmware first.
10814 10834 */
10815 10835 status = DDI_SUCCESS;
10816 10836 if (!pBuffer->owned_by_firmware) {
10817 10837 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10818 10838 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10819 10839 return_code);
10820 10840 }
10821 10841 }
10822 10842
10823 10843 return (status);
10824 10844 }
10825 10845
10826 10846 static int
10827 10847 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10828 10848 uint32_t *return_code)
10829 10849 {
10830 10850 mptsas_fw_diagnostic_buffer_t *pBuffer;
10831 10851 uint8_t i;
10832 10852 uint32_t unique_id;
10833 10853 int status;
10834 10854
10835 10855 ASSERT(mutex_owned(&mpt->m_mutex));
10836 10856
10837 10857 unique_id = diag_release->UniqueId;
10838 10858
10839 10859 /*
10840 10860 * Get the current buffer and look up the unique ID. The unique ID
10841 10861 * should be there.
10842 10862 */
10843 10863 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10844 10864 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10845 10865 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10846 10866 return (DDI_FAILURE);
10847 10867 }
10848 10868
10849 10869 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10850 10870
10851 10871 /*
10852 10872 * If buffer is not owned by firmware, it's already been released.
10853 10873 */
10854 10874 if (!pBuffer->owned_by_firmware) {
10855 10875 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10856 10876 return (DDI_FAILURE);
10857 10877 }
10858 10878
10859 10879 /*
10860 10880 * Release the buffer.
10861 10881 */
10862 10882 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10863 10883 MPTSAS_FW_DIAG_TYPE_RELEASE);
10864 10884 return (status);
10865 10885 }
10866 10886
10867 10887 static int
10868 10888 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10869 10889 uint32_t length, uint32_t *return_code, int ioctl_mode)
10870 10890 {
10871 10891 mptsas_fw_diag_register_t diag_register;
10872 10892 mptsas_fw_diag_unregister_t diag_unregister;
10873 10893 mptsas_fw_diag_query_t diag_query;
10874 10894 mptsas_diag_read_buffer_t diag_read_buffer;
10875 10895 mptsas_fw_diag_release_t diag_release;
10876 10896 int status = DDI_SUCCESS;
10877 10897 uint32_t original_return_code, read_buf_len;
10878 10898
10879 10899 ASSERT(mutex_owned(&mpt->m_mutex));
10880 10900
10881 10901 original_return_code = *return_code;
10882 10902 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10883 10903
10884 10904 switch (action) {
10885 10905 case MPTSAS_FW_DIAG_TYPE_REGISTER:
10886 10906 if (!length) {
10887 10907 *return_code =
10888 10908 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10889 10909 status = DDI_FAILURE;
10890 10910 break;
10891 10911 }
10892 10912 if (ddi_copyin(diag_action, &diag_register,
10893 10913 sizeof (diag_register), ioctl_mode) != 0) {
10894 10914 return (DDI_FAILURE);
10895 10915 }
10896 10916 status = mptsas_diag_register(mpt, &diag_register,
10897 10917 return_code);
10898 10918 break;
10899 10919
10900 10920 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10901 10921 if (length < sizeof (diag_unregister)) {
10902 10922 *return_code =
10903 10923 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10904 10924 status = DDI_FAILURE;
10905 10925 break;
10906 10926 }
10907 10927 if (ddi_copyin(diag_action, &diag_unregister,
10908 10928 sizeof (diag_unregister), ioctl_mode) != 0) {
10909 10929 return (DDI_FAILURE);
10910 10930 }
10911 10931 status = mptsas_diag_unregister(mpt, &diag_unregister,
10912 10932 return_code);
10913 10933 break;
10914 10934
10915 10935 case MPTSAS_FW_DIAG_TYPE_QUERY:
10916 10936 if (length < sizeof (diag_query)) {
10917 10937 *return_code =
10918 10938 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10919 10939 status = DDI_FAILURE;
10920 10940 break;
10921 10941 }
10922 10942 if (ddi_copyin(diag_action, &diag_query,
10923 10943 sizeof (diag_query), ioctl_mode) != 0) {
10924 10944 return (DDI_FAILURE);
10925 10945 }
10926 10946 status = mptsas_diag_query(mpt, &diag_query,
10927 10947 return_code);
10928 10948 if (status == DDI_SUCCESS) {
10929 10949 if (ddi_copyout(&diag_query, diag_action,
10930 10950 sizeof (diag_query), ioctl_mode) != 0) {
10931 10951 return (DDI_FAILURE);
10932 10952 }
10933 10953 }
10934 10954 break;
10935 10955
10936 10956 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10937 10957 if (ddi_copyin(diag_action, &diag_read_buffer,
10938 10958 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10939 10959 return (DDI_FAILURE);
10940 10960 }
10941 10961 read_buf_len = sizeof (diag_read_buffer) -
10942 10962 sizeof (diag_read_buffer.DataBuffer) +
10943 10963 diag_read_buffer.BytesToRead;
10944 10964 if (length < read_buf_len) {
10945 10965 *return_code =
10946 10966 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10947 10967 status = DDI_FAILURE;
10948 10968 break;
10949 10969 }
10950 10970 status = mptsas_diag_read_buffer(mpt,
10951 10971 &diag_read_buffer, diag_action +
10952 10972 sizeof (diag_read_buffer) - 4, return_code,
10953 10973 ioctl_mode);
10954 10974 if (status == DDI_SUCCESS) {
10955 10975 if (ddi_copyout(&diag_read_buffer, diag_action,
10956 10976 sizeof (diag_read_buffer) - 4, ioctl_mode)
10957 10977 != 0) {
10958 10978 return (DDI_FAILURE);
10959 10979 }
10960 10980 }
10961 10981 break;
10962 10982
10963 10983 case MPTSAS_FW_DIAG_TYPE_RELEASE:
10964 10984 if (length < sizeof (diag_release)) {
10965 10985 *return_code =
10966 10986 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10967 10987 status = DDI_FAILURE;
10968 10988 break;
10969 10989 }
10970 10990 if (ddi_copyin(diag_action, &diag_release,
10971 10991 sizeof (diag_release), ioctl_mode) != 0) {
10972 10992 return (DDI_FAILURE);
10973 10993 }
10974 10994 status = mptsas_diag_release(mpt, &diag_release,
10975 10995 return_code);
10976 10996 break;
10977 10997
10978 10998 default:
10979 10999 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10980 11000 status = DDI_FAILURE;
10981 11001 break;
10982 11002 }
10983 11003
10984 11004 if ((status == DDI_FAILURE) &&
10985 11005 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
10986 11006 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
10987 11007 status = DDI_SUCCESS;
10988 11008 }
10989 11009
10990 11010 return (status);
10991 11011 }
10992 11012
10993 11013 static int
10994 11014 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
10995 11015 {
10996 11016 int status;
10997 11017 mptsas_diag_action_t driver_data;
10998 11018
10999 11019 ASSERT(mutex_owned(&mpt->m_mutex));
11000 11020
11001 11021 /*
11002 11022 * Copy the user data to a driver data buffer.
11003 11023 */
11004 11024 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11005 11025 mode) == 0) {
11006 11026 /*
11007 11027 * Send diag action request if Action is valid
11008 11028 */
11009 11029 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11010 11030 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11011 11031 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11012 11032 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11013 11033 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11014 11034 status = mptsas_do_diag_action(mpt, driver_data.Action,
11015 11035 (void *)(uintptr_t)driver_data.PtrDiagAction,
11016 11036 driver_data.Length, &driver_data.ReturnCode,
11017 11037 mode);
11018 11038 if (status == DDI_SUCCESS) {
11019 11039 if (ddi_copyout(&driver_data.ReturnCode,
11020 11040 &user_data->ReturnCode,
11021 11041 sizeof (user_data->ReturnCode), mode)
11022 11042 != 0) {
11023 11043 status = EFAULT;
11024 11044 } else {
11025 11045 status = 0;
11026 11046 }
11027 11047 } else {
11028 11048 status = EIO;
11029 11049 }
11030 11050 } else {
11031 11051 status = EINVAL;
11032 11052 }
11033 11053 } else {
11034 11054 status = EFAULT;
11035 11055 }
11036 11056
11037 11057 return (status);
11038 11058 }
11039 11059
11040 11060 /*
11041 11061 * This routine handles the "event query" ioctl.
11042 11062 */
11043 11063 static int
11044 11064 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11045 11065 int *rval)
11046 11066 {
11047 11067 int status;
11048 11068 mptsas_event_query_t driverdata;
11049 11069 uint8_t i;
11050 11070
11051 11071 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11052 11072
11053 11073 mutex_enter(&mpt->m_mutex);
11054 11074 for (i = 0; i < 4; i++) {
11055 11075 driverdata.Types[i] = mpt->m_event_mask[i];
11056 11076 }
11057 11077 mutex_exit(&mpt->m_mutex);
11058 11078
11059 11079 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11060 11080 status = EFAULT;
11061 11081 } else {
11062 11082 *rval = MPTIOCTL_STATUS_GOOD;
11063 11083 status = 0;
11064 11084 }
11065 11085
11066 11086 return (status);
11067 11087 }
11068 11088
11069 11089 /*
11070 11090 * This routine handles the "event enable" ioctl.
11071 11091 */
11072 11092 static int
11073 11093 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11074 11094 int *rval)
11075 11095 {
11076 11096 int status;
11077 11097 mptsas_event_enable_t driverdata;
11078 11098 uint8_t i;
11079 11099
11080 11100 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11081 11101 mutex_enter(&mpt->m_mutex);
11082 11102 for (i = 0; i < 4; i++) {
11083 11103 mpt->m_event_mask[i] = driverdata.Types[i];
11084 11104 }
11085 11105 mutex_exit(&mpt->m_mutex);
11086 11106
11087 11107 *rval = MPTIOCTL_STATUS_GOOD;
11088 11108 status = 0;
11089 11109 } else {
11090 11110 status = EFAULT;
11091 11111 }
11092 11112 return (status);
11093 11113 }
11094 11114
11095 11115 /*
11096 11116 * This routine handles the "event report" ioctl.
11097 11117 */
11098 11118 static int
11099 11119 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11100 11120 int *rval)
11101 11121 {
11102 11122 int status;
11103 11123 mptsas_event_report_t driverdata;
11104 11124
11105 11125 mutex_enter(&mpt->m_mutex);
11106 11126
11107 11127 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11108 11128 mode) == 0) {
11109 11129 if (driverdata.Size >= sizeof (mpt->m_events)) {
11110 11130 if (ddi_copyout(mpt->m_events, data->Events,
11111 11131 sizeof (mpt->m_events), mode) != 0) {
11112 11132 status = EFAULT;
11113 11133 } else {
11114 11134 if (driverdata.Size > sizeof (mpt->m_events)) {
11115 11135 driverdata.Size =
11116 11136 sizeof (mpt->m_events);
11117 11137 if (ddi_copyout(&driverdata.Size,
11118 11138 &data->Size,
11119 11139 sizeof (driverdata.Size),
11120 11140 mode) != 0) {
11121 11141 status = EFAULT;
11122 11142 } else {
11123 11143 *rval = MPTIOCTL_STATUS_GOOD;
11124 11144 status = 0;
11125 11145 }
11126 11146 } else {
11127 11147 *rval = MPTIOCTL_STATUS_GOOD;
11128 11148 status = 0;
11129 11149 }
11130 11150 }
11131 11151 } else {
11132 11152 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11133 11153 status = 0;
11134 11154 }
11135 11155 } else {
11136 11156 status = EFAULT;
11137 11157 }
11138 11158
11139 11159 mutex_exit(&mpt->m_mutex);
11140 11160 return (status);
11141 11161 }
11142 11162
11143 11163 static void
11144 11164 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11145 11165 {
11146 11166 int *reg_data;
11147 11167 uint_t reglen;
11148 11168
11149 11169 /*
11150 11170 * Lookup the 'reg' property and extract the other data
11151 11171 */
11152 11172 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11153 11173 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
11154 11174 DDI_PROP_SUCCESS) {
11155 11175 /*
11156 11176 * Extract the PCI data from the 'reg' property first DWORD.
11157 11177 * The entry looks like the following:
11158 11178 * First DWORD:
11159 11179 * Bits 0 - 7 8-bit Register number
11160 11180 * Bits 8 - 10 3-bit Function number
11161 11181 * Bits 11 - 15 5-bit Device number
11162 11182 * Bits 16 - 23 8-bit Bus number
11163 11183 * Bits 24 - 25 2-bit Address Space type identifier
11164 11184 *
11165 11185 */
11166 11186 adapter_data->PciInformation.u.bits.BusNumber =
11167 11187 (reg_data[0] & 0x00FF0000) >> 16;
11168 11188 adapter_data->PciInformation.u.bits.DeviceNumber =
11169 11189 (reg_data[0] & 0x0000F800) >> 11;
11170 11190 adapter_data->PciInformation.u.bits.FunctionNumber =
11171 11191 (reg_data[0] & 0x00000700) >> 8;
11172 11192 ddi_prop_free((void *)reg_data);
11173 11193 } else {
11174 11194 /*
11175 11195 * If we can't determine the PCI data then we fill in FF's for
11176 11196 * the data to indicate this.
11177 11197 */
11178 11198 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11179 11199 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11180 11200 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11181 11201 }
11182 11202
11183 11203 /*
11184 11204 * Saved in the mpt->m_fwversion
11185 11205 */
11186 11206 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11187 11207 }
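The comment in mptsas_lookup_pci_data() above documents how the bus, device, and
function numbers are packed into the first DWORD of the 'reg' property. A minimal
user-space sketch of the same mask-and-shift decoding follows; the register value is
made up for illustration and is not read from any real device.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t reg0 = 0x00021000;	/* hypothetical: bus 2, device 2, function 0 */

	/* Same masks and shifts as the driver's decode of the first DWORD. */
	uint32_t bus  = (reg0 & 0x00FF0000) >> 16;
	uint32_t dev  = (reg0 & 0x0000F800) >> 11;
	uint32_t func = (reg0 & 0x00000700) >> 8;

	(void) printf("bus %u dev %u func %u\n", bus, dev, func);
	return (0);
}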
11188 11208
11189 11209 static void
11190 11210 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11191 11211 {
11192 11212 char *driver_verstr = MPTSAS_MOD_STRING;
11193 11213
11194 11214 mptsas_lookup_pci_data(mpt, adapter_data);
11195 11215 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11196 11216 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11197 11217 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11198 11218 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11199 11219 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11200 11220 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11201 11221 adapter_data->BiosVersion = 0;
11202 11222 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11203 11223 }
11204 11224
11205 11225 static void
11206 11226 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11207 11227 {
11208 11228 int *reg_data, i;
11209 11229 uint_t reglen;
11210 11230
11211 11231 /*
11212 11232 * Lookup the 'reg' property and extract the other data
11213 11233 */
11214 11234 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11215 11235 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
11216 11236 DDI_PROP_SUCCESS) {
11217 11237 /*
11218 11238 * Extract the PCI data from the 'reg' property first DWORD.
11219 11239 * The entry looks like the following:
11220 11240 * First DWORD:
11221 11241 * Bits 8 - 10 3-bit Function number
11222 11242 * Bits 11 - 15 5-bit Device number
11223 11243 * Bits 16 - 23 8-bit Bus number
11224 11244 */
11225 11245 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11226 11246 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11227 11247 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11228 11248 ddi_prop_free((void *)reg_data);
11229 11249 } else {
11230 11250 /*
11231 11251 * If we can't determine the PCI info then we fill in FF's for
11232 11252 * the data to indicate this.
11233 11253 */
11234 11254 pci_info->BusNumber = 0xFFFFFFFF;
11235 11255 pci_info->DeviceNumber = 0xFF;
11236 11256 pci_info->FunctionNumber = 0xFF;
11237 11257 }
11238 11258
11239 11259 /*
11240 11260 * Now get the interrupt vector and the pci header. The vector can
11241 11261 * only be 0 right now. The header is the first 256 bytes of config
11242 11262 * space.
11243 11263 */
11244 11264 pci_info->InterruptVector = 0;
11245 11265 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11246 11266 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11247 11267 i);
11248 11268 }
11249 11269 }
11250 11270
11251 11271 static int
11252 11272 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
11253 11273 {
11254 11274 int status = 0;
11255 11275 mptsas_reg_access_t driverdata;
11256 11276
11257 11277 mutex_enter(&mpt->m_mutex);
11258 11278 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11259 11279 switch (driverdata.Command) {
11260 11280 /*
11261 11281 * IO access is not supported.
11262 11282 */
11263 11283 case REG_IO_READ:
11264 11284 case REG_IO_WRITE:
11265 11285 mptsas_log(mpt, CE_WARN, "IO access is not "
11266 11286 "supported. Use memory access.");
11267 11287 status = EINVAL;
11268 11288 break;
11269 11289
11270 11290 case REG_MEM_READ:
11271 11291 driverdata.RegData = ddi_get32(mpt->m_datap,
11272 11292 (uint32_t *)(void *)mpt->m_reg +
11273 11293 driverdata.RegOffset);
11274 11294 if (ddi_copyout(&driverdata.RegData,
11275 11295 &data->RegData,
11276 11296 sizeof (driverdata.RegData), mode) != 0) {
11277 11297 mptsas_log(mpt, CE_WARN, "Register "
11278 11298 "Read Failed");
11279 11299 status = EFAULT;
11280 11300 }
11281 11301 break;
11282 11302
11283 11303 case REG_MEM_WRITE:
11284 11304 ddi_put32(mpt->m_datap,
11285 11305 (uint32_t *)(void *)mpt->m_reg +
11286 11306 driverdata.RegOffset,
11287 11307 driverdata.RegData);
11288 11308 break;
11289 11309
11290 11310 default:
11291 11311 status = EINVAL;
11292 11312 break;
11293 11313 }
11294 11314 } else {
11295 11315 status = EFAULT;
11296 11316 }
11297 11317
11298 11318 mutex_exit(&mpt->m_mutex);
11299 11319 return (status);
11300 11320 }
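Note that the REG_MEM_READ and REG_MEM_WRITE cases add RegOffset to a uint32_t
pointer, so the supplied offset is measured in 32-bit words rather than bytes. A tiny
stand-alone check of that pointer arithmetic, using an ordinary array instead of a DDI
access handle:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int
main(void)
{
	uint32_t regs[8] = { 0 };
	uint32_t *base = regs;
	uint32_t offset = 3;		/* hypothetical RegOffset */

	/* Adding 3 to a uint32_t pointer advances by 3 words = 12 bytes. */
	(void) printf("byte distance = %td\n",
	    (char *)(base + offset) - (char *)base);
	return (0);
}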
11301 11321
11302 11322 static int
11303 11323 led_control(mptsas_t *mpt, intptr_t data, int mode)
11304 11324 {
11305 11325 int ret = 0;
11306 11326 mptsas_led_control_t lc;
11307 11327 mptsas_target_t *ptgt;
11308 11328
11309 11329 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
11310 11330 return (EFAULT);
11311 11331 }
11312 11332
11313 11333 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
11314 11334 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
11315 11335 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
11316 11336 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
11317 11337 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
11318 11338 lc.LedStatus != 1)) {
11319 11339 return (EINVAL);
11320 11340 }
11321 11341
11322 11342 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
11323 11343 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
11324 11344 return (EACCES);
11325 11345
11326 11346 /* Locate the target we're interrogating... */
11327 11347 mutex_enter(&mpt->m_mutex);
11328 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11329 - MPTSAS_HASH_FIRST);
11330 - while (ptgt != NULL) {
11331 - if (ptgt->m_enclosure == lc.Enclosure &&
11332 - ptgt->m_slot_num == lc.Slot) {
11333 - break;
11334 - }
11335 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11336 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11337 - }
11348 + ptgt = refhash_linear_search(mpt->m_targets,
11349 + mptsas_target_eval_slot, &lc);
11338 11350 if (ptgt == NULL) {
11339 11351 /* We could not find a target for that enclosure/slot. */
11340 11352 mutex_exit(&mpt->m_mutex);
11341 11353 return (ENOENT);
11342 11354 }
11343 11355
11344 11356 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
11345 11357 /* Update our internal LED state. */
11346 11358 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
11347 11359 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
11348 11360
11349 11361 /* Flush it to the controller. */
11350 11362 ret = mptsas_flush_led_status(mpt, ptgt);
11351 11363 mutex_exit(&mpt->m_mutex);
11352 11364 return (ret);
11353 11365 }
11354 11366
11355 11367 /* Return our internal LED state. */
11356 11368 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
11357 11369 mutex_exit(&mpt->m_mutex);
11358 11370
11359 11371 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
11360 11372 return (EFAULT);
11361 11373 }
11362 11374
11363 11375 return (0);
11364 11376 }
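The replaced loop in led_control() walked the old target hash by hand; the new code
hands a slot-matching predicate to refhash_linear_search(). The sketch below shows the
shape of such a predicate over stand-in structures; the names, fields, and the
return-zero-on-match convention are illustrative assumptions, not the driver's actual
mptsas_target_eval_slot().

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-ins for the driver structures. */
typedef struct {
	uint16_t m_enclosure;
	uint16_t m_slot_num;
} fake_target_t;

typedef struct {
	uint16_t Enclosure;
	uint16_t Slot;
} fake_led_control_t;

/* Predicate: return 0 when the target sits in the requested slot. */
static int
target_eval_slot(const void *op, const void *arg)
{
	const fake_target_t *tgt = op;
	const fake_led_control_t *lc = arg;

	if (tgt->m_enclosure != lc->Enclosure)
		return (-1);
	return (tgt->m_slot_num == lc->Slot ? 0 : -1);
}

int
main(void)
{
	fake_target_t targets[] = { { 1, 0 }, { 1, 4 }, { 2, 7 } };
	fake_led_control_t lc = { 1, 4 };
	fake_target_t *match = NULL;
	size_t i;

	/* Linear search, as the refhash-based call does for the driver. */
	for (i = 0; i < sizeof (targets) / sizeof (targets[0]); i++) {
		if (target_eval_slot(&targets[i], &lc) == 0) {
			match = &targets[i];
			break;
		}
	}
	(void) printf("match: %s\n", match != NULL ? "yes" : "no");
	return (0);
}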
11365 11377
11366 11378 static int
11367 11379 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
11368 11380 {
11369 11381 uint16_t i = 0;
11370 11382 uint16_t count = 0;
11371 11383 int ret = 0;
11372 11384 mptsas_target_t *ptgt;
11373 11385 mptsas_disk_info_t *di;
11374 11386 STRUCT_DECL(mptsas_get_disk_info, gdi);
11375 11387
11376 11388 if ((mode & FREAD) == 0)
11377 11389 return (EACCES);
11378 11390
11379 11391 STRUCT_INIT(gdi, get_udatamodel());
11380 11392
11381 11393 if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
11382 11394 mode) != 0) {
11383 11395 return (EFAULT);
11384 11396 }
11385 11397
11386 11398 /* Find out how many targets there are. */
11387 11399 mutex_enter(&mpt->m_mutex);
11388 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11389 - MPTSAS_HASH_FIRST);
11390 - while (ptgt != NULL) {
11400 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
11401 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
11391 11402 count++;
11392 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11393 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11394 11403 }
11395 11404 mutex_exit(&mpt->m_mutex);
11396 11405
11397 11406 /*
11398 11407 * If we haven't been asked to copy out information on each target,
11399 11408 * then just return the count.
11400 11409 */
11401 11410 STRUCT_FSET(gdi, DiskCount, count);
11402 11411 if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
11403 11412 goto copy_out;
11404 11413
11405 11414 /*
11406 11415 * If we haven't been given a large enough buffer to copy out into,
11407 11416 * let the caller know.
11408 11417 */
11409 11418 if (STRUCT_FGET(gdi, DiskInfoArraySize) <
11410 11419 count * sizeof (mptsas_disk_info_t)) {
11411 11420 ret = ENOSPC;
11412 11421 goto copy_out;
11413 11422 }
11414 11423
11415 11424 di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);
11416 11425
11417 11426 mutex_enter(&mpt->m_mutex);
11418 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11419 - MPTSAS_HASH_FIRST);
11420 - while (ptgt != NULL) {
11427 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
11428 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
11421 11429 if (i >= count) {
11422 11430 /*
11423 11431 * The number of targets changed while we weren't
11424 11432 * looking, so give up.
11425 11433 */
11434 + refhash_rele(mpt->m_targets, ptgt);
11426 11435 mutex_exit(&mpt->m_mutex);
11427 11436 kmem_free(di, count * sizeof (mptsas_disk_info_t));
11428 11437 return (EAGAIN);
11429 11438 }
11430 11439 di[i].Instance = mpt->m_instance;
11431 11440 di[i].Enclosure = ptgt->m_enclosure;
11432 11441 di[i].Slot = ptgt->m_slot_num;
11433 - di[i].SasAddress = ptgt->m_sas_wwn;
11434 -
11435 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11436 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11442 + di[i].SasAddress = ptgt->m_addr.mta_wwn;
11437 11443 i++;
11438 11444 }
11439 11445 mutex_exit(&mpt->m_mutex);
11440 11446 STRUCT_FSET(gdi, DiskCount, i);
11441 11447
11442 11448 /* Copy out the disk information to the caller. */
11443 11449 if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
11444 11450 i * sizeof (mptsas_disk_info_t), mode) != 0) {
11445 11451 ret = EFAULT;
11446 11452 }
11447 11453
11448 11454 kmem_free(di, count * sizeof (mptsas_disk_info_t));
11449 11455
11450 11456 copy_out:
11451 11457 if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
11452 11458 mode) != 0) {
11453 11459 ret = EFAULT;
11454 11460 }
11455 11461
11456 11462 return (ret);
11457 11463 }
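get_disk_info() makes two passes over the target set: one to count, one to fill the
caller's array, giving up with EAGAIN (after releasing its iteration hold) if targets
appeared in between. A compact user-space sketch of that count-then-fill pattern over a
plain linked list rather than the driver's refhash:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct node { int id; struct node *next; } node_t;

/*
 * Count the list, size a buffer from that count, then re-walk and stop
 * with EAGAIN if more entries show up than were counted.
 */
static int
snapshot(node_t *head, int **out, int *nout)
{
	int count = 0, i = 0, *buf;
	node_t *n;

	for (n = head; n != NULL; n = n->next)
		count++;

	buf = calloc(count ? count : 1, sizeof (int));
	if (buf == NULL)
		return (ENOMEM);

	for (n = head; n != NULL; n = n->next) {
		if (i >= count) {	/* list changed underneath us */
			free(buf);
			return (EAGAIN);
		}
		buf[i++] = n->id;
	}
	*out = buf;
	*nout = i;
	return (0);
}

int
main(void)
{
	node_t c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	int *ids = NULL;
	int n = 0;
	int rv;

	rv = snapshot(&a, &ids, &n);
	if (rv == 0) {
		(void) printf("%d entries\n", n);
		free(ids);
	}
	return (rv);
}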
11458 11464
11459 11465 static int
11460 11466 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11461 11467 int *rval)
11462 11468 {
11463 11469 int status = 0;
11464 11470 mptsas_t *mpt;
11465 11471 mptsas_update_flash_t flashdata;
11466 11472 mptsas_pass_thru_t passthru_data;
11467 11473 mptsas_adapter_data_t adapter_data;
11468 11474 mptsas_pci_info_t pci_info;
11469 11475 int copylen;
11470 11476
11471 11477 int iport_flag = 0;
11472 11478 dev_info_t *dip = NULL;
11473 11479 mptsas_phymask_t phymask = 0;
11474 11480 struct devctl_iocdata *dcp = NULL;
11475 11481 char *addr = NULL;
11476 11482 mptsas_target_t *ptgt = NULL;
11477 11483
11478 11484 *rval = MPTIOCTL_STATUS_GOOD;
11479 11485 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11480 11486 return (EPERM);
11481 11487 }
11482 11488
11483 11489 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11484 11490 if (mpt == NULL) {
11485 11491 /*
11486 11492 		 * Called from an iport node; get the iport's state
11487 11493 */
11488 11494 iport_flag = 1;
11489 11495 dip = mptsas_get_dip_from_dev(dev, &phymask);
11490 11496 if (dip == NULL) {
11491 11497 return (ENXIO);
11492 11498 }
11493 11499 mpt = DIP2MPT(dip);
11494 11500 }
11495 11501 /* Make sure power level is D0 before accessing registers */
11496 11502 mutex_enter(&mpt->m_mutex);
11497 11503 if (mpt->m_options & MPTSAS_OPT_PM) {
11498 11504 (void) pm_busy_component(mpt->m_dip, 0);
11499 11505 if (mpt->m_power_level != PM_LEVEL_D0) {
11500 11506 mutex_exit(&mpt->m_mutex);
11501 11507 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11502 11508 DDI_SUCCESS) {
11503 11509 mptsas_log(mpt, CE_WARN,
11504 11510 "mptsas%d: mptsas_ioctl: Raise power "
11505 11511 "request failed.", mpt->m_instance);
11506 11512 (void) pm_idle_component(mpt->m_dip, 0);
11507 11513 return (ENXIO);
11508 11514 }
11509 11515 } else {
11510 11516 mutex_exit(&mpt->m_mutex);
11511 11517 }
11512 11518 } else {
11513 11519 mutex_exit(&mpt->m_mutex);
11514 11520 }
11515 11521
11516 11522 if (iport_flag) {
11517 11523 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
11518 11524 if (status != 0) {
11519 11525 goto out;
11520 11526 }
11521 11527 /*
11522 11528 		 * The following code controls the OK2RM LED; it doesn't affect
11523 11529 * the ioctl return status.
11524 11530 */
11525 11531 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
11526 11532 (cmd == DEVCTL_DEVICE_OFFLINE)) {
11527 11533 if (ndi_dc_allochdl((void *)data, &dcp) !=
11528 11534 NDI_SUCCESS) {
11529 11535 goto out;
11530 11536 }
11531 11537 addr = ndi_dc_getaddr(dcp);
11532 11538 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
11533 11539 if (ptgt == NULL) {
11534 11540 NDBG14(("mptsas_ioctl led control: tgt %s not "
11535 11541 "found", addr));
11536 11542 ndi_dc_freehdl(dcp);
11537 11543 goto out;
11538 11544 }
11539 11545 mutex_enter(&mpt->m_mutex);
11540 11546 if (cmd == DEVCTL_DEVICE_ONLINE) {
11541 11547 ptgt->m_tgt_unconfigured = 0;
11542 11548 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
11543 11549 ptgt->m_tgt_unconfigured = 1;
11544 11550 }
11545 11551 if (cmd == DEVCTL_DEVICE_OFFLINE) {
11546 11552 ptgt->m_led_status |=
11547 11553 (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
11548 11554 } else {
11549 11555 ptgt->m_led_status &=
11550 11556 ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
11551 11557 }
11552 11558 (void) mptsas_flush_led_status(mpt, ptgt);
11553 11559 mutex_exit(&mpt->m_mutex);
11554 11560 ndi_dc_freehdl(dcp);
11555 11561 }
11556 11562 goto out;
11557 11563 }
11558 11564 switch (cmd) {
11559 11565 case MPTIOCTL_GET_DISK_INFO:
11560 11566 status = get_disk_info(mpt, data, mode);
11561 11567 break;
11562 11568 case MPTIOCTL_LED_CONTROL:
11563 11569 status = led_control(mpt, data, mode);
11564 11570 break;
11565 11571 case MPTIOCTL_UPDATE_FLASH:
11566 11572 if (ddi_copyin((void *)data, &flashdata,
11567 11573 sizeof (struct mptsas_update_flash), mode)) {
11568 11574 status = EFAULT;
11569 11575 break;
11570 11576 }
11571 11577
11572 11578 mutex_enter(&mpt->m_mutex);
11573 11579 if (mptsas_update_flash(mpt,
11574 11580 (caddr_t)(long)flashdata.PtrBuffer,
11575 11581 flashdata.ImageSize, flashdata.ImageType, mode)) {
11576 11582 status = EFAULT;
11577 11583 }
11578 11584
11579 11585 /*
11580 11586 * Reset the chip to start using the new
11581 11587 		 * firmware. Reset even if the update failed.
11582 11588 */
11583 11589 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11584 11590 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
11585 11591 status = EFAULT;
11586 11592 }
11587 11593 mutex_exit(&mpt->m_mutex);
11588 11594 break;
11589 11595 case MPTIOCTL_PASS_THRU:
11590 11596 /*
11591 11597 * The user has requested to pass through a command to
11592 11598 * be executed by the MPT firmware. Call our routine
11593 11599 * which does this. Only allow one passthru IOCTL at
11594 11600 * one time. Other threads will block on
11595 11601 		 * m_passthru_mutex, which is an adaptive mutex.
11596 11602 */
11597 11603 if (ddi_copyin((void *)data, &passthru_data,
11598 11604 sizeof (mptsas_pass_thru_t), mode)) {
11599 11605 status = EFAULT;
11600 11606 break;
11601 11607 }
11602 11608 mutex_enter(&mpt->m_passthru_mutex);
11603 11609 mutex_enter(&mpt->m_mutex);
11604 11610 status = mptsas_pass_thru(mpt, &passthru_data, mode);
11605 11611 mutex_exit(&mpt->m_mutex);
11606 11612 mutex_exit(&mpt->m_passthru_mutex);
11607 11613
11608 11614 break;
11609 11615 case MPTIOCTL_GET_ADAPTER_DATA:
11610 11616 /*
11611 11617 * The user has requested to read adapter data. Call
11612 11618 * our routine which does this.
11613 11619 */
11614 11620 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
11615 11621 if (ddi_copyin((void *)data, (void *)&adapter_data,
11616 11622 sizeof (mptsas_adapter_data_t), mode)) {
11617 11623 status = EFAULT;
11618 11624 break;
11619 11625 }
11620 11626 if (adapter_data.StructureLength >=
11621 11627 sizeof (mptsas_adapter_data_t)) {
11622 11628 adapter_data.StructureLength = (uint32_t)
11623 11629 sizeof (mptsas_adapter_data_t);
11624 11630 copylen = sizeof (mptsas_adapter_data_t);
11625 11631 mutex_enter(&mpt->m_mutex);
11626 11632 mptsas_read_adapter_data(mpt, &adapter_data);
11627 11633 mutex_exit(&mpt->m_mutex);
11628 11634 } else {
11629 11635 adapter_data.StructureLength = (uint32_t)
11630 11636 sizeof (mptsas_adapter_data_t);
11631 11637 copylen = sizeof (adapter_data.StructureLength);
11632 11638 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11633 11639 }
11634 11640 if (ddi_copyout((void *)(&adapter_data), (void *)data,
11635 11641 copylen, mode) != 0) {
11636 11642 status = EFAULT;
11637 11643 }
11638 11644 break;
11639 11645 case MPTIOCTL_GET_PCI_INFO:
11640 11646 /*
11641 11647 * The user has requested to read pci info. Call
11642 11648 * our routine which does this.
11643 11649 */
11644 11650 bzero(&pci_info, sizeof (mptsas_pci_info_t));
11645 11651 mutex_enter(&mpt->m_mutex);
11646 11652 mptsas_read_pci_info(mpt, &pci_info);
11647 11653 mutex_exit(&mpt->m_mutex);
11648 11654 if (ddi_copyout((void *)(&pci_info), (void *)data,
11649 11655 sizeof (mptsas_pci_info_t), mode) != 0) {
11650 11656 status = EFAULT;
11651 11657 }
11652 11658 break;
11653 11659 case MPTIOCTL_RESET_ADAPTER:
11654 11660 mutex_enter(&mpt->m_mutex);
11655 11661 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
11656 11662 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11657 11663 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
11658 11664 "failed");
11659 11665 status = EFAULT;
11660 11666 }
11661 11667 mutex_exit(&mpt->m_mutex);
11662 11668 break;
11663 11669 case MPTIOCTL_DIAG_ACTION:
11664 11670 /*
11665 11671 * The user has done a diag buffer action. Call our
11666 11672 * routine which does this. Only allow one diag action
11667 11673 * at one time.
11668 11674 */
11669 11675 mutex_enter(&mpt->m_mutex);
11670 11676 if (mpt->m_diag_action_in_progress) {
11671 11677 mutex_exit(&mpt->m_mutex);
11672 11678 return (EBUSY);
11673 11679 }
11674 11680 mpt->m_diag_action_in_progress = 1;
11675 11681 status = mptsas_diag_action(mpt,
11676 11682 (mptsas_diag_action_t *)data, mode);
11677 11683 mpt->m_diag_action_in_progress = 0;
11678 11684 mutex_exit(&mpt->m_mutex);
11679 11685 break;
11680 11686 case MPTIOCTL_EVENT_QUERY:
11681 11687 /*
11682 11688 * The user has done an event query. Call our routine
11683 11689 * which does this.
11684 11690 */
11685 11691 status = mptsas_event_query(mpt,
11686 11692 (mptsas_event_query_t *)data, mode, rval);
11687 11693 break;
11688 11694 case MPTIOCTL_EVENT_ENABLE:
11689 11695 /*
11690 11696 * The user has done an event enable. Call our routine
11691 11697 * which does this.
11692 11698 */
11693 11699 status = mptsas_event_enable(mpt,
11694 11700 (mptsas_event_enable_t *)data, mode, rval);
11695 11701 break;
11696 11702 case MPTIOCTL_EVENT_REPORT:
11697 11703 /*
11698 11704 * The user has done an event report. Call our routine
11699 11705 * which does this.
11700 11706 */
11701 11707 status = mptsas_event_report(mpt,
11702 11708 (mptsas_event_report_t *)data, mode, rval);
11703 11709 break;
11704 11710 case MPTIOCTL_REG_ACCESS:
11705 11711 /*
11706 11712 * The user has requested register access. Call our
11707 11713 * routine which does this.
11708 11714 */
11709 11715 status = mptsas_reg_access(mpt,
11710 11716 (mptsas_reg_access_t *)data, mode);
11711 11717 break;
11712 11718 default:
11713 11719 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
11714 11720 rval);
11715 11721 break;
11716 11722 }
11717 11723
11718 11724 out:
11719 11725 return (status);
11720 11726 }
11721 11727
11722 11728 int
11723 11729 mptsas_restart_ioc(mptsas_t *mpt)
11724 11730 {
11725 11731 int rval = DDI_SUCCESS;
11726 11732 mptsas_target_t *ptgt = NULL;
11727 11733
11728 11734 ASSERT(mutex_owned(&mpt->m_mutex));
11729 11735
11730 11736 /*
11731 11737 * Set a flag telling I/O path that we're processing a reset. This is
11732 11738 * needed because after the reset is complete, the hash table still
11733 11739 * needs to be rebuilt. If I/Os are started before the hash table is
11734 11740 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
11735 11741 * so that they can be retried.
11736 11742 */
11737 11743 mpt->m_in_reset = TRUE;
11738 11744
11739 11745 /*
11740 11746 * Set all throttles to HOLD
11741 11747 */
11742 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11743 - MPTSAS_HASH_FIRST);
11744 - while (ptgt != NULL) {
11748 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
11749 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
11745 11750 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
11746 -
11747 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11748 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11749 11751 }
11750 11752
11751 11753 /*
11752 11754 * Disable interrupts
11753 11755 */
11754 11756 MPTSAS_DISABLE_INTR(mpt);
11755 11757
11756 11758 /*
11757 11759 * Abort all commands: outstanding commands, commands in waitq and
11758 11760 * tx_waitq.
11759 11761 */
11760 11762 mptsas_flush_hba(mpt);
11761 11763
11762 11764 /*
11763 11765 * Reinitialize the chip.
11764 11766 */
11765 11767 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
11766 11768 rval = DDI_FAILURE;
11767 11769 }
11768 11770
11769 11771 /*
11770 11772 * Enable interrupts again
11771 11773 */
11772 11774 MPTSAS_ENABLE_INTR(mpt);
11773 11775
11774 11776 /*
11775 11777 * If mptsas_init_chip was successful, update the driver data.
11776 11778 */
11777 11779 if (rval == DDI_SUCCESS) {
11778 11780 mptsas_update_driver_data(mpt);
11779 11781 }
11780 11782
11781 11783 /*
11782 11784 * Reset the throttles
11783 11785 */
11784 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11785 - MPTSAS_HASH_FIRST);
11786 - while (ptgt != NULL) {
11786 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
11787 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
11787 11788 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
11788 -
11789 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11790 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11791 11789 }
11792 11790
11793 11791 mptsas_doneq_empty(mpt);
11794 11792 mptsas_restart_hba(mpt);
11795 11793
11796 11794 if (rval != DDI_SUCCESS) {
11797 11795 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
11798 11796 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
11799 11797 }
11800 11798
11801 11799 /*
11802 11800 * Clear the reset flag so that I/Os can continue.
11803 11801 */
11804 11802 mpt->m_in_reset = FALSE;
11805 11803
11806 11804 return (rval);
11807 11805 }
11808 11806
11809 11807 static int
11810 11808 mptsas_init_chip(mptsas_t *mpt, int first_time)
11811 11809 {
11812 11810 ddi_dma_cookie_t cookie;
11813 11811 uint32_t i;
11814 11812 int rval;
11815 11813
11816 11814 /*
11817 11815 * Check to see if the firmware image is valid
11818 11816 */
11819 11817 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
11820 11818 MPI2_DIAG_FLASH_BAD_SIG) {
11821 11819 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
11822 11820 goto fail;
11823 11821 }
11824 11822
11825 11823 /*
11826 11824 * Reset the chip
11827 11825 */
11828 11826 rval = mptsas_ioc_reset(mpt, first_time);
11829 11827 if (rval == MPTSAS_RESET_FAIL) {
11830 11828 mptsas_log(mpt, CE_WARN, "hard reset failed!");
11831 11829 goto fail;
11832 11830 }
11833 11831
11834 11832 if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
11835 11833 goto mur;
11836 11834 }
11837 11835 /*
11838 11836 * Setup configuration space
11839 11837 */
11840 11838 if (mptsas_config_space_init(mpt) == FALSE) {
11841 11839 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
11842 11840 "failed!");
11843 11841 goto fail;
11844 11842 }
11845 11843
11846 11844 /*
11847 11845 * IOC facts can change after a diag reset so all buffers that are
11848 11846 * based on these numbers must be de-allocated and re-allocated. Get
11849 11847 * new IOC facts each time chip is initialized.
11850 11848 */
11851 11849 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
11852 11850 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
11853 11851 goto fail;
11854 11852 }
11855 11853
11854 + mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
11855 + mptsas_target_addr_hash, mptsas_target_addr_cmp,
11856 + mptsas_target_free, sizeof (mptsas_target_t),
11857 + offsetof(mptsas_target_t, m_link),
11858 + offsetof(mptsas_target_t, m_addr), KM_SLEEP);
11859 +
11856 11860 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
11857 11861 goto fail;
11858 11862 }
11859 11863 /*
11860 11864 * Allocate request message frames, reply free queue, reply descriptor
11861 11865 * post queue, and reply message frames using latest IOC facts.
11862 11866 */
11863 11867 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
11864 11868 mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
11865 11869 goto fail;
11866 11870 }
11867 11871 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
11868 11872 mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
11869 11873 goto fail;
11870 11874 }
11871 11875 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
11872 11876 mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
11873 11877 goto fail;
11874 11878 }
11875 11879 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
11876 11880 mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
11877 11881 goto fail;
11878 11882 }
11879 11883
11880 11884 mur:
11881 11885 /*
11882 11886 * Re-Initialize ioc to operational state
11883 11887 */
11884 11888 if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
11885 11889 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
11886 11890 goto fail;
11887 11891 }
11888 11892
11889 11893 mptsas_alloc_reply_args(mpt);
11890 11894
11891 11895 /*
11892 11896 * Initialize reply post index. Reply free index is initialized after
11893 11897 * the next loop.
11894 11898 */
11895 11899 mpt->m_post_index = 0;
11896 11900
11897 11901 /*
11898 11902 * Initialize the Reply Free Queue with the physical addresses of our
11899 11903 * reply frames.
11900 11904 */
11901 11905 cookie.dmac_address = mpt->m_reply_frame_dma_addr;
11902 11906 for (i = 0; i < mpt->m_max_replies; i++) {
11903 11907 ddi_put32(mpt->m_acc_free_queue_hdl,
11904 11908 &((uint32_t *)(void *)mpt->m_free_queue)[i],
11905 11909 cookie.dmac_address);
11906 11910 cookie.dmac_address += mpt->m_reply_frame_size;
11907 11911 }
11908 11912 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11909 11913 DDI_DMA_SYNC_FORDEV);
11910 11914
11911 11915 /*
11912 11916 * Initialize the reply free index to one past the last frame on the
11913 11917 * queue. This will signify that the queue is empty to start with.
11914 11918 */
11915 11919 mpt->m_free_index = i;
11916 11920 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
11917 11921
11918 11922 /*
11919 11923 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
11920 11924 */
11921 11925 for (i = 0; i < mpt->m_post_queue_depth; i++) {
11922 11926 ddi_put64(mpt->m_acc_post_queue_hdl,
11923 11927 &((uint64_t *)(void *)mpt->m_post_queue)[i],
11924 11928 0xFFFFFFFFFFFFFFFF);
11925 11929 }
11926 11930 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
11927 11931 DDI_DMA_SYNC_FORDEV);
11928 11932
11929 11933 /*
11930 11934 * Enable ports
11931 11935 */
11932 11936 if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
11933 11937 mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
11934 11938 goto fail;
11935 11939 }
11936 11940
11937 11941 /*
11938 11942 * enable events
11939 11943 */
11940 11944 if (mptsas_ioc_enable_event_notification(mpt)) {
11941 11945 goto fail;
11942 11946 }
11943 11947
11944 11948 /*
11945 11949 	 * We need these checks here as well as in attach, since
11946 11950 	 * chip_init is called in multiple places.
11947 11951 */
11948 11952
11949 11953 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
11950 11954 DDI_SUCCESS) ||
11951 11955 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
11952 11956 DDI_SUCCESS) ||
11953 11957 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
11954 11958 DDI_SUCCESS) ||
11955 11959 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
11956 11960 DDI_SUCCESS) ||
11957 11961 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
11958 11962 DDI_SUCCESS)) {
11959 11963 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11960 11964 goto fail;
11961 11965 }
11962 11966
11963 11967 /* Check all acc handles */
11964 11968 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
11965 11969 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
11966 11970 DDI_SUCCESS) ||
11967 11971 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
11968 11972 DDI_SUCCESS) ||
11969 11973 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
11970 11974 DDI_SUCCESS) ||
11971 11975 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
11972 11976 DDI_SUCCESS) ||
11973 11977 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
11974 11978 DDI_SUCCESS) ||
11975 11979 (mptsas_check_acc_handle(mpt->m_config_handle) !=
11976 11980 DDI_SUCCESS)) {
11977 11981 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11978 11982 goto fail;
11979 11983 }
11980 11984
11981 11985 return (DDI_SUCCESS);
11982 11986
11983 11987 fail:
11984 11988 return (DDI_FAILURE);
11985 11989 }
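mptsas_init_chip() now creates the target table with refhash_create(), supplying a hash
function and comparator keyed on the target's embedded address. The following user-space
sketch suggests what hashing and comparing such an address key might look like; the key
layout and logic here are assumptions for illustration, not the driver's
mptsas_target_addr_hash()/mptsas_target_addr_cmp().

#include <stdint.h>
#include <stdio.h>

/* Hypothetical address key: SAS WWN plus physical-port mask. */
typedef struct {
	uint64_t ta_wwn;
	uint32_t ta_phymask;
} fake_addr_t;

/* Fold the WWN and phymask into a bucket-friendly value. */
static uint64_t
addr_hash(const void *vp)
{
	const fake_addr_t *a = vp;

	return (a->ta_wwn ^ ((uint64_t)a->ta_phymask << 32) ^ a->ta_phymask);
}

/* Return 0 when two keys name the same target. */
static int
addr_cmp(const void *v1, const void *v2)
{
	const fake_addr_t *a1 = v1, *a2 = v2;

	if (a1->ta_wwn != a2->ta_wwn)
		return (-1);
	return (a1->ta_phymask == a2->ta_phymask ? 0 : -1);
}

int
main(void)
{
	fake_addr_t x = { 0x5000c500a1b2c3d4ULL, 0x1 };
	fake_addr_t y = x;

	(void) printf("hash 0x%llx, equal %d\n",
	    (unsigned long long)addr_hash(&x), addr_cmp(&x, &y) == 0);
	return (0);
}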
11986 11990
11987 11991 static int
11988 11992 mptsas_get_pci_cap(mptsas_t *mpt)
11989 11993 {
11990 11994 ushort_t caps_ptr, cap, cap_count;
11991 11995
11992 11996 if (mpt->m_config_handle == NULL)
11993 11997 return (FALSE);
11994 11998 /*
11995 11999 * Check if capabilities list is supported and if so,
11996 12000 * get initial capabilities pointer and clear bits 0,1.
11997 12001 */
11998 12002 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
11999 12003 & PCI_STAT_CAP) {
12000 12004 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12001 12005 PCI_CONF_CAP_PTR), 4);
12002 12006 } else {
12003 12007 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
12004 12008 }
12005 12009
12006 12010 /*
12007 12011 * Walk capabilities if supported.
12008 12012 */
12009 12013 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
12010 12014
12011 12015 /*
12012 12016 * Check that we haven't exceeded the maximum number of
12013 12017 * capabilities and that the pointer is in a valid range.
12014 12018 */
12015 12019 if (++cap_count > 48) {
12016 12020 mptsas_log(mpt, CE_WARN,
12017 12021 "too many device capabilities.\n");
12018 12022 break;
12019 12023 }
12020 12024 if (caps_ptr < 64) {
12021 12025 mptsas_log(mpt, CE_WARN,
12022 12026 "capabilities pointer 0x%x out of range.\n",
12023 12027 caps_ptr);
12024 12028 break;
12025 12029 }
12026 12030
12027 12031 /*
12028 12032 * Get next capability and check that it is valid.
12029 12033 * For now, we only support power management.
12030 12034 */
12031 12035 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
12032 12036 switch (cap) {
12033 12037 case PCI_CAP_ID_PM:
12034 12038 mptsas_log(mpt, CE_NOTE,
12035 12039 "?mptsas%d supports power management.\n",
12036 12040 mpt->m_instance);
12037 12041 mpt->m_options |= MPTSAS_OPT_PM;
12038 12042
12039 12043 /* Save PMCSR offset */
12040 12044 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
12041 12045 break;
12042 12046 /*
12043 12047 * The following capabilities are valid. Any others
12044 12048 * will cause a message to be logged.
12045 12049 */
12046 12050 case PCI_CAP_ID_VPD:
12047 12051 case PCI_CAP_ID_MSI:
12048 12052 case PCI_CAP_ID_PCIX:
12049 12053 case PCI_CAP_ID_PCI_E:
12050 12054 case PCI_CAP_ID_MSI_X:
12051 12055 break;
12052 12056 default:
12053 12057 mptsas_log(mpt, CE_NOTE,
12054 12058 "?mptsas%d unrecognized capability "
12055 12059 "0x%x.\n", mpt->m_instance, cap);
12056 12060 break;
12057 12061 }
12058 12062
12059 12063 /*
12060 12064 * Get next capabilities pointer and clear bits 0,1.
12061 12065 */
12062 12066 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12063 12067 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
12064 12068 }
12065 12069 return (TRUE);
12066 12070 }
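mptsas_get_pci_cap() walks the PCI capability chain with two sanity checks: a cap on the
number of entries (48) and a lower bound on each pointer (64), masking the low two bits
at every step. A stand-alone walk over a fabricated 256-byte config space that applies
the same checks (offsets and contents are made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define	CAP_PTR_NULL	0x00

/* 256-byte fake PCI config space with a two-entry capability chain. */
static uint8_t cfg[256];

int
main(void)
{
	uint8_t ptr;
	int count = 0;

	cfg[0x34] = 0x40;		/* capabilities pointer */
	cfg[0x40] = 0x01;		/* PCI_CAP_ID_PM */
	cfg[0x41] = 0x50;		/* next capability */
	cfg[0x50] = 0x10;		/* PCI_CAP_ID_PCI_E */
	cfg[0x51] = CAP_PTR_NULL;	/* end of chain */

	for (ptr = cfg[0x34] & ~0x3; ptr != CAP_PTR_NULL;
	    ptr = cfg[ptr + 1] & ~0x3) {
		if (++count > 48 || ptr < 64)	/* same sanity checks */
			break;
		(void) printf("capability 0x%02x at 0x%02x\n", cfg[ptr], ptr);
	}
	return (0);
}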
12067 12071
12068 12072 static int
12069 12073 mptsas_init_pm(mptsas_t *mpt)
12070 12074 {
12071 12075 char pmc_name[16];
12072 12076 char *pmc[] = {
12073 12077 NULL,
12074 12078 "0=Off (PCI D3 State)",
12075 12079 "3=On (PCI D0 State)",
12076 12080 NULL
12077 12081 };
12078 12082 uint16_t pmcsr_stat;
12079 12083
12080 12084 if (mptsas_get_pci_cap(mpt) == FALSE) {
12081 12085 return (DDI_FAILURE);
12082 12086 }
12083 12087 /*
12084 12088 	 * If the PCI capabilities do not include PM, there is no need
12085 12089 	 * to register the pm-components
12086 12090 */
12087 12091 if (!(mpt->m_options & MPTSAS_OPT_PM))
12088 12092 return (DDI_SUCCESS);
12089 12093 /*
12090 12094 * If power management is supported by this chip, create
12091 12095 * pm-components property for the power management framework
12092 12096 */
12093 12097 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12094 12098 pmc[0] = pmc_name;
12095 12099 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12096 12100 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12097 12101 mpt->m_options &= ~MPTSAS_OPT_PM;
12098 12102 mptsas_log(mpt, CE_WARN,
12099 12103 "mptsas%d: pm-component property creation failed.",
12100 12104 mpt->m_instance);
12101 12105 return (DDI_FAILURE);
12102 12106 }
12103 12107
12104 12108 /*
12105 12109 * Power on device.
12106 12110 */
12107 12111 (void) pm_busy_component(mpt->m_dip, 0);
12108 12112 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12109 12113 mpt->m_pmcsr_offset);
12110 12114 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12111 12115 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12112 12116 mpt->m_instance);
12113 12117 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12114 12118 PCI_PMCSR_D0);
12115 12119 }
12116 12120 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12117 12121 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12118 12122 return (DDI_FAILURE);
12119 12123 }
12120 12124 mpt->m_power_level = PM_LEVEL_D0;
12121 12125 /*
12122 12126 * Set pm idle delay.
12123 12127 */
12124 12128 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12125 12129 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12126 12130
12127 12131 return (DDI_SUCCESS);
12128 12132 }
12129 12133
12130 12134 static int
12131 12135 mptsas_register_intrs(mptsas_t *mpt)
12132 12136 {
12133 12137 dev_info_t *dip;
12134 12138 int intr_types;
12135 12139
12136 12140 dip = mpt->m_dip;
12137 12141
12138 12142 /* Get supported interrupt types */
12139 12143 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12140 12144 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
12141 12145 "failed\n");
12142 12146 return (FALSE);
12143 12147 }
12144 12148
12145 12149 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12146 12150
12147 12151 /*
12148 12152 * Try MSI, but fall back to FIXED
12149 12153 */
12150 12154 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12151 12155 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12152 12156 NDBG0(("Using MSI interrupt type"));
12153 12157 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12154 12158 return (TRUE);
12155 12159 }
12156 12160 }
12157 12161 if (intr_types & DDI_INTR_TYPE_FIXED) {
12158 12162 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12159 12163 NDBG0(("Using FIXED interrupt type"));
12160 12164 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12161 12165 return (TRUE);
12162 12166 } else {
12163 12167 NDBG0(("FIXED interrupt registration failed"));
12164 12168 return (FALSE);
12165 12169 }
12166 12170 }
12167 12171
12168 12172 return (FALSE);
12169 12173 }
12170 12174
12171 12175 static void
12172 12176 mptsas_unregister_intrs(mptsas_t *mpt)
12173 12177 {
12174 12178 mptsas_rem_intrs(mpt);
12175 12179 }
12176 12180
12177 12181 /*
12178 12182 * mptsas_add_intrs:
12179 12183 *
12180 12184 * Register FIXED or MSI interrupts.
12181 12185 */
12182 12186 static int
12183 12187 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12184 12188 {
12185 12189 dev_info_t *dip = mpt->m_dip;
12186 12190 int avail, actual, count = 0;
12187 12191 int i, flag, ret;
12188 12192
12189 12193 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12190 12194
12191 12195 /* Get number of interrupts */
12192 12196 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12193 12197 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12194 12198 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12195 12199 "ret %d count %d\n", ret, count);
12196 12200
12197 12201 return (DDI_FAILURE);
12198 12202 }
12199 12203
12200 12204 /* Get number of available interrupts */
12201 12205 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12202 12206 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12203 12207 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12204 12208 "ret %d avail %d\n", ret, avail);
12205 12209
12206 12210 return (DDI_FAILURE);
12207 12211 }
12208 12212
12209 12213 if (avail < count) {
12210 12214 		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nintrs returned %d, "
12211 12215 "navail() returned %d", count, avail);
12212 12216 }
12213 12217
12214 12218 	/* Mpt only has one interrupt routine */
12215 12219 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12216 12220 count = 1;
12217 12221 }
12218 12222
12219 12223 /* Allocate an array of interrupt handles */
12220 12224 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12221 12225 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12222 12226
12223 12227 flag = DDI_INTR_ALLOC_NORMAL;
12224 12228
12225 12229 /* call ddi_intr_alloc() */
12226 12230 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12227 12231 count, &actual, flag);
12228 12232
12229 12233 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12230 12234 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12231 12235 ret);
12232 12236 kmem_free(mpt->m_htable, mpt->m_intr_size);
12233 12237 return (DDI_FAILURE);
12234 12238 }
12235 12239
12236 12240 /* use interrupt count returned or abort? */
12237 12241 if (actual < count) {
12238 12242 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12239 12243 count, actual);
12240 12244 }
12241 12245
12242 12246 mpt->m_intr_cnt = actual;
12243 12247
12244 12248 /*
12245 12249 * Get priority for first msi, assume remaining are all the same
12246 12250 */
12247 12251 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12248 12252 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12249 12253 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12250 12254
12251 12255 /* Free already allocated intr */
12252 12256 for (i = 0; i < actual; i++) {
12253 12257 (void) ddi_intr_free(mpt->m_htable[i]);
12254 12258 }
12255 12259
12256 12260 kmem_free(mpt->m_htable, mpt->m_intr_size);
12257 12261 return (DDI_FAILURE);
12258 12262 }
12259 12263
12260 12264 /* Test for high level mutex */
12261 12265 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12262 12266 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12263 12267 "Hi level interrupt not supported\n");
12264 12268
12265 12269 /* Free already allocated intr */
12266 12270 for (i = 0; i < actual; i++) {
12267 12271 (void) ddi_intr_free(mpt->m_htable[i]);
12268 12272 }
12269 12273
12270 12274 kmem_free(mpt->m_htable, mpt->m_intr_size);
12271 12275 return (DDI_FAILURE);
12272 12276 }
12273 12277
12274 12278 /* Call ddi_intr_add_handler() */
12275 12279 for (i = 0; i < actual; i++) {
12276 12280 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12277 12281 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12278 12282 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12279 12283 "failed %d\n", ret);
12280 12284
12281 12285 /* Free already allocated intr */
12282 12286 for (i = 0; i < actual; i++) {
12283 12287 (void) ddi_intr_free(mpt->m_htable[i]);
12284 12288 }
12285 12289
12286 12290 kmem_free(mpt->m_htable, mpt->m_intr_size);
12287 12291 return (DDI_FAILURE);
12288 12292 }
12289 12293 }
12290 12294
12291 12295 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12292 12296 != DDI_SUCCESS) {
12293 12297 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12294 12298
12295 12299 /* Free already allocated intr */
12296 12300 for (i = 0; i < actual; i++) {
12297 12301 (void) ddi_intr_free(mpt->m_htable[i]);
12298 12302 }
12299 12303
12300 12304 kmem_free(mpt->m_htable, mpt->m_intr_size);
12301 12305 return (DDI_FAILURE);
12302 12306 }
12303 12307
12304 12308 /*
12305 12309 * Enable interrupts
12306 12310 */
12307 12311 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12308 12312 /* Call ddi_intr_block_enable() for MSI interrupts */
12309 12313 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12310 12314 } else {
12311 12315 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12312 12316 for (i = 0; i < mpt->m_intr_cnt; i++) {
12313 12317 (void) ddi_intr_enable(mpt->m_htable[i]);
12314 12318 }
12315 12319 }
12316 12320 return (DDI_SUCCESS);
12317 12321 }
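Every failure path in mptsas_add_intrs() frees the interrupts allocated so far before
freeing the handle table, so a partial allocation never leaks. A generic sketch of that
unwind-on-failure pattern using plain malloc stand-ins rather than the DDI interrupt
routines:

#include <stdio.h>
#include <stdlib.h>

#define	NRES	4

int
main(void)
{
	void *res[NRES];
	int i, got = 0;

	/* Acquire resources one by one; simulate a failure on the third. */
	for (i = 0; i < NRES; i++) {
		res[i] = (i == 2) ? NULL : malloc(32);
		if (res[i] == NULL)
			break;
		got++;
	}

	if (got < NRES) {
		/* Unwind: free everything acquired so far, then bail out. */
		for (i = 0; i < got; i++)
			free(res[i]);
		(void) printf("failed after %d allocations, unwound\n", got);
		return (1);
	}

	for (i = 0; i < NRES; i++)
		free(res[i]);
	return (0);
}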
12318 12322
12319 12323 /*
12320 12324 * mptsas_rem_intrs:
12321 12325 *
12322 12326 * Unregister FIXED or MSI interrupts
12323 12327 */
12324 12328 static void
12325 12329 mptsas_rem_intrs(mptsas_t *mpt)
12326 12330 {
12327 12331 int i;
12328 12332
12329 12333 NDBG6(("mptsas_rem_intrs"));
12330 12334
12331 12335 /* Disable all interrupts */
12332 12336 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12333 12337 /* Call ddi_intr_block_disable() */
12334 12338 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12335 12339 } else {
12336 12340 for (i = 0; i < mpt->m_intr_cnt; i++) {
12337 12341 (void) ddi_intr_disable(mpt->m_htable[i]);
12338 12342 }
12339 12343 }
12340 12344
12341 12345 /* Call ddi_intr_remove_handler() */
12342 12346 for (i = 0; i < mpt->m_intr_cnt; i++) {
12343 12347 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12344 12348 (void) ddi_intr_free(mpt->m_htable[i]);
12345 12349 }
12346 12350
12347 12351 kmem_free(mpt->m_htable, mpt->m_intr_size);
12348 12352 }
12349 12353
12350 12354 /*
12351 12355 * The IO fault service error handling callback function
12352 12356 */
12353 12357 /*ARGSUSED*/
12354 12358 static int
12355 12359 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
12356 12360 {
12357 12361 /*
12358 12362 * as the driver can always deal with an error in any dma or
12359 12363 * access handle, we can just return the fme_status value.
12360 12364 */
12361 12365 pci_ereport_post(dip, err, NULL);
12362 12366 return (err->fme_status);
12363 12367 }
12364 12368
12365 12369 /*
12366 12370 * mptsas_fm_init - initialize fma capabilities and register with IO
12367 12371 * fault services.
12368 12372 */
12369 12373 static void
12370 12374 mptsas_fm_init(mptsas_t *mpt)
12371 12375 {
12372 12376 /*
12373 12377 * Need to change iblock to priority for new MSI intr
12374 12378 */
12375 12379 ddi_iblock_cookie_t fm_ibc;
12376 12380
12377 12381 /* Only register with IO Fault Services if we have some capability */
12378 12382 if (mpt->m_fm_capabilities) {
12379 12383 /* Adjust access and dma attributes for FMA */
12380 12384 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12381 12385 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12382 12386 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12383 12387
12384 12388 /*
12385 12389 * Register capabilities with IO Fault Services.
12386 12390 * mpt->m_fm_capabilities will be updated to indicate
12387 12391 * capabilities actually supported (not requested.)
12388 12392 */
12389 12393 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12390 12394
12391 12395 /*
12392 12396 * Initialize pci ereport capabilities if ereport
12393 12397 * capable (should always be.)
12394 12398 */
12395 12399 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12396 12400 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12397 12401 pci_ereport_setup(mpt->m_dip);
12398 12402 }
12399 12403
12400 12404 /*
12401 12405 * Register error callback if error callback capable.
12402 12406 */
12403 12407 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12404 12408 ddi_fm_handler_register(mpt->m_dip,
12405 12409 mptsas_fm_error_cb, (void *) mpt);
12406 12410 }
12407 12411 }
12408 12412 }
12409 12413
12410 12414 /*
12411 12415 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12412 12416 * fault services.
12413 12417 *
12414 12418 */
12415 12419 static void
12416 12420 mptsas_fm_fini(mptsas_t *mpt)
12417 12421 {
12418 12422 /* Only unregister FMA capabilities if registered */
12419 12423 if (mpt->m_fm_capabilities) {
12420 12424
12421 12425 /*
12422 12426 * Un-register error callback if error callback capable.
12423 12427 */
12424 12428
12425 12429 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12426 12430 ddi_fm_handler_unregister(mpt->m_dip);
12427 12431 }
12428 12432
12429 12433 /*
12430 12434 * Release any resources allocated by pci_ereport_setup()
12431 12435 */
12432 12436
12433 12437 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12434 12438 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12435 12439 pci_ereport_teardown(mpt->m_dip);
12436 12440 }
12437 12441
12438 12442 /* Unregister from IO Fault Services */
12439 12443 ddi_fm_fini(mpt->m_dip);
12440 12444
12441 12445 /* Adjust access and dma attributes for FMA */
12442 12446 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12443 12447 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12444 12448 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12445 12449
12446 12450 }
12447 12451 }
12448 12452
12449 12453 int
12450 12454 mptsas_check_acc_handle(ddi_acc_handle_t handle)
12451 12455 {
12452 12456 ddi_fm_error_t de;
12453 12457
12454 12458 if (handle == NULL)
12455 12459 return (DDI_FAILURE);
12456 12460 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
12457 12461 return (de.fme_status);
12458 12462 }
12459 12463
12460 12464 int
12461 12465 mptsas_check_dma_handle(ddi_dma_handle_t handle)
12462 12466 {
12463 12467 ddi_fm_error_t de;
12464 12468
12465 12469 if (handle == NULL)
12466 12470 return (DDI_FAILURE);
12467 12471 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
12468 12472 return (de.fme_status);
12469 12473 }
12470 12474
12471 12475 void
12472 12476 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12473 12477 {
12474 12478 uint64_t ena;
12475 12479 char buf[FM_MAX_CLASS];
12476 12480
12477 12481 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12478 12482 ena = fm_ena_generate(0, FM_ENA_FMT1);
12479 12483 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12480 12484 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12481 12485 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12482 12486 }
12483 12487 }
12484 12488
12485 12489 static int
12486 12490 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12487 12491 uint16_t *dev_handle, mptsas_target_t **pptgt)
12488 12492 {
12489 12493 int rval;
12490 12494 uint32_t dev_info;
12491 12495 uint64_t sas_wwn;
12492 12496 mptsas_phymask_t phymask;
12493 12497 uint8_t physport, phynum, config, disk;
12494 - mptsas_slots_t *slots = mpt->m_active;
12495 12498 uint64_t devicename;
12496 12499 uint16_t pdev_hdl;
12497 12500 mptsas_target_t *tmp_tgt = NULL;
12498 12501 uint16_t bay_num, enclosure;
12499 12502
12500 12503 ASSERT(*pptgt == NULL);
12501 12504
12502 12505 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12503 12506 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12504 12507 &bay_num, &enclosure);
12505 12508 if (rval != DDI_SUCCESS) {
12506 12509 rval = DEV_INFO_FAIL_PAGE0;
12507 12510 return (rval);
12508 12511 }
12509 12512
12510 12513 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12511 12514 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12512 12515 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12513 12516 rval = DEV_INFO_WRONG_DEVICE_TYPE;
12514 12517 return (rval);
12515 12518 }
12516 12519
12517 12520 /*
12518 12521 * Check if the dev handle is for a Phys Disk. If so, set return value
12519 12522 * and exit. Don't add Phys Disks to hash.
12520 12523 */
12521 - for (config = 0; config < slots->m_num_raid_configs; config++) {
12524 + for (config = 0; config < mpt->m_num_raid_configs; config++) {
12522 12525 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12523 - if (*dev_handle == slots->m_raidconfig[config].
12526 + if (*dev_handle == mpt->m_raidconfig[config].
12524 12527 m_physdisk_devhdl[disk]) {
12525 12528 rval = DEV_INFO_PHYS_DISK;
12526 12529 return (rval);
12527 12530 }
12528 12531 }
12529 12532 }
12530 12533
12531 12534 /*
12532 12535 * Get SATA Device Name from SAS device page0 for
12533 - * sata device, if device name doesn't exist, set m_sas_wwn to
12536 + * sata device, if device name doesn't exist, set mta_wwn to
12534 12537 * 0 for direct attached SATA. For the device behind the expander
12535 12538 * we still can use STP address assigned by expander.
12536 12539 */
12537 12540 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12538 12541 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12539 12542 mutex_exit(&mpt->m_mutex);
12540 12543 /* alloc a tmp_tgt to send the cmd */
12541 12544 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12542 12545 KM_SLEEP);
12543 12546 tmp_tgt->m_devhdl = *dev_handle;
12544 12547 tmp_tgt->m_deviceinfo = dev_info;
12545 12548 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12546 12549 tmp_tgt->m_qfull_retry_interval =
12547 12550 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12548 12551 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12549 12552 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12550 12553 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12551 12554 mutex_enter(&mpt->m_mutex);
12552 12555 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12553 12556 sas_wwn = devicename;
12554 12557 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12555 12558 sas_wwn = 0;
12556 12559 }
12557 12560 }
12558 12561
12559 12562 phymask = mptsas_physport_to_phymask(mpt, physport);
12560 - *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12563 + *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
12561 12564 dev_info, phymask, phynum);
12562 12565 if (*pptgt == NULL) {
12563 12566 		mptsas_log(mpt, CE_WARN, "Failed to allocate target "
12564 12567 "structure!");
12565 12568 rval = DEV_INFO_FAIL_ALLOC;
12566 12569 return (rval);
12567 12570 }
12568 12571 (*pptgt)->m_enclosure = enclosure;
12569 12572 (*pptgt)->m_slot_num = bay_num;
12570 12573 return (DEV_INFO_SUCCESS);
12571 12574 }
12572 12575
12573 12576 uint64_t
12574 12577 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
12575 12578 {
12576 12579 uint64_t sata_guid = 0, *pwwn = NULL;
12577 12580 int target = ptgt->m_devhdl;
12578 12581 uchar_t *inq83 = NULL;
12579 12582 int inq83_len = 0xFF;
12580 12583 uchar_t *dblk = NULL;
12581 12584 int inq83_retry = 3;
12582 12585 int rval = DDI_FAILURE;
12583 12586
12584 12587 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
12585 12588
12586 12589 inq83_retry:
12587 12590 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
12588 12591 inq83_len, NULL, 1);
12589 12592 if (rval != DDI_SUCCESS) {
12590 12593 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
12591 12594 "0x83 for target:%x, lun:%x failed!", target, lun);
12592 12595 goto out;
12593 12596 }
12594 12597 	/* According to SAT2, the first descriptor is the logical unit name */
12595 12598 dblk = &inq83[4];
12596 12599 if ((dblk[1] & 0x30) != 0) {
12597 12600 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
12598 12601 goto out;
12599 12602 }
12600 12603 pwwn = (uint64_t *)(void *)(&dblk[4]);
12601 12604 if ((dblk[4] & 0xf0) == 0x50) {
12602 12605 sata_guid = BE_64(*pwwn);
12603 12606 goto out;
12604 12607 } else if (dblk[4] == 'A') {
12605 12608 NDBG20(("SATA drive has no NAA format GUID."));
12606 12609 goto out;
12607 12610 } else {
12608 12611 /* The data is not ready, wait and retry */
12609 12612 inq83_retry--;
12610 12613 if (inq83_retry <= 0) {
12611 12614 goto out;
12612 12615 }
12613 12616 NDBG20(("The GUID is not ready, retry..."));
12614 12617 delay(1 * drv_usectohz(1000000));
12615 12618 goto inq83_retry;
12616 12619 }
12617 12620 out:
12618 12621 kmem_free(inq83, inq83_len);
12619 12622 return (sata_guid);
12620 12623 }
12621 12624
12622 12625 static int
12623 12626 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12624 12627 unsigned char *buf, int len, int *reallen, uchar_t evpd)
12625 12628 {
12626 12629 uchar_t cdb[CDB_GROUP0];
12627 12630 struct scsi_address ap;
12628 12631 struct buf *data_bp = NULL;
12629 12632 int resid = 0;
12630 12633 int ret = DDI_FAILURE;
12631 12634
12632 12635 ASSERT(len <= 0xffff);
12633 12636
12634 12637 ap.a_target = MPTSAS_INVALID_DEVHDL;
12635 12638 ap.a_lun = (uchar_t)(lun);
12636 12639 ap.a_hba_tran = mpt->m_tran;
12637 12640
12638 12641 data_bp = scsi_alloc_consistent_buf(&ap,
12639 12642 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12640 12643 if (data_bp == NULL) {
12641 12644 return (ret);
12642 12645 }
12643 12646 bzero(cdb, CDB_GROUP0);
12644 12647 cdb[0] = SCMD_INQUIRY;
12645 12648 cdb[1] = evpd;
12646 12649 cdb[2] = page;
12647 12650 cdb[3] = (len & 0xff00) >> 8;
12648 12651 cdb[4] = (len & 0x00ff);
12649 12652 cdb[5] = 0;
12650 12653
12651 12654 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12652 12655 &resid);
12653 12656 if (ret == DDI_SUCCESS) {
12654 12657 if (reallen) {
12655 12658 *reallen = len - resid;
12656 12659 }
12657 12660 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12658 12661 }
12659 12662 if (data_bp) {
12660 12663 scsi_free_consistent_buf(data_bp);
12661 12664 }
12662 12665 return (ret);
12663 12666 }
12664 12667
12665 12668 static int
12666 12669 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
12667 12670 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
12668 12671 int *resid)
12669 12672 {
12670 12673 struct scsi_pkt *pktp = NULL;
12671 12674 scsi_hba_tran_t *tran_clone = NULL;
12672 12675 mptsas_tgt_private_t *tgt_private = NULL;
12673 12676 int ret = DDI_FAILURE;
12674 12677
12675 12678 /*
12676 12679 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
12677 12680 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
12678 12681 * to simulate the cmds from sd
12679 12682 */
12680 12683 tran_clone = kmem_alloc(
12681 12684 sizeof (scsi_hba_tran_t), KM_SLEEP);
12682 12685 if (tran_clone == NULL) {
12683 12686 goto out;
12684 12687 }
12685 12688 bcopy((caddr_t)mpt->m_tran,
12686 12689 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
12687 12690 tgt_private = kmem_alloc(
12688 12691 sizeof (mptsas_tgt_private_t), KM_SLEEP);
12689 12692 if (tgt_private == NULL) {
12690 12693 goto out;
12691 12694 }
12692 12695 tgt_private->t_lun = ap->a_lun;
12693 12696 tgt_private->t_private = ptgt;
12694 12697 tran_clone->tran_tgt_private = tgt_private;
12695 12698 ap->a_hba_tran = tran_clone;
12696 12699
12697 12700 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
12698 12701 data_bp, cdblen, sizeof (struct scsi_arq_status),
12699 12702 0, PKT_CONSISTENT, NULL, NULL);
12700 12703 if (pktp == NULL) {
12701 12704 goto out;
12702 12705 }
12703 12706 bcopy(cdb, pktp->pkt_cdbp, cdblen);
12704 12707 pktp->pkt_flags = FLAG_NOPARITY;
12705 12708 if (scsi_poll(pktp) < 0) {
12706 12709 goto out;
12707 12710 }
12708 12711 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
12709 12712 goto out;
12710 12713 }
12711 12714 if (resid != NULL) {
12712 12715 *resid = pktp->pkt_resid;
12713 12716 }
12714 12717
12715 12718 ret = DDI_SUCCESS;
12716 12719 out:
12717 12720 if (pktp) {
12718 12721 scsi_destroy_pkt(pktp);
12719 12722 }
12720 12723 if (tran_clone) {
12721 12724 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
12722 12725 }
12723 12726 if (tgt_private) {
12724 12727 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
12725 12728 }
12726 12729 return (ret);
12727 12730 }
12728 12731 static int
12729 12732 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
12730 12733 {
12731 12734 char *cp = NULL;
12732 12735 char *ptr = NULL;
12733 12736 size_t s = 0;
12734 12737 char *wwid_str = NULL;
12735 12738 char *lun_str = NULL;
12736 12739 long lunnum;
12737 12740 long phyid = -1;
12738 12741 int rc = DDI_FAILURE;
12739 12742
12740 12743 ptr = name;
12741 12744 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
12742 12745 ptr++;
12743 12746 if ((cp = strchr(ptr, ',')) == NULL) {
12744 12747 return (DDI_FAILURE);
12745 12748 }
12746 12749
12747 12750 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12748 12751 s = (uintptr_t)cp - (uintptr_t)ptr;
12749 12752
12750 12753 bcopy(ptr, wwid_str, s);
12751 12754 wwid_str[s] = '\0';
12752 12755
12753 12756 ptr = ++cp;
12754 12757
12755 12758 if ((cp = strchr(ptr, '\0')) == NULL) {
12756 12759 goto out;
12757 12760 }
12758 12761 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12759 12762 s = (uintptr_t)cp - (uintptr_t)ptr;
12760 12763
12761 12764 bcopy(ptr, lun_str, s);
12762 12765 lun_str[s] = '\0';
12763 12766
12764 12767 if (name[0] == 'p') {
12765 12768 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
12766 12769 } else {
12767 12770 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
12768 12771 }
12769 12772 if (rc != DDI_SUCCESS)
12770 12773 goto out;
12771 12774
12772 12775 if (phyid != -1) {
12773 12776 ASSERT(phyid < MPTSAS_MAX_PHYS);
12774 12777 *phy = (uint8_t)phyid;
12775 12778 }
12776 12779 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
12777 12780 if (rc != 0)
12778 12781 goto out;
12779 12782
12780 12783 *lun = (int)lunnum;
12781 12784 rc = DDI_SUCCESS;
12782 12785 out:
12783 12786 if (wwid_str)
12784 12787 kmem_free(wwid_str, SCSI_MAXNAMELEN);
12785 12788 if (lun_str)
12786 12789 kmem_free(lun_str, SCSI_MAXNAMELEN);
12787 12790
12788 12791 return (rc);
12789 12792 }
12790 12793
12791 12794 /*
12792 12795 	 * mptsas_parse_smp_name() parses a SAS WWN string
12793 12796 	 * given in the "wWWN" format
12794 12797 */
12795 12798 static int
12796 12799 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12797 12800 {
12798 12801 char *ptr = name;
12799 12802
12800 12803 if (*ptr != 'w') {
12801 12804 return (DDI_FAILURE);
12802 12805 }
12803 12806
12804 12807 ptr++;
12805 12808 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12806 12809 return (DDI_FAILURE);
12807 12810 }
12808 12811 return (DDI_SUCCESS);
12809 12812 }
12810 12813
12811 12814 static int
12812 12815 mptsas_bus_config(dev_info_t *pdip, uint_t flag,
12813 12816 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
12814 12817 {
12815 12818 int ret = NDI_FAILURE;
12816 12819 int circ = 0;
12817 12820 int circ1 = 0;
12818 12821 mptsas_t *mpt;
12819 12822 char *ptr = NULL;
12820 12823 char *devnm = NULL;
12821 12824 uint64_t wwid = 0;
12822 12825 uint8_t phy = 0xFF;
12823 12826 int lun = 0;
12824 12827 uint_t mflags = flag;
12825 12828 int bconfig = TRUE;
12826 12829
12827 12830 if (scsi_hba_iport_unit_address(pdip) == 0) {
12828 12831 return (DDI_FAILURE);
12829 12832 }
12830 12833
12831 12834 mpt = DIP2MPT(pdip);
12832 12835 if (!mpt) {
12833 12836 return (DDI_FAILURE);
12834 12837 }
12835 12838 /*
12836 12839 * Hold the nexus across the bus_config
12837 12840 */
12838 12841 ndi_devi_enter(scsi_vhci_dip, &circ);
12839 12842 ndi_devi_enter(pdip, &circ1);
12840 12843 switch (op) {
12841 12844 case BUS_CONFIG_ONE:
12842 12845 /* parse wwid/target name out of name given */
12843 12846 if ((ptr = strchr((char *)arg, '@')) == NULL) {
12844 12847 ret = NDI_FAILURE;
12845 12848 break;
12846 12849 }
12847 12850 ptr++;
12848 12851 if (strncmp((char *)arg, "smp", 3) == 0) {
12849 12852 /*
12850 12853 * This is a SMP target device
12851 12854 */
12852 12855 ret = mptsas_parse_smp_name(ptr, &wwid);
12853 12856 if (ret != DDI_SUCCESS) {
12854 12857 ret = NDI_FAILURE;
12855 12858 break;
12856 12859 }
12857 12860 ret = mptsas_config_smp(pdip, wwid, childp);
12858 12861 } else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
12859 12862 /*
12860 12863 * OBP could pass down a non-canonical form
12861 12864 * bootpath without LUN part when LUN is 0.
12862 12865 			 * So the driver needs to adjust the string.
12863 12866 */
12864 12867 if (strchr(ptr, ',') == NULL) {
12865 12868 devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
12866 12869 (void) sprintf(devnm, "%s,0", (char *)arg);
12867 12870 ptr = strchr(devnm, '@');
12868 12871 ptr++;
12869 12872 }
12870 12873
12871 12874 /*
12872 12875 * The device path is wWWID format and the device
12873 12876 * is not SMP target device.
12874 12877 */
12875 12878 ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
12876 12879 if (ret != DDI_SUCCESS) {
12877 12880 ret = NDI_FAILURE;
12878 12881 break;
12879 12882 }
12880 12883 *childp = NULL;
12881 12884 if (ptr[0] == 'w') {
12882 12885 ret = mptsas_config_one_addr(pdip, wwid,
12883 12886 lun, childp);
12884 12887 } else if (ptr[0] == 'p') {
12885 12888 ret = mptsas_config_one_phy(pdip, phy, lun,
12886 12889 childp);
12887 12890 }
12888 12891
12889 12892 /*
12890 12893 * If this is CD/DVD device in OBP path, the
12891 12894 * ndi_busop_bus_config can be skipped as config one
12892 12895 * operation is done above.
12893 12896 */
12894 12897 if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
12895 12898 (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
12896 12899 (strncmp((char *)arg, "disk", 4) == 0)) {
12897 12900 bconfig = FALSE;
12898 12901 ndi_hold_devi(*childp);
12899 12902 }
12900 12903 } else {
12901 12904 ret = NDI_FAILURE;
12902 12905 break;
12903 12906 }
12904 12907
12905 12908 /*
12906 12909 * DDI group instructed us to use this flag.
12907 12910 */
12908 12911 mflags |= NDI_MDI_FALLBACK;
12909 12912 break;
12910 12913 case BUS_CONFIG_DRIVER:
12911 12914 case BUS_CONFIG_ALL:
12912 12915 mptsas_config_all(pdip);
12913 12916 ret = NDI_SUCCESS;
12914 12917 break;
12915 12918 }
12916 12919
12917 12920 if ((ret == NDI_SUCCESS) && bconfig) {
12918 12921 ret = ndi_busop_bus_config(pdip, mflags, op,
12919 12922 (devnm == NULL) ? arg : devnm, childp, 0);
12920 12923 }
12921 12924
12922 12925 ndi_devi_exit(pdip, circ1);
12923 12926 ndi_devi_exit(scsi_vhci_dip, circ);
12924 12927 if (devnm != NULL)
12925 12928 kmem_free(devnm, SCSI_MAXNAMELEN);
12926 12929 return (ret);
12927 12930 }
12928 12931
12929 12932 static int
12930 12933 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12931 12934 mptsas_target_t *ptgt)
12932 12935 {
12933 12936 int rval = DDI_FAILURE;
12934 12937 struct scsi_inquiry *sd_inq = NULL;
12935 12938 mptsas_t *mpt = DIP2MPT(pdip);
12936 12939
12937 12940 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12938 12941
12939 12942 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12940 12943 SUN_INQSIZE, 0, (uchar_t)0);
12941 12944
12942 12945 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12943 12946 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12944 12947 } else {
12945 12948 rval = DDI_FAILURE;
12946 12949 }
12947 12950
12948 12951 kmem_free(sd_inq, SUN_INQSIZE);
12949 12952 return (rval);
12950 12953 }
12951 12954
12952 12955 static int
12953 12956 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
12954 12957 dev_info_t **lundip)
12955 12958 {
12956 12959 int rval;
12957 12960 mptsas_t *mpt = DIP2MPT(pdip);
12958 12961 int phymask;
12959 12962 mptsas_target_t *ptgt = NULL;
12960 12963
12961 12964 /*
12962 12965 * Get the physical port associated to the iport
12963 12966 */
12964 12967 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12965 12968 "phymask", 0);
12966 12969
12967 12970 ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
12968 12971 if (ptgt == NULL) {
12969 12972 /*
12970 12973 * didn't match any device by searching
12971 12974 */
12972 12975 return (DDI_FAILURE);
12973 12976 }
12974 12977 /*
12975 12978 * If the LUN already exists and the status is online,
12976 12979 * we just return the pointer to dev_info_t directly.
12977 12980 * For the mdi_pathinfo node, we'll handle it in
12978 12981 * mptsas_create_virt_lun()
12979 12982 * TODO should be also in mptsas_handle_dr
[ 409 lines elided ]
12980 12983 */
12981 12984
12982 12985 *lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
12983 12986 if (*lundip != NULL) {
12984 12987 /*
12985 12988 			 * TODO Another scenario is, we hotplug the same disk
12986 12989 * on the same slot, the devhdl changed, is this
12987 12990 * possible?
12988 12991 * tgt_private->t_private != ptgt
12989 12992 */
12990 - if (sasaddr != ptgt->m_sas_wwn) {
12993 + if (sasaddr != ptgt->m_addr.mta_wwn) {
12991 12994 /*
12992 12995 * The device has changed although the devhdl is the
12993 12996 * same (Enclosure mapping mode, change drive on the
12994 12997 * same slot)
12995 12998 */
12996 12999 return (DDI_FAILURE);
12997 13000 }
12998 13001 return (DDI_SUCCESS);
12999 13002 }
13000 13003
13001 13004 if (phymask == 0) {
13002 13005 /*
13003 13006 * Configure IR volume
13004 13007 */
13005 13008 rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
13006 13009 return (rval);
13007 13010 }
13008 13011 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
[ 8 lines elided ]
13009 13012
13010 13013 return (rval);
13011 13014 }
13012 13015
13013 13016 static int
13014 13017 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13015 13018 dev_info_t **lundip)
13016 13019 {
13017 13020 int rval;
13018 13021 mptsas_t *mpt = DIP2MPT(pdip);
13019 - int phymask;
13022 + mptsas_phymask_t phymask;
13020 13023 mptsas_target_t *ptgt = NULL;
13021 13024
13022 13025 /*
13023 13026 * Get the physical port associated to the iport
13024 13027 */
13025 - phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13028 + phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13026 13029 "phymask", 0);
13027 13030
13028 13031 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13029 13032 if (ptgt == NULL) {
13030 13033 /*
13031 13034 * didn't match any device by searching
13032 13035 */
13033 13036 return (DDI_FAILURE);
13034 13037 }
13035 13038
13036 13039 /*
13037 13040 * If the LUN already exists and the status is online,
13038 13041 * we just return the pointer to dev_info_t directly.
13039 13042 * For the mdi_pathinfo node, we'll handle it in
13040 13043 * mptsas_create_virt_lun().
13041 13044 */
13042 13045
13043 13046 *lundip = mptsas_find_child_phy(pdip, phy);
13044 13047 if (*lundip != NULL) {
13045 13048 return (DDI_SUCCESS);
13046 13049 }
13047 13050
13048 13051 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13049 13052
13050 13053 return (rval);
13051 13054 }
13052 13055
13053 13056 static int
13054 13057 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13055 13058 uint8_t *lun_addr_type)
13056 13059 {
13057 13060 uint32_t lun_idx = 0;
13058 13061
13059 13062 ASSERT(lun_num != NULL);
13060 13063 ASSERT(lun_addr_type != NULL);
13061 13064
13062 13065 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13063 13066 /* determine report luns addressing type */
13064 13067 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13065 13068 /*
13066 13069 * Vendors in the field have been found to be concatenating
13067 13070 * bus/target/lun to equal the complete lun value instead
13068 13071 * of switching to flat space addressing
13069 13072 */
13070 13073 /* 00b - peripheral device addressing method */
13071 13074 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13072 13075 /* FALLTHRU */
13073 13076 /* 10b - logical unit addressing method */
13074 13077 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13075 13078 /* FALLTHRU */
13076 13079 /* 01b - flat space addressing method */
13077 13080 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13078 13081 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13079 13082 *lun_addr_type = (buf[lun_idx] &
13080 13083 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13081 13084 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13082 13085 *lun_num |= buf[lun_idx + 1];
13083 13086 return (DDI_SUCCESS);
13084 13087 default:
13085 13088 return (DDI_FAILURE);
13086 13089 }
13087 13090 }
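As a worked example of the decode above (a sketch with made-up bytes; it assumes MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK covers the top two bits, i.e. 0xC0): a flat-space REPORT LUNS entry whose first two bytes are 0x40 and 0x05 decodes to addressing type 01b and LUN 5.

	/* Hypothetical flat-space entry, first two bytes only. */
	uint8_t b0 = 0x40, b1 = 0x05;
	uint8_t addr_type = (b0 & 0xC0) >> 6;	/* 01b - flat space addressing */
	uint16_t lun = ((b0 & 0x3F) << 8) | b1;	/* LUN 5 */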
13088 13091
13089 13092 static int
13090 13093 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13091 13094 {
13092 13095 struct buf *repluns_bp = NULL;
13093 13096 struct scsi_address ap;
13094 13097 uchar_t cdb[CDB_GROUP5];
13095 13098 int ret = DDI_FAILURE;
13096 13099 int retry = 0;
13097 13100 int lun_list_len = 0;
13098 13101 uint16_t lun_num = 0;
13099 13102 uint8_t lun_addr_type = 0;
13100 13103 uint32_t lun_cnt = 0;
13101 13104 uint32_t lun_total = 0;
[ 66 lines elided ]
13102 13105 dev_info_t *cdip = NULL;
13103 13106 uint16_t *saved_repluns = NULL;
13104 13107 char *buffer = NULL;
13105 13108 int buf_len = 128;
13106 13109 mptsas_t *mpt = DIP2MPT(pdip);
13107 13110 uint64_t sas_wwn = 0;
13108 13111 uint8_t phy = 0xFF;
13109 13112 uint32_t dev_info = 0;
13110 13113
13111 13114 mutex_enter(&mpt->m_mutex);
13112 - sas_wwn = ptgt->m_sas_wwn;
13115 + sas_wwn = ptgt->m_addr.mta_wwn;
13113 13116 phy = ptgt->m_phynum;
13114 13117 dev_info = ptgt->m_deviceinfo;
13115 13118 mutex_exit(&mpt->m_mutex);
13116 13119
13117 13120 if (sas_wwn == 0) {
13118 13121 /*
13119 13122 * It's a SATA without Device Name
13120 13123 * So don't try multi-LUNs
13121 13124 */
13122 13125 if (mptsas_find_child_phy(pdip, phy)) {
13123 13126 return (DDI_SUCCESS);
13124 13127 } else {
13125 13128 /*
13126 13129 * need configure and create node
13127 13130 */
13128 13131 return (DDI_FAILURE);
13129 13132 }
13130 13133 }
13131 13134
13132 13135 /*
13133 13136 * WWN (SAS address or Device Name exist)
13134 13137 */
13135 13138 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13136 13139 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13137 13140 /*
13138 13141 * SATA device with Device Name
13139 13142 * So don't try multi-LUNs
13140 13143 */
13141 13144 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
13142 13145 return (DDI_SUCCESS);
13143 13146 } else {
13144 13147 return (DDI_FAILURE);
13145 13148 }
13146 13149 }
13147 13150
13148 13151 do {
13149 13152 ap.a_target = MPTSAS_INVALID_DEVHDL;
13150 13153 ap.a_lun = 0;
13151 13154 ap.a_hba_tran = mpt->m_tran;
13152 13155 repluns_bp = scsi_alloc_consistent_buf(&ap,
13153 13156 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13154 13157 if (repluns_bp == NULL) {
13155 13158 retry++;
13156 13159 continue;
13157 13160 }
13158 13161 bzero(cdb, CDB_GROUP5);
13159 13162 cdb[0] = SCMD_REPORT_LUNS;
13160 13163 cdb[6] = (buf_len & 0xff000000) >> 24;
13161 13164 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13162 13165 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13163 13166 cdb[9] = (buf_len & 0x000000ff);
13164 13167
13165 13168 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13166 13169 repluns_bp, NULL);
13167 13170 if (ret != DDI_SUCCESS) {
13168 13171 scsi_free_consistent_buf(repluns_bp);
13169 13172 retry++;
13170 13173 continue;
13171 13174 }
13172 13175 lun_list_len = BE_32(*(int *)((void *)(
13173 13176 repluns_bp->b_un.b_addr)));
13174 13177 if (buf_len >= lun_list_len + 8) {
13175 13178 ret = DDI_SUCCESS;
13176 13179 break;
13177 13180 }
13178 13181 scsi_free_consistent_buf(repluns_bp);
13179 13182 buf_len = lun_list_len + 8;
13180 13183
13181 13184 } while (retry < 3);
13182 13185
13183 13186 if (ret != DDI_SUCCESS)
13184 13187 return (ret);
13185 13188 buffer = (char *)repluns_bp->b_un.b_addr;
13186 13189 /*
13187 13190 * find out the number of luns returned by the SCSI ReportLun call
13188 13191 * and allocate buffer space
13189 13192 */
13190 13193 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13191 13194 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13192 13195 if (saved_repluns == NULL) {
13193 13196 scsi_free_consistent_buf(repluns_bp);
13194 13197 return (DDI_FAILURE);
13195 13198 }
13196 13199 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13197 13200 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13198 13201 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13199 13202 continue;
13200 13203 }
13201 13204 saved_repluns[lun_cnt] = lun_num;
13202 13205 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13203 13206 ret = DDI_SUCCESS;
13204 13207 else
13205 13208 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13206 13209 ptgt);
13207 13210 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13208 13211 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13209 13212 MPTSAS_DEV_GONE);
13210 13213 }
13211 13214 }
13212 13215 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13213 13216 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13214 13217 scsi_free_consistent_buf(repluns_bp);
13215 13218 return (DDI_SUCCESS);
13216 13219 }
[ 94 lines elided ]
13217 13220
13218 13221 static int
13219 13222 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13220 13223 {
13221 13224 int rval = DDI_FAILURE;
13222 13225 struct scsi_inquiry *sd_inq = NULL;
13223 13226 mptsas_t *mpt = DIP2MPT(pdip);
13224 13227 mptsas_target_t *ptgt = NULL;
13225 13228
13226 13229 mutex_enter(&mpt->m_mutex);
13227 - ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13230 + ptgt = refhash_linear_search(mpt->m_targets,
13231 + mptsas_target_eval_devhdl, &target);
13228 13232 mutex_exit(&mpt->m_mutex);
13229 13233 if (ptgt == NULL) {
13230 13234 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13231 13235 "not found.", target);
13232 13236 return (rval);
13233 13237 }
13234 13238
13235 13239 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13236 13240 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13237 13241 SUN_INQSIZE, 0, (uchar_t)0);
13238 13242
13239 13243 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13240 13244 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13241 13245 0);
13242 13246 } else {
13243 13247 rval = DDI_FAILURE;
13244 13248 }
13245 13249
13246 13250 kmem_free(sd_inq, SUN_INQSIZE);
13247 13251 return (rval);
13248 13252 }
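The lookup above hands the device handle to refhash_linear_search(), which walks the hash and passes each object, together with the caller's argument, to an eval callback; the search stops at the first object for which the callback returns 0. The driver's actual mptsas_target_eval_devhdl comparator is defined elsewhere in this file; the fragment below is only a sketch of the expected shape, assuming the refhash_eval_f contract from sys/refhash.h (the function name here is illustrative).

	/* Sketch: match a target by device handle for refhash_linear_search(). */
	static int
	example_target_eval_devhdl(const void *op, void *arg)
	{
		const mptsas_target_t *tp = op;
		uint16_t dh = *(uint16_t *)arg;

		/* 0 means "match"; any non-zero difference means "keep looking". */
		return ((int)tp->m_devhdl - (int)dh);
	}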
13249 13253
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13250 13254 /*
13251 13255 * configure all RAID volumes for virtual iport
13252 13256 */
13253 13257 static void
13254 13258 mptsas_config_all_viport(dev_info_t *pdip)
13255 13259 {
13256 13260 mptsas_t *mpt = DIP2MPT(pdip);
13257 13261 int config, vol;
13258 13262 int target;
13259 13263 dev_info_t *lundip = NULL;
13260 - mptsas_slots_t *slots = mpt->m_active;
13261 13264
13262 13265 /*
13263 13266 * Get latest RAID info and search for any Volume DevHandles. If any
13264 13267 * are found, configure the volume.
13265 13268 */
13266 13269 mutex_enter(&mpt->m_mutex);
13267 - for (config = 0; config < slots->m_num_raid_configs; config++) {
13270 + for (config = 0; config < mpt->m_num_raid_configs; config++) {
13268 13271 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13269 - if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13272 + if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
13270 13273 == 1) {
13271 - target = slots->m_raidconfig[config].
13274 + target = mpt->m_raidconfig[config].
13272 13275 m_raidvol[vol].m_raidhandle;
13273 13276 mutex_exit(&mpt->m_mutex);
13274 13277 (void) mptsas_config_raid(pdip, target,
13275 13278 &lundip);
13276 13279 mutex_enter(&mpt->m_mutex);
13277 13280 }
13278 13281 }
13279 13282 }
13280 13283 mutex_exit(&mpt->m_mutex);
13281 13284 }
13282 13285
13283 13286 static void
13284 13287 mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
13285 13288 int lun_cnt, mptsas_target_t *ptgt)
13286 13289 {
13287 13290 dev_info_t *child = NULL, *savechild = NULL;
13288 13291 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
[ 7 lines elided ]
13289 13292 uint64_t sas_wwn, wwid;
13290 13293 uint8_t phy;
13291 13294 int lun;
13292 13295 int i;
13293 13296 int find;
13294 13297 char *addr;
13295 13298 char *nodename;
13296 13299 mptsas_t *mpt = DIP2MPT(pdip);
13297 13300
13298 13301 mutex_enter(&mpt->m_mutex);
13299 - wwid = ptgt->m_sas_wwn;
13302 + wwid = ptgt->m_addr.mta_wwn;
13300 13303 mutex_exit(&mpt->m_mutex);
13301 13304
13302 13305 child = ddi_get_child(pdip);
13303 13306 while (child) {
13304 13307 find = 0;
13305 13308 savechild = child;
13306 13309 child = ddi_get_next_sibling(child);
13307 13310
13308 13311 nodename = ddi_node_name(savechild);
13309 13312 if (strcmp(nodename, "smp") == 0) {
13310 13313 continue;
13311 13314 }
13312 13315
13313 13316 addr = ddi_get_name_addr(savechild);
13314 13317 if (addr == NULL) {
13315 13318 continue;
13316 13319 }
13317 13320
13318 13321 if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
13319 13322 DDI_SUCCESS) {
13320 13323 continue;
13321 13324 }
13322 13325
13323 13326 if (wwid == sas_wwn) {
13324 13327 for (i = 0; i < lun_cnt; i++) {
13325 13328 if (repluns[i] == lun) {
13326 13329 find = 1;
13327 13330 break;
13328 13331 }
13329 13332 }
13330 13333 } else {
13331 13334 continue;
13332 13335 }
13333 13336 if (find == 0) {
13334 13337 /*
13335 13338 			 * The LUN is no longer present
13336 13339 */
13337 13340 (void) mptsas_offline_lun(pdip, savechild, NULL,
13338 13341 NDI_DEVI_REMOVE);
13339 13342 }
13340 13343 }
13341 13344
13342 13345 pip = mdi_get_next_client_path(pdip, NULL);
13343 13346 while (pip) {
13344 13347 find = 0;
13345 13348 savepip = pip;
13346 13349 addr = MDI_PI(pip)->pi_addr;
13347 13350
13348 13351 pip = mdi_get_next_client_path(pdip, pip);
13349 13352
13350 13353 if (addr == NULL) {
13351 13354 continue;
13352 13355 }
13353 13356
13354 13357 if (mptsas_parse_address(addr, &sas_wwn, &phy,
13355 13358 &lun) != DDI_SUCCESS) {
13356 13359 continue;
13357 13360 }
13358 13361
13359 13362 if (sas_wwn == wwid) {
13360 13363 for (i = 0; i < lun_cnt; i++) {
13361 13364 if (repluns[i] == lun) {
13362 13365 find = 1;
13363 13366 break;
13364 13367 }
13365 13368 }
13366 13369 } else {
13367 13370 continue;
13368 13371 }
13369 13372
13370 13373 if (find == 0) {
13371 13374 /*
13372 13375 			 * The LUN is no longer present
13373 13376 */
13374 13377 (void) mptsas_offline_lun(pdip, NULL, savepip,
13375 13378 NDI_DEVI_REMOVE);
13376 13379 }
13377 13380 }
13378 13381 }
13379 13382
13380 13383 void
13381 13384 mptsas_update_hashtab(struct mptsas *mpt)
13382 13385 {
13383 13386 uint32_t page_address;
13384 13387 int rval = 0;
13385 13388 uint16_t dev_handle;
13386 13389 mptsas_target_t *ptgt = NULL;
13387 13390 mptsas_smp_t smp_node;
13388 13391
13389 13392 /*
13390 13393 * Get latest RAID info.
13391 13394 */
13392 13395 (void) mptsas_get_raid_info(mpt);
[ 83 lines elided ]
13393 13396
13394 13397 dev_handle = mpt->m_smp_devhdl;
13395 13398 for (; mpt->m_done_traverse_smp == 0; ) {
13396 13399 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
13397 13400 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
13398 13401 if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
13399 13402 != DDI_SUCCESS) {
13400 13403 break;
13401 13404 }
13402 13405 mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
13403 - (void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
13406 + (void) mptsas_smp_alloc(mpt, &smp_node);
13404 13407 }
13405 13408
13406 13409 /*
13407 13410 * Config target devices
13408 13411 */
13409 13412 dev_handle = mpt->m_dev_handle;
13410 13413
13411 13414 /*
13412 13415 * Do loop to get sas device page 0 by GetNextHandle till the
13413 13416 	 * Loop to get sas device page 0 by GetNextHandle till
13414 13417 	 * the last handle. If the sas device is a SATA/SSP target,
13415 13418 */
13416 13419 for (; mpt->m_done_traverse_dev == 0; ) {
13417 13420 ptgt = NULL;
13418 13421 page_address =
13419 13422 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
13420 13423 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13421 13424 (uint32_t)dev_handle;
13422 13425 rval = mptsas_get_target_device_info(mpt, page_address,
13423 13426 &dev_handle, &ptgt);
13424 13427 if ((rval == DEV_INFO_FAIL_PAGE0) ||
[ 11 lines elided ]
13425 13428 (rval == DEV_INFO_FAIL_ALLOC)) {
13426 13429 break;
13427 13430 }
13428 13431
13429 13432 mpt->m_dev_handle = dev_handle;
13430 13433 }
13431 13434
13432 13435 }
13433 13436
13434 13437 void
13435 -mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
13436 -{
13437 - mptsas_hash_data_t *data;
13438 - data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13439 - while (data != NULL) {
13440 - data->devhdl = MPTSAS_INVALID_DEVHDL;
13441 - data->device_info = 0;
13442 - /*
13443 - * For tgttbl, clear dr_flag.
13444 - */
13445 - data->dr_flag = MPTSAS_DR_INACTIVE;
13446 - data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13447 - }
13448 -}
13449 -
13450 -void
13451 13438 mptsas_update_driver_data(struct mptsas *mpt)
13452 13439 {
13440 + mptsas_target_t *tp;
13441 + mptsas_smp_t *sp;
13442 +
13443 + ASSERT(MUTEX_HELD(&mpt->m_mutex));
13444 +
13453 13445 /*
13454 13446 * TODO after hard reset, update the driver data structures
13455 13447 * 1. update port/phymask mapping table mpt->m_phy_info
13456 13448 * 2. invalid all the entries in hash table
13457 13449 * m_devhdl = 0xffff and m_deviceinfo = 0
13458 13450 * 3. call sas_device_page/expander_page to update hash table
13459 13451 */
13460 13452 mptsas_update_phymask(mpt);
13461 13453 /*
13462 13454 * Invalid the existing entries
13455 + *
13456 + * XXX - It seems like we should just delete everything here. We are
13457 + * holding the lock and are about to refresh all the targets in both
13458 + * hashes anyway. Given the path we're in, what outstanding async
13459 + * event could possibly be trying to reference one of these things
13460 + * without taking the lock, and how would that be useful anyway?
13463 13461 */
13464 - mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
13465 - mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
13462 + for (tp = refhash_first(mpt->m_targets); tp != NULL;
13463 + tp = refhash_next(mpt->m_targets, tp)) {
13464 + tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
13465 + tp->m_deviceinfo = 0;
13466 + tp->m_dr_flag = MPTSAS_DR_INACTIVE;
13467 + }
13468 + for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
13469 + sp = refhash_next(mpt->m_smp_targets, sp)) {
13470 + sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
13471 + sp->m_deviceinfo = 0;
13472 + }
13466 13473 mpt->m_done_traverse_dev = 0;
13467 13474 mpt->m_done_traverse_smp = 0;
13468 13475 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
13469 13476 mptsas_update_hashtab(mpt);
13470 13477 }
13471 13478
13472 13479 static void
13473 13480 mptsas_config_all(dev_info_t *pdip)
13474 13481 {
13475 13482 dev_info_t *smpdip = NULL;
13476 13483 mptsas_t *mpt = DIP2MPT(pdip);
13477 13484 int phymask = 0;
13478 13485 mptsas_phymask_t phy_mask;
13479 13486 mptsas_target_t *ptgt = NULL;
13480 13487 mptsas_smp_t *psmp;
13481 13488
13482 13489 /*
13483 13490 * Get the phymask associated to the iport
13484 13491 */
13485 13492 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13486 13493 "phymask", 0);
13487 13494
13488 13495 /*
13489 13496 * Enumerate RAID volumes here (phymask == 0).
13490 13497 */
13491 13498 if (phymask == 0) {
[ 16 lines elided ]
13492 13499 mptsas_config_all_viport(pdip);
13493 13500 return;
13494 13501 }
13495 13502
13496 13503 mutex_enter(&mpt->m_mutex);
13497 13504
13498 13505 if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
13499 13506 mptsas_update_hashtab(mpt);
13500 13507 }
13501 13508
13502 - psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
13503 - MPTSAS_HASH_FIRST);
13504 - while (psmp != NULL) {
13505 - phy_mask = psmp->m_phymask;
13509 + for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
13510 + psmp = refhash_next(mpt->m_smp_targets, psmp)) {
13511 + phy_mask = psmp->m_addr.mta_phymask;
13506 13512 if (phy_mask == phymask) {
13507 13513 smpdip = NULL;
13508 13514 mutex_exit(&mpt->m_mutex);
13509 13515 (void) mptsas_online_smp(pdip, psmp, &smpdip);
13510 13516 mutex_enter(&mpt->m_mutex);
13511 13517 }
13512 - psmp = (mptsas_smp_t *)mptsas_hash_traverse(
13513 - &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
13514 13518 }
13515 13519
13516 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
13517 - MPTSAS_HASH_FIRST);
13518 - while (ptgt != NULL) {
13519 - phy_mask = ptgt->m_phymask;
13520 + for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13521 + ptgt = refhash_next(mpt->m_targets, ptgt)) {
13522 + phy_mask = ptgt->m_addr.mta_phymask;
13520 13523 if (phy_mask == phymask) {
13521 13524 mutex_exit(&mpt->m_mutex);
13522 13525 (void) mptsas_config_target(pdip, ptgt);
13523 13526 mutex_enter(&mpt->m_mutex);
13524 13527 }
13525 -
13526 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
13527 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
13528 13528 }
13529 13529 mutex_exit(&mpt->m_mutex);
13530 13530 }
13531 13531
13532 13532 static int
13533 13533 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13534 13534 {
13535 13535 int rval = DDI_FAILURE;
13536 13536 dev_info_t *tdip;
13537 13537
13538 13538 rval = mptsas_config_luns(pdip, ptgt);
13539 13539 if (rval != DDI_SUCCESS) {
13540 13540 /*
13541 13541 * The return value means the SCMD_REPORT_LUNS
13542 13542 * did not execute successfully. The target maybe
13543 13543 * doesn't support such command.
13544 13544 */
13545 13545 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13546 13546 }
13547 13547 return (rval);
13548 13548 }
13549 13549
13550 13550 /*
13551 13551 * Return fail if not all the childs/paths are freed.
13552 13552 * if there is any path under the HBA, the return value will be always fail
13553 13553 * because we didn't call mdi_pi_free for path
13554 13554 */
13555 13555 static int
13556 13556 mptsas_offline_target(dev_info_t *pdip, char *name)
13557 13557 {
13558 13558 dev_info_t *child = NULL, *prechild = NULL;
13559 13559 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
13560 13560 int tmp_rval, rval = DDI_SUCCESS;
13561 13561 char *addr, *cp;
13562 13562 size_t s;
13563 13563 mptsas_t *mpt = DIP2MPT(pdip);
13564 13564
13565 13565 child = ddi_get_child(pdip);
13566 13566 while (child) {
13567 13567 addr = ddi_get_name_addr(child);
13568 13568 prechild = child;
13569 13569 child = ddi_get_next_sibling(child);
13570 13570
13571 13571 if (addr == NULL) {
13572 13572 continue;
13573 13573 }
13574 13574 if ((cp = strchr(addr, ',')) == NULL) {
13575 13575 continue;
13576 13576 }
13577 13577
13578 13578 s = (uintptr_t)cp - (uintptr_t)addr;
13579 13579
13580 13580 if (strncmp(addr, name, s) != 0) {
13581 13581 continue;
13582 13582 }
13583 13583
13584 13584 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
13585 13585 NDI_DEVI_REMOVE);
13586 13586 if (tmp_rval != DDI_SUCCESS) {
13587 13587 rval = DDI_FAILURE;
13588 13588 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
13589 13589 prechild, MPTSAS_DEV_GONE) !=
13590 13590 DDI_PROP_SUCCESS) {
13591 13591 mptsas_log(mpt, CE_WARN, "mptsas driver "
13592 13592 "unable to create property for "
13593 13593 "SAS %s (MPTSAS_DEV_GONE)", addr);
13594 13594 }
13595 13595 }
13596 13596 }
13597 13597
13598 13598 pip = mdi_get_next_client_path(pdip, NULL);
13599 13599 while (pip) {
13600 13600 addr = MDI_PI(pip)->pi_addr;
13601 13601 savepip = pip;
13602 13602 pip = mdi_get_next_client_path(pdip, pip);
13603 13603 if (addr == NULL) {
13604 13604 continue;
13605 13605 }
13606 13606
13607 13607 if ((cp = strchr(addr, ',')) == NULL) {
13608 13608 continue;
13609 13609 }
13610 13610
13611 13611 s = (uintptr_t)cp - (uintptr_t)addr;
13612 13612
13613 13613 if (strncmp(addr, name, s) != 0) {
13614 13614 continue;
13615 13615 }
13616 13616
13617 13617 (void) mptsas_offline_lun(pdip, NULL, savepip,
13618 13618 NDI_DEVI_REMOVE);
13619 13619 /*
13620 13620 * driver will not invoke mdi_pi_free, so path will not
13621 13621 * be freed forever, return DDI_FAILURE.
13622 13622 */
13623 13623 rval = DDI_FAILURE;
13624 13624 }
13625 13625 return (rval);
13626 13626 }
13627 13627
13628 13628 static int
13629 13629 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13630 13630 mdi_pathinfo_t *rpip, uint_t flags)
13631 13631 {
13632 13632 int rval = DDI_FAILURE;
13633 13633 char *devname;
13634 13634 dev_info_t *cdip, *parent;
13635 13635
13636 13636 if (rpip != NULL) {
13637 13637 parent = scsi_vhci_dip;
13638 13638 cdip = mdi_pi_get_client(rpip);
13639 13639 } else if (rdip != NULL) {
13640 13640 parent = pdip;
13641 13641 cdip = rdip;
13642 13642 } else {
13643 13643 return (DDI_FAILURE);
13644 13644 }
13645 13645
13646 13646 /*
13647 13647 * Make sure node is attached otherwise
13648 13648 * it won't have related cache nodes to
13649 13649 * clean up. i_ddi_devi_attached is
13650 13650 	 * similar to i_ddi_node_state(cdip) >=
13651 13651 * DS_ATTACHED.
13652 13652 */
13653 13653 if (i_ddi_devi_attached(cdip)) {
13654 13654
13655 13655 /* Get full devname */
13656 13656 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13657 13657 (void) ddi_deviname(cdip, devname);
13658 13658 /* Clean cache */
13659 13659 (void) devfs_clean(parent, devname + 1,
13660 13660 DV_CLEAN_FORCE);
13661 13661 kmem_free(devname, MAXNAMELEN + 1);
13662 13662 }
13663 13663 if (rpip != NULL) {
13664 13664 if (MDI_PI_IS_OFFLINE(rpip)) {
13665 13665 rval = DDI_SUCCESS;
13666 13666 } else {
13667 13667 rval = mdi_pi_offline(rpip, 0);
13668 13668 }
13669 13669 } else {
13670 13670 rval = ndi_devi_offline(cdip, flags);
13671 13671 }
13672 13672
13673 13673 return (rval);
13674 13674 }
13675 13675
13676 13676 static dev_info_t *
13677 13677 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13678 13678 {
13679 13679 dev_info_t *child = NULL;
13680 13680 char *smp_wwn = NULL;
13681 13681
13682 13682 child = ddi_get_child(parent);
13683 13683 while (child) {
13684 13684 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13685 13685 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13686 13686 != DDI_SUCCESS) {
13687 13687 child = ddi_get_next_sibling(child);
13688 13688 continue;
13689 13689 }
13690 13690
13691 13691 if (strcmp(smp_wwn, str_wwn) == 0) {
13692 13692 ddi_prop_free(smp_wwn);
13693 13693 break;
13694 13694 }
13695 13695 child = ddi_get_next_sibling(child);
13696 13696 ddi_prop_free(smp_wwn);
13697 13697 }
13698 13698 return (child);
[ 161 lines elided ]
13699 13699 }
13700 13700
13701 13701 static int
13702 13702 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13703 13703 {
13704 13704 int rval = DDI_FAILURE;
13705 13705 char *devname;
13706 13706 char wwn_str[MPTSAS_WWN_STRLEN];
13707 13707 dev_info_t *cdip;
13708 13708
13709 - (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
13709 + (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
13710 13710
13711 13711 cdip = mptsas_find_smp_child(pdip, wwn_str);
13712 13712
13713 13713 if (cdip == NULL)
13714 13714 return (DDI_SUCCESS);
13715 13715
13716 13716 /*
13717 13717 * Make sure node is attached otherwise
13718 13718 * it won't have related cache nodes to
13719 13719 * clean up. i_ddi_devi_attached is
13720 13720 * similiar to i_ddi_node_state(cdip) >=
13721 13721 	 * similar to i_ddi_node_state(cdip) >=
13722 13722 */
13723 13723 if (i_ddi_devi_attached(cdip)) {
13724 13724
13725 13725 /* Get full devname */
13726 13726 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13727 13727 (void) ddi_deviname(cdip, devname);
13728 13728 /* Clean cache */
13729 13729 (void) devfs_clean(pdip, devname + 1,
13730 13730 DV_CLEAN_FORCE);
13731 13731 kmem_free(devname, MAXNAMELEN + 1);
13732 13732 }
13733 13733
13734 13734 rval = ndi_devi_offline(cdip, flags);
13735 13735
13736 13736 return (rval);
13737 13737 }
13738 13738
13739 13739 static dev_info_t *
13740 13740 mptsas_find_child(dev_info_t *pdip, char *name)
13741 13741 {
13742 13742 dev_info_t *child = NULL;
13743 13743 char *rname = NULL;
13744 13744 int rval = DDI_FAILURE;
13745 13745
13746 13746 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13747 13747
13748 13748 child = ddi_get_child(pdip);
13749 13749 while (child) {
13750 13750 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13751 13751 if (rval != DDI_SUCCESS) {
13752 13752 child = ddi_get_next_sibling(child);
13753 13753 bzero(rname, SCSI_MAXNAMELEN);
13754 13754 continue;
13755 13755 }
13756 13756
13757 13757 if (strcmp(rname, name) == 0) {
13758 13758 break;
13759 13759 }
13760 13760 child = ddi_get_next_sibling(child);
13761 13761 bzero(rname, SCSI_MAXNAMELEN);
13762 13762 }
13763 13763
13764 13764 kmem_free(rname, SCSI_MAXNAMELEN);
13765 13765
13766 13766 return (child);
13767 13767 }
13768 13768
13769 13769
13770 13770 static dev_info_t *
13771 13771 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13772 13772 {
13773 13773 dev_info_t *child = NULL;
13774 13774 char *name = NULL;
13775 13775 char *addr = NULL;
13776 13776
13777 13777 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13778 13778 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13779 13779 (void) sprintf(name, "%016"PRIx64, sasaddr);
13780 13780 (void) sprintf(addr, "w%s,%x", name, lun);
13781 13781 child = mptsas_find_child(pdip, addr);
13782 13782 kmem_free(name, SCSI_MAXNAMELEN);
13783 13783 kmem_free(addr, SCSI_MAXNAMELEN);
13784 13784 return (child);
13785 13785 }
13786 13786
13787 13787 static dev_info_t *
13788 13788 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13789 13789 {
13790 13790 dev_info_t *child;
13791 13791 char *addr;
13792 13792
13793 13793 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13794 13794 (void) sprintf(addr, "p%x,0", phy);
13795 13795 child = mptsas_find_child(pdip, addr);
13796 13796 kmem_free(addr, SCSI_MAXNAMELEN);
13797 13797 return (child);
13798 13798 }
13799 13799
13800 13800 static mdi_pathinfo_t *
13801 13801 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13802 13802 {
13803 13803 mdi_pathinfo_t *path;
13804 13804 char *addr = NULL;
13805 13805
13806 13806 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13807 13807 (void) sprintf(addr, "p%x,0", phy);
13808 13808 path = mdi_pi_find(pdip, NULL, addr);
13809 13809 kmem_free(addr, SCSI_MAXNAMELEN);
13810 13810 return (path);
13811 13811 }
13812 13812
13813 13813 static mdi_pathinfo_t *
13814 13814 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13815 13815 {
13816 13816 mdi_pathinfo_t *path;
13817 13817 char *name = NULL;
13818 13818 char *addr = NULL;
13819 13819
13820 13820 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13821 13821 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13822 13822 (void) sprintf(name, "%016"PRIx64, sasaddr);
13823 13823 (void) sprintf(addr, "w%s,%x", name, lun);
13824 13824 path = mdi_pi_find(parent, NULL, addr);
13825 13825 kmem_free(name, SCSI_MAXNAMELEN);
13826 13826 kmem_free(addr, SCSI_MAXNAMELEN);
13827 13827
13828 13828 return (path);
13829 13829 }
13830 13830
13831 13831 static int
13832 13832 mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
13833 13833 dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
13834 13834 {
13835 13835 int i = 0;
13836 13836 uchar_t *inq83 = NULL;
13837 13837 int inq83_len1 = 0xFF;
13838 13838 int inq83_len = 0;
13839 13839 int rval = DDI_FAILURE;
13840 13840 ddi_devid_t devid;
13841 13841 char *guid = NULL;
13842 13842 int target = ptgt->m_devhdl;
13843 13843 mdi_pathinfo_t *pip = NULL;
13844 13844 mptsas_t *mpt = DIP2MPT(pdip);
13845 13845
13846 13846 /*
13847 13847 * For DVD/CD ROM and tape devices and optical
13848 13848 * devices, we won't try to enumerate them under
13849 13849 * scsi_vhci, so no need to try page83
13850 13850 */
13851 13851 if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
13852 13852 sd_inq->inq_dtype == DTYPE_OPTICAL ||
13853 13853 sd_inq->inq_dtype == DTYPE_ESI))
13854 13854 goto create_lun;
13855 13855
13856 13856 /*
13857 13857 * The LCA returns good SCSI status, but corrupt page 83 data the first
13858 13858 * time it is queried. The solution is to keep trying to request page83
13859 13859 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
13860 13860 	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, the
13861 13861 	 * driver gives up on getting the VPD page at this stage and fails the enumeration.
13862 13862 */
13863 13863
13864 13864 inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);
13865 13865
13866 13866 for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
13867 13867 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
13868 13868 inq83_len1, &inq83_len, 1);
13869 13869 if (rval != 0) {
13870 13870 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
13871 13871 "0x83 for target:%x, lun:%x failed!", target, lun);
13872 13872 if (mptsas_physical_bind_failed_page_83 != B_FALSE)
13873 13873 goto create_lun;
13874 13874 goto out;
13875 13875 }
13876 13876 /*
13877 13877 * create DEVID from inquiry data
13878 13878 */
13879 13879 if ((rval = ddi_devid_scsi_encode(
13880 13880 DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
13881 13881 sizeof (struct scsi_inquiry), NULL, 0, inq83,
13882 13882 (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
13883 13883 /*
13884 13884 * extract GUID from DEVID
13885 13885 */
13886 13886 guid = ddi_devid_to_guid(devid);
13887 13887
13888 13888 /*
13889 13889 * Do not enable MPXIO if the strlen(guid) is greater
13890 13890 * than MPTSAS_MAX_GUID_LEN, this constrain would be
13891 13891 * handled by framework later.
13892 13892 */
13893 13893 if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
13894 13894 ddi_devid_free_guid(guid);
13895 13895 guid = NULL;
13896 13896 if (mpt->m_mpxio_enable == TRUE) {
13897 13897 mptsas_log(mpt, CE_NOTE, "!Target:%x, "
13898 13898 "lun:%x doesn't have a valid GUID, "
13899 13899 "multipathing for this drive is "
13900 13900 "not enabled", target, lun);
13901 13901 }
13902 13902 }
13903 13903
13904 13904 /*
13905 13905 * devid no longer needed
13906 13906 */
13907 13907 ddi_devid_free(devid);
13908 13908 break;
13909 13909 } else if (rval == DDI_NOT_WELL_FORMED) {
13910 13910 /*
13911 13911 * return value of ddi_devid_scsi_encode equal to
13912 13912 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
13913 13913 * to retry inquiry page 0x83 and get GUID.
13914 13914 */
13915 13915 NDBG20(("Not well formed devid, retry..."));
13916 13916 delay(1 * drv_usectohz(1000000));
13917 13917 continue;
13918 13918 } else {
13919 13919 mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
13920 13920 "path target:%x, lun:%x", target, lun);
13921 13921 rval = DDI_FAILURE;
13922 13922 goto create_lun;
13923 13923 }
13924 13924 }
13925 13925
13926 13926 if (i == mptsas_inq83_retry_timeout) {
13927 13927 mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
13928 13928 "for path target:%x, lun:%x", target, lun);
13929 13929 }
13930 13930
13931 13931 rval = DDI_FAILURE;
13932 13932
13933 13933 create_lun:
13934 13934 if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
13935 13935 rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
13936 13936 ptgt, lun);
13937 13937 }
13938 13938 if (rval != DDI_SUCCESS) {
13939 13939 rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
13940 13940 ptgt, lun);
13941 13941
13942 13942 }
13943 13943 out:
13944 13944 if (guid != NULL) {
13945 13945 /*
13946 13946 * guid no longer needed
13947 13947 */
13948 13948 ddi_devid_free_guid(guid);
13949 13949 }
13950 13950 if (inq83 != NULL)
13951 13951 kmem_free(inq83, inq83_len1);
13952 13952 return (rval);
13953 13953 }
13954 13954
13955 13955 static int
13956 13956 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13957 13957 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13958 13958 {
13959 13959 int target;
13960 13960 char *nodename = NULL;
13961 13961 char **compatible = NULL;
13962 13962 int ncompatible = 0;
13963 13963 int mdi_rtn = MDI_FAILURE;
13964 13964 int rval = DDI_FAILURE;
13965 13965 char *old_guid = NULL;
13966 13966 mptsas_t *mpt = DIP2MPT(pdip);
13967 13967 char *lun_addr = NULL;
13968 13968 char *wwn_str = NULL;
13969 13969 char *attached_wwn_str = NULL;
13970 13970 char *component = NULL;
13971 13971 uint8_t phy = 0xFF;
13972 13972 uint64_t sas_wwn;
13973 13973 int64_t lun64 = 0;
13974 13974 uint32_t devinfo;
13975 13975 uint16_t dev_hdl;
13976 13976 uint16_t pdev_hdl;
13977 13977 uint64_t dev_sas_wwn;
13978 13978 uint64_t pdev_sas_wwn;
[ 259 lines elided ]
13979 13979 uint32_t pdev_info;
13980 13980 uint8_t physport;
13981 13981 uint8_t phy_id;
13982 13982 uint32_t page_address;
13983 13983 uint16_t bay_num, enclosure;
13984 13984 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
13985 13985 uint32_t dev_info;
13986 13986
13987 13987 mutex_enter(&mpt->m_mutex);
13988 13988 target = ptgt->m_devhdl;
13989 - sas_wwn = ptgt->m_sas_wwn;
13989 + sas_wwn = ptgt->m_addr.mta_wwn;
13990 13990 devinfo = ptgt->m_deviceinfo;
13991 13991 phy = ptgt->m_phynum;
13992 13992 mutex_exit(&mpt->m_mutex);
13993 13993
13994 13994 if (sas_wwn) {
13995 13995 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13996 13996 } else {
13997 13997 *pip = mptsas_find_path_phy(pdip, phy);
13998 13998 }
13999 13999
14000 14000 if (*pip != NULL) {
14001 14001 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14002 14002 ASSERT(*lun_dip != NULL);
14003 14003 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14004 14004 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14005 14005 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14006 14006 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14007 14007 /*
14008 14008 * Same path back online again.
14009 14009 */
14010 14010 (void) ddi_prop_free(old_guid);
14011 14011 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14012 14012 (!MDI_PI_IS_STANDBY(*pip)) &&
14013 14013 (ptgt->m_tgt_unconfigured == 0)) {
14014 14014 rval = mdi_pi_online(*pip, 0);
14015 14015 mutex_enter(&mpt->m_mutex);
14016 14016 ptgt->m_led_status = 0;
14017 14017 (void) mptsas_flush_led_status(mpt,
14018 14018 ptgt);
14019 14019 mutex_exit(&mpt->m_mutex);
14020 14020 } else {
14021 14021 rval = DDI_SUCCESS;
14022 14022 }
14023 14023 if (rval != DDI_SUCCESS) {
14024 14024 mptsas_log(mpt, CE_WARN, "path:target: "
14025 14025 "%x, lun:%x online failed!", target,
14026 14026 lun);
14027 14027 *pip = NULL;
14028 14028 *lun_dip = NULL;
14029 14029 }
14030 14030 return (rval);
14031 14031 } else {
14032 14032 /*
14033 14033 * The GUID of the LUN has changed which maybe
14034 14034 * because customer mapped another volume to the
14035 14035 * same LUN.
14036 14036 */
14037 14037 mptsas_log(mpt, CE_WARN, "The GUID of the "
14038 14038 "target:%x, lun:%x was changed, maybe "
14039 14039 "because someone mapped another volume "
14040 14040 "to the same LUN", target, lun);
14041 14041 (void) ddi_prop_free(old_guid);
14042 14042 if (!MDI_PI_IS_OFFLINE(*pip)) {
14043 14043 rval = mdi_pi_offline(*pip, 0);
14044 14044 if (rval != MDI_SUCCESS) {
14045 14045 mptsas_log(mpt, CE_WARN, "path:"
14046 14046 "target:%x, lun:%x offline "
14047 14047 "failed!", target, lun);
14048 14048 *pip = NULL;
14049 14049 *lun_dip = NULL;
14050 14050 return (DDI_FAILURE);
14051 14051 }
14052 14052 }
14053 14053 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14054 14054 mptsas_log(mpt, CE_WARN, "path:target:"
14055 14055 "%x, lun:%x free failed!", target,
14056 14056 lun);
14057 14057 *pip = NULL;
14058 14058 *lun_dip = NULL;
14059 14059 return (DDI_FAILURE);
14060 14060 }
14061 14061 }
14062 14062 } else {
14063 14063 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14064 14064 "property for path:target:%x, lun:%x", target, lun);
14065 14065 *pip = NULL;
14066 14066 *lun_dip = NULL;
14067 14067 return (DDI_FAILURE);
14068 14068 }
14069 14069 }
14070 14070 scsi_hba_nodename_compatible_get(inq, NULL,
14071 14071 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14072 14072
14073 14073 /*
14074 14074 * if nodename can't be determined then print a message and skip it
14075 14075 */
14076 14076 if (nodename == NULL) {
14077 14077 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14078 14078 "driver for target%d lun %d dtype:0x%02x", target, lun,
14079 14079 inq->inq_dtype);
14080 14080 return (DDI_FAILURE);
14081 14081 }
14082 14082
14083 14083 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14084 14084 /* The property is needed by MPAPI */
14085 14085 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14086 14086
14087 14087 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14088 14088 if (guid) {
14089 14089 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14090 14090 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14091 14091 } else {
14092 14092 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14093 14093 (void) sprintf(wwn_str, "p%x", phy);
14094 14094 }
14095 14095
14096 14096 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14097 14097 guid, lun_addr, compatible, ncompatible,
14098 14098 0, pip);
14099 14099 if (mdi_rtn == MDI_SUCCESS) {
14100 14100
14101 14101 if (mdi_prop_update_string(*pip, MDI_GUID,
14102 14102 guid) != DDI_SUCCESS) {
14103 14103 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14104 14104 "create prop for target %d lun %d (MDI_GUID)",
14105 14105 target, lun);
14106 14106 mdi_rtn = MDI_FAILURE;
14107 14107 goto virt_create_done;
14108 14108 }
14109 14109
14110 14110 if (mdi_prop_update_int(*pip, LUN_PROP,
14111 14111 lun) != DDI_SUCCESS) {
14112 14112 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14113 14113 "create prop for target %d lun %d (LUN_PROP)",
14114 14114 target, lun);
14115 14115 mdi_rtn = MDI_FAILURE;
14116 14116 goto virt_create_done;
14117 14117 }
14118 14118 lun64 = (int64_t)lun;
14119 14119 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14120 14120 lun64) != DDI_SUCCESS) {
14121 14121 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14122 14122 "create prop for target %d (LUN64_PROP)",
14123 14123 target);
14124 14124 mdi_rtn = MDI_FAILURE;
14125 14125 goto virt_create_done;
14126 14126 }
14127 14127 if (mdi_prop_update_string_array(*pip, "compatible",
14128 14128 compatible, ncompatible) !=
14129 14129 DDI_PROP_SUCCESS) {
14130 14130 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14131 14131 "create prop for target %d lun %d (COMPATIBLE)",
14132 14132 target, lun);
14133 14133 mdi_rtn = MDI_FAILURE;
14134 14134 goto virt_create_done;
14135 14135 }
14136 14136 if (sas_wwn && (mdi_prop_update_string(*pip,
14137 14137 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14138 14138 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14139 14139 "create prop for target %d lun %d "
14140 14140 "(target-port)", target, lun);
14141 14141 mdi_rtn = MDI_FAILURE;
14142 14142 goto virt_create_done;
14143 14143 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14144 14144 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14145 14145 /*
14146 14146 * Direct attached SATA device without DeviceName
14147 14147 */
14148 14148 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14149 14149 "create prop for SAS target %d lun %d "
14150 14150 "(sata-phy)", target, lun);
14151 14151 mdi_rtn = MDI_FAILURE;
14152 14152 goto virt_create_done;
14153 14153 }
14154 14154 mutex_enter(&mpt->m_mutex);
14155 14155
14156 14156 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14157 14157 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14158 14158 (uint32_t)ptgt->m_devhdl;
14159 14159 rval = mptsas_get_sas_device_page0(mpt, page_address,
14160 14160 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14161 14161 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14162 14162 if (rval != DDI_SUCCESS) {
14163 14163 mutex_exit(&mpt->m_mutex);
14164 14164 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14165 14165 "parent device for handle %d", page_address);
14166 14166 mdi_rtn = MDI_FAILURE;
14167 14167 goto virt_create_done;
14168 14168 }
14169 14169
14170 14170 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14171 14171 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14172 14172 rval = mptsas_get_sas_device_page0(mpt, page_address,
14173 14173 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14174 14174 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14175 14175 if (rval != DDI_SUCCESS) {
14176 14176 mutex_exit(&mpt->m_mutex);
14177 14177 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14178 14178 "device info for handle %d", page_address);
14179 14179 mdi_rtn = MDI_FAILURE;
14180 14180 goto virt_create_done;
14181 14181 }
14182 14182
14183 14183 mutex_exit(&mpt->m_mutex);
14184 14184
14185 14185 /*
14186 14186 * If this device direct attached to the controller
14187 14187 * set the attached-port to the base wwid
14188 14188 */
14189 14189 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14190 14190 != DEVINFO_DIRECT_ATTACHED) {
14191 14191 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14192 14192 pdev_sas_wwn);
14193 14193 } else {
14194 14194 /*
14195 14195 * Update the iport's attached-port to guid
14196 14196 */
14197 14197 if (sas_wwn == 0) {
14198 14198 (void) sprintf(wwn_str, "p%x", phy);
14199 14199 } else {
14200 14200 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14201 14201 }
14202 14202 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14203 14203 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14204 14204 DDI_PROP_SUCCESS) {
14205 14205 mptsas_log(mpt, CE_WARN,
14206 14206 "mptsas unable to create "
14207 14207 "property for iport target-port"
14208 14208 " %s (sas_wwn)",
14209 14209 wwn_str);
14210 14210 mdi_rtn = MDI_FAILURE;
14211 14211 goto virt_create_done;
14212 14212 }
14213 14213
14214 14214 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14215 14215 mpt->un.m_base_wwid);
14216 14216 }
14217 14217
14218 14218 if (mdi_prop_update_string(*pip,
14219 14219 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14220 14220 DDI_PROP_SUCCESS) {
14221 14221 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14222 14222 "property for iport attached-port %s (sas_wwn)",
14223 14223 attached_wwn_str);
14224 14224 mdi_rtn = MDI_FAILURE;
14225 14225 goto virt_create_done;
14226 14226 }
14227 14227
14228 14228
14229 14229 if (inq->inq_dtype == 0) {
14230 14230 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14231 14231 /*
14232 14232 * set obp path for pathinfo
14233 14233 */
14234 14234 (void) snprintf(component, MAXPATHLEN,
14235 14235 "disk@%s", lun_addr);
14236 14236
14237 14237 if (mdi_pi_pathname_obp_set(*pip, component) !=
14238 14238 DDI_SUCCESS) {
14239 14239 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14240 14240 "unable to set obp-path for object %s",
14241 14241 component);
14242 14242 mdi_rtn = MDI_FAILURE;
14243 14243 goto virt_create_done;
14244 14244 }
14245 14245 }
14246 14246
14247 14247 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14248 14248 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14249 14249 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14250 14250 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14251 14251 "pm-capable", 1)) !=
14252 14252 DDI_PROP_SUCCESS) {
14253 14253 				mptsas_log(mpt, CE_WARN, "mptsas driver "
14254 14254 "failed to create pm-capable "
14255 14255 "property, target %d", target);
14256 14256 mdi_rtn = MDI_FAILURE;
14257 14257 goto virt_create_done;
14258 14258 }
14259 14259 }
14260 14260 /*
14261 14261 * Create the phy-num property
14262 14262 */
14263 14263 if (mdi_prop_update_int(*pip, "phy-num",
14264 14264 ptgt->m_phynum) != DDI_SUCCESS) {
14265 14265 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14266 14266 "create phy-num property for target %d lun %d",
14267 14267 target, lun);
14268 14268 mdi_rtn = MDI_FAILURE;
14269 14269 goto virt_create_done;
14270 14270 }
14271 14271 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14272 14272 mdi_rtn = mdi_pi_online(*pip, 0);
14273 14273 if (mdi_rtn == MDI_SUCCESS) {
14274 14274 mutex_enter(&mpt->m_mutex);
14275 14275 ptgt->m_led_status = 0;
14276 14276 (void) mptsas_flush_led_status(mpt, ptgt);
14277 14277 mutex_exit(&mpt->m_mutex);
14278 14278 }
14279 14279 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14280 14280 mdi_rtn = MDI_FAILURE;
14281 14281 }
14282 14282 virt_create_done:
14283 14283 if (*pip && mdi_rtn != MDI_SUCCESS) {
14284 14284 (void) mdi_pi_free(*pip, 0);
14285 14285 *pip = NULL;
14286 14286 *lun_dip = NULL;
14287 14287 }
14288 14288 }
14289 14289
14290 14290 scsi_hba_nodename_compatible_free(nodename, compatible);
14291 14291 if (lun_addr != NULL) {
14292 14292 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14293 14293 }
14294 14294 if (wwn_str != NULL) {
14295 14295 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14296 14296 }
14297 14297 if (component != NULL) {
14298 14298 kmem_free(component, MAXPATHLEN);
14299 14299 }
14300 14300
14301 14301 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14302 14302 }
14303 14303
14304 14304 static int
14305 14305 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14306 14306 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14307 14307 {
14308 14308 int target;
14309 14309 int rval;
14310 14310 int ndi_rtn = NDI_FAILURE;
14311 14311 uint64_t be_sas_wwn;
14312 14312 char *nodename = NULL;
14313 14313 char **compatible = NULL;
14314 14314 int ncompatible = 0;
14315 14315 int instance = 0;
14316 14316 mptsas_t *mpt = DIP2MPT(pdip);
14317 14317 char *wwn_str = NULL;
14318 14318 char *component = NULL;
14319 14319 char *attached_wwn_str = NULL;
14320 14320 uint8_t phy = 0xFF;
14321 14321 uint64_t sas_wwn;
14322 14322 uint32_t devinfo;
14323 14323 uint16_t dev_hdl;
14324 14324 uint16_t pdev_hdl;
14325 14325 uint64_t pdev_sas_wwn;
14326 14326 uint64_t dev_sas_wwn;
14327 14327 uint32_t pdev_info;
(328 lines elided)
14328 14328 uint8_t physport;
14329 14329 uint8_t phy_id;
14330 14330 uint32_t page_address;
14331 14331 uint16_t bay_num, enclosure;
14332 14332 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14333 14333 uint32_t dev_info;
14334 14334 int64_t lun64 = 0;
14335 14335
14336 14336 mutex_enter(&mpt->m_mutex);
14337 14337 target = ptgt->m_devhdl;
14338 - sas_wwn = ptgt->m_sas_wwn;
14338 + sas_wwn = ptgt->m_addr.mta_wwn;
14339 14339 devinfo = ptgt->m_deviceinfo;
14340 14340 phy = ptgt->m_phynum;
14341 14341 mutex_exit(&mpt->m_mutex);
14342 14342
14343 14343 /*
14344 14344 * generate compatible property with binding-set "mpt"
14345 14345 */
14346 14346 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14347 14347 &nodename, &compatible, &ncompatible);
14348 14348
14349 14349 /*
14350 14350 * if nodename can't be determined then print a message and skip it
14351 14351 */
14352 14352 if (nodename == NULL) {
14353 14353 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14354 14354 "for target %d lun %d", target, lun);
14355 14355 return (DDI_FAILURE);
14356 14356 }
14357 14357
14358 14358 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14359 14359 DEVI_SID_NODEID, lun_dip);
14360 14360
14361 14361 /*
14362 14362 * if lun alloc success, set props
14363 14363 */
14364 14364 if (ndi_rtn == NDI_SUCCESS) {
14365 14365
14366 14366 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14367 14367 *lun_dip, LUN_PROP, lun) !=
14368 14368 DDI_PROP_SUCCESS) {
14369 14369 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14370 14370 "property for target %d lun %d (LUN_PROP)",
14371 14371 target, lun);
14372 14372 ndi_rtn = NDI_FAILURE;
14373 14373 goto phys_create_done;
14374 14374 }
14375 14375
14376 14376 lun64 = (int64_t)lun;
14377 14377 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14378 14378 *lun_dip, LUN64_PROP, lun64) !=
14379 14379 DDI_PROP_SUCCESS) {
14380 14380 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14381 14381 "property for target %d lun64 %d (LUN64_PROP)",
14382 14382 target, lun);
14383 14383 ndi_rtn = NDI_FAILURE;
14384 14384 goto phys_create_done;
14385 14385 }
14386 14386 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14387 14387 *lun_dip, "compatible", compatible, ncompatible)
14388 14388 != DDI_PROP_SUCCESS) {
14389 14389 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14390 14390 "property for target %d lun %d (COMPATIBLE)",
14391 14391 target, lun);
14392 14392 ndi_rtn = NDI_FAILURE;
14393 14393 goto phys_create_done;
14394 14394 }
14395 14395
14396 14396 /*
14397 14397 * We need the SAS WWN for non-multipath devices, so
14398 14398 * we'll use the same property as that multipathing
14399 14399 * devices need to present for MPAPI. If we don't have
14400 14400 * a WWN (e.g. parallel SCSI), don't create the prop.
14401 14401 */
14402 14402 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14403 14403 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14404 14404 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14405 14405 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14406 14406 != DDI_PROP_SUCCESS) {
14407 14407 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14408 14408 "create property for SAS target %d lun %d "
14409 14409 "(target-port)", target, lun);
14410 14410 ndi_rtn = NDI_FAILURE;
14411 14411 goto phys_create_done;
14412 14412 }
14413 14413
14414 14414 be_sas_wwn = BE_64(sas_wwn);
14415 14415 if (sas_wwn && ndi_prop_update_byte_array(
14416 14416 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14417 14417 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14418 14418 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14419 14419 "create property for SAS target %d lun %d "
14420 14420 "(port-wwn)", target, lun);
14421 14421 ndi_rtn = NDI_FAILURE;
14422 14422 goto phys_create_done;
14423 14423 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
14424 14424 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14425 14425 DDI_PROP_SUCCESS)) {
14426 14426 /*
14427 14427 * Direct attached SATA device without DeviceName
14428 14428 */
14429 14429 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14430 14430 "create property for SAS target %d lun %d "
14431 14431 "(sata-phy)", target, lun);
14432 14432 ndi_rtn = NDI_FAILURE;
14433 14433 goto phys_create_done;
14434 14434 }
14435 14435
14436 14436 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14437 14437 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14438 14438 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
14439 14439 "create property for SAS target %d lun %d"
14440 14440 " (SAS_PROP)", target, lun);
14441 14441 ndi_rtn = NDI_FAILURE;
14442 14442 goto phys_create_done;
14443 14443 }
14444 14444 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14445 14445 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14446 14446 mptsas_log(mpt, CE_WARN, "mptsas unable "
(98 lines elided)
14447 14447 "to create guid property for target %d "
14448 14448 "lun %d", target, lun);
14449 14449 ndi_rtn = NDI_FAILURE;
14450 14450 goto phys_create_done;
14451 14451 }
14452 14452
14453 14453 /*
14454 14454 * The following code is to set properties for SM-HBA support,
14455 14455 * it doesn't apply to RAID volumes
14456 14456 */
14457 - if (ptgt->m_phymask == 0)
14457 + if (ptgt->m_addr.mta_phymask == 0)
14458 14458 goto phys_raid_lun;
14459 14459
14460 14460 mutex_enter(&mpt->m_mutex);
14461 14461
14462 14462 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14463 14463 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14464 14464 (uint32_t)ptgt->m_devhdl;
14465 14465 rval = mptsas_get_sas_device_page0(mpt, page_address,
14466 14466 &dev_hdl, &dev_sas_wwn, &dev_info,
14467 14467 &physport, &phy_id, &pdev_hdl,
14468 14468 &bay_num, &enclosure);
14469 14469 if (rval != DDI_SUCCESS) {
14470 14470 mutex_exit(&mpt->m_mutex);
14471 14471 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14472 14472 "parent device for handle %d.", page_address);
14473 14473 ndi_rtn = NDI_FAILURE;
14474 14474 goto phys_create_done;
14475 14475 }
14476 14476
14477 14477 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14478 14478 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14479 14479 rval = mptsas_get_sas_device_page0(mpt, page_address,
14480 14480 &dev_hdl, &pdev_sas_wwn, &pdev_info,
14481 14481 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14482 14482 if (rval != DDI_SUCCESS) {
14483 14483 mutex_exit(&mpt->m_mutex);
14484 14484 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14485 14485 "device for handle %d.", page_address);
14486 14486 ndi_rtn = NDI_FAILURE;
14487 14487 goto phys_create_done;
14488 14488 }
14489 14489
14490 14490 mutex_exit(&mpt->m_mutex);
14491 14491
14492 14492 /*
14493 14493 		 * If this device is directly attached to the controller,
14494 14494 		 * set the attached-port to the base wwid
14495 14495 */
14496 14496 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14497 14497 != DEVINFO_DIRECT_ATTACHED) {
14498 14498 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14499 14499 pdev_sas_wwn);
14500 14500 } else {
14501 14501 /*
14502 14502 * Update the iport's attached-port to guid
14503 14503 */
14504 14504 if (sas_wwn == 0) {
14505 14505 (void) sprintf(wwn_str, "p%x", phy);
14506 14506 } else {
14507 14507 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14508 14508 }
14509 14509 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14510 14510 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14511 14511 DDI_PROP_SUCCESS) {
14512 14512 mptsas_log(mpt, CE_WARN,
14513 14513 "mptsas unable to create "
14514 14514 "property for iport target-port"
14515 14515 " %s (sas_wwn)",
14516 14516 wwn_str);
14517 14517 ndi_rtn = NDI_FAILURE;
14518 14518 goto phys_create_done;
14519 14519 }
14520 14520
14521 14521 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14522 14522 mpt->un.m_base_wwid);
14523 14523 }
14524 14524
14525 14525 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14526 14526 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14527 14527 DDI_PROP_SUCCESS) {
14528 14528 mptsas_log(mpt, CE_WARN,
14529 14529 "mptsas unable to create "
14530 14530 "property for iport attached-port %s (sas_wwn)",
14531 14531 attached_wwn_str);
14532 14532 ndi_rtn = NDI_FAILURE;
14533 14533 goto phys_create_done;
14534 14534 }
14535 14535
14536 14536 if (IS_SATA_DEVICE(dev_info)) {
14537 14537 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14538 14538 *lun_dip, MPTSAS_VARIANT, "sata") !=
14539 14539 DDI_PROP_SUCCESS) {
14540 14540 mptsas_log(mpt, CE_WARN,
14541 14541 "mptsas unable to create "
14542 14542 "property for device variant ");
14543 14543 ndi_rtn = NDI_FAILURE;
14544 14544 goto phys_create_done;
14545 14545 }
14546 14546 }
14547 14547
14548 14548 if (IS_ATAPI_DEVICE(dev_info)) {
14549 14549 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14550 14550 *lun_dip, MPTSAS_VARIANT, "atapi") !=
14551 14551 DDI_PROP_SUCCESS) {
14552 14552 mptsas_log(mpt, CE_WARN,
14553 14553 "mptsas unable to create "
14554 14554 "property for device variant ");
14555 14555 ndi_rtn = NDI_FAILURE;
14556 14556 goto phys_create_done;
14557 14557 }
14558 14558 }
14559 14559
14560 14560 phys_raid_lun:
14561 14561 /*
14562 14562 * if this is a SAS controller, and the target is a SATA
14563 14563 * drive, set the 'pm-capable' property for sd and if on
14564 14564 * an OPL platform, also check if this is an ATAPI
14565 14565 * device.
14566 14566 */
14567 14567 instance = ddi_get_instance(mpt->m_dip);
14568 14568 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14569 14569 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14570 14570 NDBG2(("mptsas%d: creating pm-capable property, "
14571 14571 "target %d", instance, target));
14572 14572
14573 14573 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
14574 14574 *lun_dip, "pm-capable", 1)) !=
14575 14575 DDI_PROP_SUCCESS) {
14576 14576 mptsas_log(mpt, CE_WARN, "mptsas "
14577 14577 "failed to create pm-capable "
14578 14578 "property, target %d", target);
14579 14579 ndi_rtn = NDI_FAILURE;
14580 14580 goto phys_create_done;
14581 14581 }
14582 14582
14583 14583 }
14584 14584
14585 14585 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
14586 14586 /*
14587 14587 * add 'obp-path' properties for devinfo
14588 14588 */
14589 14589 bzero(wwn_str, sizeof (wwn_str));
14590 14590 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14591 14591 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14592 14592 if (guid) {
14593 14593 (void) snprintf(component, MAXPATHLEN,
14594 14594 "disk@w%s,%x", wwn_str, lun);
14595 14595 } else {
14596 14596 (void) snprintf(component, MAXPATHLEN,
14597 14597 "disk@p%x,%x", phy, lun);
14598 14598 }
14599 14599 if (ddi_pathname_obp_set(*lun_dip, component)
14600 14600 != DDI_SUCCESS) {
(133 lines elided)
14601 14601 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14602 14602 "unable to set obp-path for SAS "
14603 14603 "object %s", component);
14604 14604 ndi_rtn = NDI_FAILURE;
14605 14605 goto phys_create_done;
14606 14606 }
14607 14607 }
14608 14608 /*
14609 14609 * Create the phy-num property for non-raid disk
14610 14610 */
14611 - if (ptgt->m_phymask != 0) {
14611 + if (ptgt->m_addr.mta_phymask != 0) {
14612 14612 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14613 14613 *lun_dip, "phy-num", ptgt->m_phynum) !=
14614 14614 DDI_PROP_SUCCESS) {
14615 14615 mptsas_log(mpt, CE_WARN, "mptsas driver "
14616 14616 "failed to create phy-num property for "
14617 14617 "target %d", target);
14618 14618 ndi_rtn = NDI_FAILURE;
14619 14619 goto phys_create_done;
14620 14620 }
14621 14621 }
14622 14622 phys_create_done:
14623 14623 /*
14624 14624 * If props were setup ok, online the lun
14625 14625 */
14626 14626 if (ndi_rtn == NDI_SUCCESS) {
14627 14627 /*
14628 14628 * Try to online the new node
14629 14629 */
14630 14630 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14631 14631 }
14632 14632 if (ndi_rtn == NDI_SUCCESS) {
14633 14633 mutex_enter(&mpt->m_mutex);
14634 14634 ptgt->m_led_status = 0;
14635 14635 (void) mptsas_flush_led_status(mpt, ptgt);
14636 14636 mutex_exit(&mpt->m_mutex);
14637 14637 }
14638 14638
14639 14639 /*
14640 14640 * If success set rtn flag, else unwire alloc'd lun
14641 14641 */
14642 14642 if (ndi_rtn != NDI_SUCCESS) {
14643 14643 NDBG12(("mptsas driver unable to online "
14644 14644 "target %d lun %d", target, lun));
14645 14645 ndi_prop_remove_all(*lun_dip);
14646 14646 (void) ndi_devi_free(*lun_dip);
14647 14647 *lun_dip = NULL;
14648 14648 }
14649 14649 }
14650 14650
14651 14651 scsi_hba_nodename_compatible_free(nodename, compatible);
14652 14652
14653 14653 if (wwn_str != NULL) {
14654 14654 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14655 14655 }
14656 14656 if (component != NULL) {
14657 14657 kmem_free(component, MAXPATHLEN);
14658 14658 }
14659 14659
14660 14660
14661 14661 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14662 14662 }
14663 14663
14664 14664 static int
14665 14665 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14666 14666 {
14667 14667 mptsas_t *mpt = DIP2MPT(pdip);
14668 14668 struct smp_device smp_sd;
14669 14669
14670 14670 /* XXX An HBA driver should not be allocating an smp_device. */
14671 14671 bzero(&smp_sd, sizeof (struct smp_device));
14672 14672 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14673 14673 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14674 14674
14675 14675 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14676 14676 return (NDI_FAILURE);
14677 14677 return (NDI_SUCCESS);
14678 14678 }
14679 14679
14680 14680 static int
14681 14681 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14682 14682 {
14683 14683 mptsas_t *mpt = DIP2MPT(pdip);
14684 14684 mptsas_smp_t *psmp = NULL;
14685 14685 int rval;
14686 14686 int phymask;
14687 14687
14688 14688 /*
14689 14689 * Get the physical port associated to the iport
14690 14690 * PHYMASK TODO
14691 14691 */
14692 14692 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14693 14693 "phymask", 0);
14694 14694 /*
14695 14695 * Find the smp node in hash table with specified sas address and
14696 14696 * physical port
14697 14697 */
14698 14698 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14699 14699 if (psmp == NULL) {
14700 14700 return (DDI_FAILURE);
14701 14701 }
14702 14702
14703 14703 rval = mptsas_online_smp(pdip, psmp, smp_dip);
14704 14704
14705 14705 return (rval);
14706 14706 }
14707 14707
14708 14708 static int
14709 14709 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
14710 14710 dev_info_t **smp_dip)
14711 14711 {
14712 14712 char wwn_str[MPTSAS_WWN_STRLEN];
14713 14713 char attached_wwn_str[MPTSAS_WWN_STRLEN];
14714 14714 int ndi_rtn = NDI_FAILURE;
14715 14715 int rval = 0;
14716 14716 mptsas_smp_t dev_info;
14717 14717 uint32_t page_address;
14718 14718 mptsas_t *mpt = DIP2MPT(pdip);
14719 14719 uint16_t dev_hdl;
14720 14720 uint64_t sas_wwn;
14721 14721 uint64_t smp_sas_wwn;
14722 14722 uint8_t physport;
(101 lines elided)
14723 14723 uint8_t phy_id;
14724 14724 uint16_t pdev_hdl;
14725 14725 uint8_t numphys = 0;
14726 14726 uint16_t i = 0;
14727 14727 char phymask[MPTSAS_MAX_PHYS];
14728 14728 char *iport = NULL;
14729 14729 mptsas_phymask_t phy_mask = 0;
14730 14730 uint16_t attached_devhdl;
14731 14731 uint16_t bay_num, enclosure;
14732 14732
14733 - (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
14733 + (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
14734 14734
14735 14735 /*
14736 14736 * Probe smp device, prevent the node of removed device from being
14737 14737 	 * configured successfully
14738 14738 */
14739 - if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
14739 + if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
14740 14740 return (DDI_FAILURE);
14741 14741 }
14742 14742
14743 14743 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
14744 14744 return (DDI_SUCCESS);
14745 14745 }
14746 14746
14747 14747 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
14748 14748
14749 14749 /*
14750 14750 * if lun alloc success, set props
14751 14751 */
14752 14752 if (ndi_rtn == NDI_SUCCESS) {
14753 14753 /*
14754 14754 * Set the flavor of the child to be SMP flavored
14755 14755 */
14756 14756 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
(7 lines elided)
14757 14757
14758 14758 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14759 14759 *smp_dip, SMP_WWN, wwn_str) !=
14760 14760 DDI_PROP_SUCCESS) {
14761 14761 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14762 14762 "property for smp device %s (sas_wwn)",
14763 14763 wwn_str);
14764 14764 ndi_rtn = NDI_FAILURE;
14765 14765 goto smp_create_done;
14766 14766 }
14767 - (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
14767 + (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
14768 14768 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14769 14769 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
14770 14770 DDI_PROP_SUCCESS) {
14771 14771 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14772 14772 "property for iport target-port %s (sas_wwn)",
14773 14773 wwn_str);
14774 14774 ndi_rtn = NDI_FAILURE;
14775 14775 goto smp_create_done;
14776 14776 }
14777 14777
14778 14778 mutex_enter(&mpt->m_mutex);
14779 14779
14780 14780 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
14781 14781 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
14782 14782 rval = mptsas_get_sas_expander_page0(mpt, page_address,
14783 14783 &dev_info);
14784 14784 if (rval != DDI_SUCCESS) {
14785 14785 mutex_exit(&mpt->m_mutex);
14786 14786 mptsas_log(mpt, CE_WARN,
14787 14787 "mptsas unable to get expander "
14788 14788 "parent device info for %x", page_address);
14789 14789 ndi_rtn = NDI_FAILURE;
14790 14790 goto smp_create_done;
14791 14791 }
14792 14792
14793 14793 smp_node->m_pdevhdl = dev_info.m_pdevhdl;
14794 14794 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14795 14795 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14796 14796 (uint32_t)dev_info.m_pdevhdl;
14797 14797 rval = mptsas_get_sas_device_page0(mpt, page_address,
14798 14798 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
14799 14799 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14800 14800 if (rval != DDI_SUCCESS) {
14801 14801 mutex_exit(&mpt->m_mutex);
14802 14802 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14803 14803 "device info for %x", page_address);
14804 14804 ndi_rtn = NDI_FAILURE;
14805 14805 goto smp_create_done;
14806 14806 }
14807 14807
14808 14808 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14809 14809 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14810 14810 (uint32_t)dev_info.m_devhdl;
14811 14811 rval = mptsas_get_sas_device_page0(mpt, page_address,
14812 14812 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
14813 14813 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14814 14814 if (rval != DDI_SUCCESS) {
14815 14815 mutex_exit(&mpt->m_mutex);
14816 14816 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14817 14817 "device info for %x", page_address);
14818 14818 ndi_rtn = NDI_FAILURE;
14819 14819 goto smp_create_done;
14820 14820 }
14821 14821 mutex_exit(&mpt->m_mutex);
14822 14822
14823 14823 /*
14824 14824 		 * If this smp is directly attached to the controller,
14825 14825 		 * set the attached-port to the base wwid
14826 14826 */
14827 14827 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14828 14828 != DEVINFO_DIRECT_ATTACHED) {
14829 14829 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
14830 14830 sas_wwn);
14831 14831 } else {
14832 14832 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
14833 14833 mpt->un.m_base_wwid);
14834 14834 }
14835 14835
14836 14836 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14837 14837 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
14838 14838 DDI_PROP_SUCCESS) {
14839 14839 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14840 14840 "property for smp attached-port %s (sas_wwn)",
14841 14841 attached_wwn_str);
14842 14842 ndi_rtn = NDI_FAILURE;
14843 14843 goto smp_create_done;
14844 14844 }
14845 14845
14846 14846 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14847 14847 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
14848 14848 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14849 14849 "create property for SMP %s (SMP_PROP) ",
14850 14850 wwn_str);
14851 14851 ndi_rtn = NDI_FAILURE;
14852 14852 goto smp_create_done;
14853 14853 }
14854 14854
14855 14855 /*
14856 14856 		 * check the smp to see whether it is directly
14857 14857 		 * attached to the controller
14858 14858 */
14859 14859 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14860 14860 != DEVINFO_DIRECT_ATTACHED) {
14861 14861 goto smp_create_done;
14862 14862 }
14863 14863 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
14864 14864 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
14865 14865 if (numphys > 0) {
14866 14866 goto smp_create_done;
14867 14867 }
14868 14868 /*
14869 14869 * this iport is an old iport, we need to
14870 14870 * reconfig the props for it.
14871 14871 */
14872 14872 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14873 14873 MPTSAS_VIRTUAL_PORT, 0) !=
14874 14874 DDI_PROP_SUCCESS) {
14875 14875 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14876 14876 MPTSAS_VIRTUAL_PORT);
14877 14877 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
14878 14878 "prop update failed");
14879 14879 goto smp_create_done;
14880 14880 }
14881 14881
14882 14882 mutex_enter(&mpt->m_mutex);
14883 14883 numphys = 0;
14884 14884 iport = ddi_get_name_addr(pdip);
14885 14885 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14886 14886 bzero(phymask, sizeof (phymask));
14887 14887 (void) sprintf(phymask,
14888 14888 "%x", mpt->m_phy_info[i].phy_mask);
14889 14889 if (strcmp(phymask, iport) == 0) {
14890 14890 phy_mask = mpt->m_phy_info[i].phy_mask;
14891 14891 break;
14892 14892 }
14893 14893 }
14894 14894
14895 14895 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
14896 14896 if ((phy_mask >> i) & 0x01) {
14897 14897 numphys++;
14898 14898 }
14899 14899 }
14900 14900 /*
14901 14901 * Update PHY info for smhba
14902 14902 */
14903 14903 if (mptsas_smhba_phy_init(mpt)) {
14904 14904 mutex_exit(&mpt->m_mutex);
14905 14905 mptsas_log(mpt, CE_WARN, "mptsas phy update "
14906 14906 "failed");
14907 14907 goto smp_create_done;
14908 14908 }
14909 14909 mutex_exit(&mpt->m_mutex);
14910 14910
14911 14911 mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
14912 14912 &attached_devhdl);
14913 14913
14914 14914 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
14915 14915 MPTSAS_NUM_PHYS, numphys) !=
14916 14916 DDI_PROP_SUCCESS) {
14917 14917 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14918 14918 MPTSAS_NUM_PHYS);
14919 14919 mptsas_log(mpt, CE_WARN, "mptsas update "
14920 14920 "num phys props failed");
14921 14921 goto smp_create_done;
14922 14922 }
14923 14923 /*
14924 14924 * Add parent's props for SMHBA support
14925 14925 */
14926 14926 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
14927 14927 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14928 14928 DDI_PROP_SUCCESS) {
14929 14929 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
14930 14930 SCSI_ADDR_PROP_ATTACHED_PORT);
14931 14931 			mptsas_log(mpt, CE_WARN, "mptsas update iport "
14932 14932 "attached-port failed");
14933 14933 goto smp_create_done;
14934 14934 }
14935 14935
14936 14936 smp_create_done:
14937 14937 /*
14938 14938 * If props were setup ok, online the lun
14939 14939 */
14940 14940 if (ndi_rtn == NDI_SUCCESS) {
14941 14941 /*
14942 14942 * Try to online the new node
14943 14943 */
14944 14944 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
14945 14945 }
14946 14946
14947 14947 /*
14948 14948 * If success set rtn flag, else unwire alloc'd lun
14949 14949 */
14950 14950 if (ndi_rtn != NDI_SUCCESS) {
14951 14951 NDBG12(("mptsas unable to online "
14952 14952 "SMP target %s", wwn_str));
14953 14953 ndi_prop_remove_all(*smp_dip);
14954 14954 (void) ndi_devi_free(*smp_dip);
14955 14955 }
14956 14956 }
14957 14957
14958 14958 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14959 14959 }
14960 14960
14961 14961 /* smp transport routine */
14962 14962 static int mptsas_smp_start(struct smp_pkt *smp_pkt)
14963 14963 {
14964 14964 uint64_t wwn;
14965 14965 Mpi2SmpPassthroughRequest_t req;
14966 14966 Mpi2SmpPassthroughReply_t rep;
14967 14967 uint32_t direction = 0;
14968 14968 mptsas_t *mpt;
14969 14969 int ret;
14970 14970 uint64_t tmp64;
14971 14971
14972 14972 mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
14973 14973 smp_a_hba_tran->smp_tran_hba_private;
14974 14974
14975 14975 bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
14976 14976 /*
14977 14977 * Need to compose a SMP request message
14978 14978 * and call mptsas_do_passthru() function
14979 14979 */
14980 14980 bzero(&req, sizeof (req));
14981 14981 bzero(&rep, sizeof (rep));
14982 14982 req.PassthroughFlags = 0;
14983 14983 req.PhysicalPort = 0xff;
14984 14984 req.ChainOffset = 0;
14985 14985 req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
14986 14986
14987 14987 if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
14988 14988 smp_pkt->smp_pkt_reason = ERANGE;
14989 14989 return (DDI_FAILURE);
14990 14990 }
14991 14991 req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));
14992 14992
14993 14993 req.MsgFlags = 0;
14994 14994 tmp64 = LE_64(wwn);
14995 14995 bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
14996 14996 if (smp_pkt->smp_pkt_rspsize > 0) {
14997 14997 direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
14998 14998 }
14999 14999 if (smp_pkt->smp_pkt_reqsize > 0) {
15000 15000 direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
15001 15001 }
15002 15002
15003 15003 mutex_enter(&mpt->m_mutex);
15004 15004 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
15005 15005 (uint8_t *)smp_pkt->smp_pkt_rsp,
15006 15006 offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
15007 15007 smp_pkt->smp_pkt_rspsize - 4, direction,
15008 15008 (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
15009 15009 smp_pkt->smp_pkt_timeout, FKIOCTL);
15010 15010 mutex_exit(&mpt->m_mutex);
15011 15011 if (ret != 0) {
15012 15012 cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
15013 15013 smp_pkt->smp_pkt_reason = (uchar_t)(ret);
15014 15014 return (DDI_FAILURE);
15015 15015 }
15016 15016 /* do passthrough success, check the smp status */
15017 15017 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
15018 15018 switch (LE_16(rep.IOCStatus)) {
15019 15019 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
15020 15020 smp_pkt->smp_pkt_reason = ENODEV;
15021 15021 break;
15022 15022 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
15023 15023 smp_pkt->smp_pkt_reason = EOVERFLOW;
15024 15024 break;
15025 15025 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
15026 15026 smp_pkt->smp_pkt_reason = EIO;
15027 15027 break;
15028 15028 default:
15029 15029 			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc "
15030 15030 "status:%x", LE_16(rep.IOCStatus));
15031 15031 smp_pkt->smp_pkt_reason = EIO;
15032 15032 break;
15033 15033 }
15034 15034 return (DDI_FAILURE);
15035 15035 }
15036 15036 if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
15037 15037 mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
15038 15038 rep.SASStatus);
15039 15039 smp_pkt->smp_pkt_reason = EIO;
15040 15040 return (DDI_FAILURE);
(263 lines elided)
15041 15041 }
15042 15042
15043 15043 return (DDI_SUCCESS);
15044 15044 }
15045 15045
15046 15046 /*
15047 15047  * If we didn't get a match, we need to get sas page0 for each device
15048 15048  * until we get a match; if that fails, return NULL
15049 15049 */
15050 15050 static mptsas_target_t *
15051 -mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
15051 +mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
15052 15052 {
15053 15053 int i, j = 0;
15054 15054 int rval = 0;
15055 15055 uint16_t cur_handle;
15056 15056 uint32_t page_address;
15057 15057 mptsas_target_t *ptgt = NULL;
15058 15058
15059 15059 /*
15060 15060 	 * A PHY-named device must be direct attached to a narrow port
15061 15061 	 * if the iport is not the parent of the device we are looking
15062 15062 	 * for.
15063 15063 */
15064 15064 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15065 15065 if ((1 << i) & phymask)
15066 15066 j++;
15067 15067 }
15068 15068
15069 15069 if (j > 1)
15070 15070 return (NULL);
15071 15071
15072 15072 /*
(11 lines elided)
15073 15073 	 * Must be a narrow port with a single device attached to it, so
15074 15074 	 * the device whose physical port number equals the iport's port
15075 15075 	 * number is the device we are looking for.
15076 15076 */
15077 15077
15078 15078 if (mpt->m_phy_info[phy].phy_mask != phymask)
15079 15079 return (NULL);
15080 15080
15081 15081 mutex_enter(&mpt->m_mutex);
15082 15082
15083 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
15084 - MPTSAS_HASH_FIRST);
15085 - while (ptgt != NULL) {
15086 - if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15087 - mutex_exit(&mpt->m_mutex);
15088 - return (ptgt);
15089 - }
15090 -
15091 - ptgt = (mptsas_target_t *)mptsas_hash_traverse(
15092 - &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
15083 + ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
15084 + &phy);
15085 + if (ptgt != NULL) {
15086 + mutex_exit(&mpt->m_mutex);
15087 + return (ptgt);
15093 15088 }
15094 15089
15095 15090 if (mpt->m_done_traverse_dev) {
15096 15091 mutex_exit(&mpt->m_mutex);
15097 15092 return (NULL);
15098 15093 }
15099 15094
15100 15095 /* If didn't get a match, come here */
15101 15096 cur_handle = mpt->m_dev_handle;
15102 15097 for (; ; ) {
15103 15098 ptgt = NULL;
15104 15099 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15105 15100 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15106 15101 rval = mptsas_get_target_device_info(mpt, page_address,
15107 15102 &cur_handle, &ptgt);
(5 lines elided)
15108 15103 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15109 15104 (rval == DEV_INFO_FAIL_ALLOC)) {
15110 15105 break;
15111 15106 }
15112 15107 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15113 15108 (rval == DEV_INFO_PHYS_DISK)) {
15114 15109 continue;
15115 15110 }
15116 15111 mpt->m_dev_handle = cur_handle;
15117 15112
15118 - if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15113 + if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
15119 15114 break;
15120 15115 }
15121 15116 }
15122 15117
15123 15118 mutex_exit(&mpt->m_mutex);
15124 15119 return (ptgt);
15125 15120 }
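The refhash_linear_search() call above applies an evaluation callback to each target in the table; mptsas_target_eval_nowwn() itself is defined in another hunk of this change. A minimal sketch of such a callback, assuming refhash's convention that the callback returns 0 on a match and using only the m_addr.mta_wwn and m_phynum fields visible in this diff (the real body may differ):

	static int
	mptsas_target_eval_nowwn(const void *op, void *arg)
	{
		const mptsas_target_t *tp = op;
		uint8_t phy = *(uint8_t *)arg;

		/* Match a target with no WWN that sits on the given PHY. */
		return ((tp->m_addr.mta_wwn == 0 && tp->m_phynum == phy) ?
		    0 : -1);
	}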
15126 15121
15127 15122 /*
15128 - * The ptgt->m_sas_wwn contains the wwid for each disk.
15123 + * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
15129 15124 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
15130 15125  * If we didn't get a match, we need to get sas page0 for each device
15131 15126  * until we get a match.
15132 15127  * If that fails, return NULL
15133 15128 */
15134 15129 static mptsas_target_t *
15135 -mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
15130 +mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
15136 15131 {
15137 15132 int rval = 0;
15138 15133 uint16_t cur_handle;
15139 15134 uint32_t page_address;
15140 15135 mptsas_target_t *tmp_tgt = NULL;
15136 + mptsas_target_addr_t addr;
15141 15137
15138 + addr.mta_wwn = wwid;
15139 + addr.mta_phymask = phymask;
15142 15140 mutex_enter(&mpt->m_mutex);
15143 - tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
15144 - &mpt->m_active->m_tgttbl, wwid, phymask);
15141 + tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
15145 15142 if (tmp_tgt != NULL) {
15146 15143 mutex_exit(&mpt->m_mutex);
15147 15144 return (tmp_tgt);
15148 15145 }
15149 15146
15150 15147 if (phymask == 0) {
15151 15148 /*
15152 15149 * It's IR volume
15153 15150 */
15154 15151 rval = mptsas_get_raid_info(mpt);
15155 15152 if (rval) {
15156 - tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
15157 - &mpt->m_active->m_tgttbl, wwid, phymask);
15153 + tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
15158 15154 }
15159 15155 mutex_exit(&mpt->m_mutex);
15160 15156 return (tmp_tgt);
15161 15157 }
15162 15158
15163 15159 if (mpt->m_done_traverse_dev) {
15164 15160 mutex_exit(&mpt->m_mutex);
15165 15161 return (NULL);
15166 15162 }
15167 15163
15168 15164 /* If didn't get a match, come here */
15169 15165 cur_handle = mpt->m_dev_handle;
15170 - for (; ; ) {
15166 + for (;;) {
15171 15167 tmp_tgt = NULL;
15172 15168 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15173 15169 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
15174 15170 rval = mptsas_get_target_device_info(mpt, page_address,
15175 15171 &cur_handle, &tmp_tgt);
15176 15172 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15177 15173 (rval == DEV_INFO_FAIL_ALLOC)) {
15178 15174 tmp_tgt = NULL;
15179 15175 break;
15180 15176 }
15181 15177 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15182 15178 (rval == DEV_INFO_PHYS_DISK)) {
15183 15179 continue;
15184 15180 }
15185 15181 mpt->m_dev_handle = cur_handle;
15186 - if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
15187 - (tmp_tgt->m_phymask == phymask)) {
15182 + if ((tmp_tgt->m_addr.mta_wwn) &&
15183 + (tmp_tgt->m_addr.mta_wwn == wwid) &&
15184 + (tmp_tgt->m_addr.mta_phymask == phymask)) {
15188 15185 break;
15189 15186 }
15190 15187 }
15191 15188
15192 15189 mutex_exit(&mpt->m_mutex);
15193 15190 return (tmp_tgt);
15194 15191 }
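refhash_lookup() above keys off the mptsas_target_addr_t embedded in every target, so a stack-local address is enough to act as the search key. The table itself is created once, elsewhere in this change, by refhash_create(), which takes the hash and compare callbacks, a destructor, the object size, and the offsets of the hash link and of the address tag within the object. A sketch of that setup; the bucket-count constant, callback names, and the m_link field are illustrative assumptions, not necessarily the names used in mptsas_attach():

	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link),
	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);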
15195 15192
15196 15193 static mptsas_smp_t *
15197 -mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
15194 +mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
15198 15195 {
15199 15196 int rval = 0;
15200 15197 uint16_t cur_handle;
15201 15198 uint32_t page_address;
15202 15199 mptsas_smp_t smp_node, *psmp = NULL;
15200 + mptsas_target_addr_t addr;
15203 15201
15202 + addr.mta_wwn = wwid;
15203 + addr.mta_phymask = phymask;
15204 15204 mutex_enter(&mpt->m_mutex);
15205 - psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
15206 - wwid, phymask);
15205 + psmp = refhash_lookup(mpt->m_smp_targets, &addr);
15207 15206 if (psmp != NULL) {
15208 15207 mutex_exit(&mpt->m_mutex);
15209 15208 return (psmp);
15210 15209 }
15211 15210
15212 15211 if (mpt->m_done_traverse_smp) {
15213 15212 mutex_exit(&mpt->m_mutex);
15214 15213 return (NULL);
15215 15214 }
15216 15215
15217 15216 /* If didn't get a match, come here */
15218 15217 cur_handle = mpt->m_smp_devhdl;
15219 - for (; ; ) {
15218 + for (;;) {
15220 15219 psmp = NULL;
15221 15220 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
15222 15221 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15223 15222 rval = mptsas_get_sas_expander_page0(mpt, page_address,
15224 15223 &smp_node);
15225 15224 if (rval != DDI_SUCCESS) {
15226 15225 break;
15227 15226 }
15228 15227 mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
15229 - psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
15228 + psmp = mptsas_smp_alloc(mpt, &smp_node);
15230 15229 ASSERT(psmp);
15231 - if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
15232 - (psmp->m_phymask == phymask)) {
15230 + if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
15231 + (psmp->m_addr.mta_phymask == phymask)) {
15233 15232 break;
15234 15233 }
15235 15234 }
15236 15235
15237 15236 mutex_exit(&mpt->m_mutex);
15238 15237 return (psmp);
15239 15238 }
15240 15239
15241 -/* helper functions using hash */
15242 -
15243 -/*
15244 - * Can't have duplicate entries for same devhdl,
15245 - * if there are invalid entries, the devhdl should be set to 0xffff
15246 - */
15247 -static void *
15248 -mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15249 -{
15250 - mptsas_hash_data_t *data;
15251 -
15252 - data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15253 - while (data != NULL) {
15254 - if (data->devhdl == devhdl) {
15255 - break;
15256 - }
15257 - data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15258 - }
15259 - return (data);
15260 -}
15261 -
15262 15240 mptsas_target_t *
15263 -mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15241 +mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
15264 15242 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15265 15243 {
15266 15244 mptsas_target_t *tmp_tgt = NULL;
15245 + mptsas_target_addr_t addr;
15267 15246
15268 - tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15247 + addr.mta_wwn = wwid;
15248 + addr.mta_phymask = phymask;
15249 + tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
15269 15250 if (tmp_tgt != NULL) {
15270 15251 NDBG20(("Hash item already exist"));
15271 15252 tmp_tgt->m_deviceinfo = devinfo;
15272 - tmp_tgt->m_devhdl = devhdl;
15253 + tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
15273 15254 return (tmp_tgt);
15274 15255 }
15275 15256 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15276 15257 if (tmp_tgt == NULL) {
15277 15258 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15278 15259 return (NULL);
15279 15260 }
15280 15261 tmp_tgt->m_devhdl = devhdl;
15281 - tmp_tgt->m_sas_wwn = wwid;
15262 + tmp_tgt->m_addr.mta_wwn = wwid;
15282 15263 tmp_tgt->m_deviceinfo = devinfo;
15283 - tmp_tgt->m_phymask = phymask;
15264 + tmp_tgt->m_addr.mta_phymask = phymask;
15284 15265 tmp_tgt->m_phynum = phynum;
15285 15266 /* Initialized the tgt structure */
15286 15267 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15287 15268 tmp_tgt->m_qfull_retry_interval =
15288 15269 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15289 15270 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15290 15271
15291 - mptsas_hash_add(hashtab, tmp_tgt);
15272 + refhash_insert(mpt->m_targets, tmp_tgt);
15292 15273
15293 15274 return (tmp_tgt);
15294 15275 }
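With targets linked into the refhash, the removed mptsas_tgt_free()/mptsas_smp_free() helpers below are superseded by refhash's reference counting: the destructor registered with refhash_create() frees an object only after it has been removed from the table and every hold on it has been dropped. The general refhash pattern, as a sketch (refhash_hold(), refhash_rele(), and refhash_remove() are from <sys/refhash.h>; the actual mptsas call sites are in other hunks):

	/* Keep ptgt alive across a section that drops m_mutex. */
	refhash_hold(mpt->m_targets, ptgt);
	mutex_exit(&mpt->m_mutex);
	/* ... use ptgt without it being freed underneath us ... */
	mutex_enter(&mpt->m_mutex);
	refhash_rele(mpt->m_targets, ptgt);

	/* Removal; the object is destroyed once no holds remain. */
	refhash_remove(mpt->m_targets, ptgt);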
15295 15276
15296 15277 static void
15297 -mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15298 - mptsas_phymask_t phymask)
15278 +mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
15299 15279 {
15300 - mptsas_target_t *tmp_tgt;
15301 - tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15302 - if (tmp_tgt == NULL) {
15303 - cmn_err(CE_WARN, "Tgt not found, nothing to free");
15304 - } else {
15305 - kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15306 - }
15280 + dst->m_devhdl = src->m_devhdl;
15281 + dst->m_deviceinfo = src->m_deviceinfo;
15282 + dst->m_pdevhdl = src->m_pdevhdl;
15283 + dst->m_pdevinfo = src->m_pdevinfo;
15307 15284 }
15308 15285
15309 -/*
15310 - * Return the entry in the hash table
15311 - */
15312 15286 static mptsas_smp_t *
15313 -mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15287 +mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
15314 15288 {
15315 - uint64_t key1 = data->m_sasaddr;
15316 - mptsas_phymask_t key2 = data->m_phymask;
15289 + mptsas_target_addr_t addr;
15317 15290 mptsas_smp_t *ret_data;
15318 15291
15319 - ret_data = mptsas_hash_search(hashtab, key1, key2);
15292 + addr.mta_wwn = data->m_addr.mta_wwn;
15293 + addr.mta_phymask = data->m_addr.mta_phymask;
15294 + ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
15295 + /*
15296 + * If there's already a matching SMP target, update its fields
15297 + * in place. Since the address is not changing, it's safe to do
15298 + * this. We cannot just bcopy() here because the structure we've
15299 + * been given has invalid hash links.
15300 + */
15320 15301 if (ret_data != NULL) {
15321 - bcopy(data, ret_data, sizeof (mptsas_smp_t));
15302 + mptsas_smp_target_copy(data, ret_data);
15322 15303 return (ret_data);
15323 15304 }
15324 15305
15325 15306 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15326 15307 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15327 - mptsas_hash_add(hashtab, ret_data);
15308 + refhash_insert(mpt->m_smp_targets, ret_data);
15328 15309 return (ret_data);
15329 15310 }
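The functions removed below (mptsas_smp_free() and the mptsas_hash_* helpers, including mptsas_hash_traverse()) are the old open-coded hash table; the traverse routine's shared cursor (hashtab->cur and hashtab->line live in the table itself) is the unsafety called out in bug 4500. refhash keeps no cursor in the table; callers iterate with refhash_first()/refhash_next(), roughly as in this sketch (the converted call sites are in other hunks of this change):

	mptsas_target_t *ptgt;

	ASSERT(mutex_owned(&mpt->m_mutex));
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/* ... examine or update ptgt ... */
	}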
15330 15311
15331 -static void
15332 -mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15333 - mptsas_phymask_t phymask)
15334 -{
15335 - mptsas_smp_t *tmp_smp;
15336 - tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15337 - if (tmp_smp == NULL) {
15338 - cmn_err(CE_WARN, "Smp element not found, nothing to free");
15339 - } else {
15340 - kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15341 - }
15342 -}
15343 -
15344 15312 /*
15345 - * Hash operation functions
15346 - * key1 is the sas_wwn, key2 is the phymask
15347 - */
15348 -static void
15349 -mptsas_hash_init(mptsas_hash_table_t *hashtab)
15350 -{
15351 - if (hashtab == NULL) {
15352 - return;
15353 - }
15354 - bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
15355 - MPTSAS_HASH_ARRAY_SIZE);
15356 - hashtab->cur = NULL;
15357 - hashtab->line = 0;
15358 -}
15359 -
15360 -static void
15361 -mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15362 -{
15363 - uint16_t line = 0;
15364 - mptsas_hash_node_t *cur = NULL, *last = NULL;
15365 -
15366 - if (hashtab == NULL) {
15367 - return;
15368 - }
15369 - for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15370 - cur = hashtab->head[line];
15371 - while (cur != NULL) {
15372 - last = cur;
15373 - cur = cur->next;
15374 - kmem_free(last->data, datalen);
15375 - kmem_free(last, sizeof (mptsas_hash_node_t));
15376 - }
15377 - }
15378 -}
15379 -
15380 -/*
15381 - * You must guarantee the element doesn't exist in the hash table
15382 - * before you call mptsas_hash_add()
15383 - */
15384 -static void
15385 -mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
15386 -{
15387 - uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
15388 - mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
15389 - mptsas_hash_node_t **head = NULL;
15390 - mptsas_hash_node_t *node = NULL;
15391 -
15392 - if (hashtab == NULL) {
15393 - return;
15394 - }
15395 - ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
15396 - node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
15397 - node->data = data;
15398 -
15399 - head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15400 - if (*head == NULL) {
15401 - *head = node;
15402 - } else {
15403 - node->next = *head;
15404 - *head = node;
15405 - }
15406 -}
15407 -
15408 -static void *
15409 -mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
15410 - mptsas_phymask_t key2)
15411 -{
15412 - mptsas_hash_node_t **head = NULL;
15413 - mptsas_hash_node_t *last = NULL, *cur = NULL;
15414 - mptsas_hash_data_t *data;
15415 - if (hashtab == NULL) {
15416 - return (NULL);
15417 - }
15418 - head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15419 - cur = *head;
15420 - while (cur != NULL) {
15421 - data = cur->data;
15422 - if ((data->key1 == key1) && (data->key2 == key2)) {
15423 - if (last == NULL) {
15424 - (*head) = cur->next;
15425 - } else {
15426 - last->next = cur->next;
15427 - }
15428 - kmem_free(cur, sizeof (mptsas_hash_node_t));
15429 - return (data);
15430 - } else {
15431 - last = cur;
15432 - cur = cur->next;
15433 - }
15434 - }
15435 - return (NULL);
15436 -}
15437 -
15438 -static void *
15439 -mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
15440 - mptsas_phymask_t key2)
15441 -{
15442 - mptsas_hash_node_t *cur = NULL;
15443 - mptsas_hash_data_t *data;
15444 - if (hashtab == NULL) {
15445 - return (NULL);
15446 - }
15447 - cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
15448 - while (cur != NULL) {
15449 - data = cur->data;
15450 - if ((data->key1 == key1) && (data->key2 == key2)) {
15451 - return (data);
15452 - } else {
15453 - cur = cur->next;
15454 - }
15455 - }
15456 - return (NULL);
15457 -}
15458 -
15459 -static void *
15460 -mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
15461 -{
15462 - mptsas_hash_node_t *this = NULL;
15463 -
15464 - if (hashtab == NULL) {
15465 - return (NULL);
15466 - }
15467 -
15468 - if (pos == MPTSAS_HASH_FIRST) {
15469 - hashtab->line = 0;
15470 - hashtab->cur = NULL;
15471 - this = hashtab->head[0];
15472 - } else {
15473 - if (hashtab->cur == NULL) {
15474 - return (NULL);
15475 - } else {
15476 - this = hashtab->cur->next;
15477 - }
15478 - }
15479 -
15480 - while (this == NULL) {
15481 - hashtab->line++;
15482 - if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
15483 - /* the traverse reaches the end */
15484 - hashtab->cur = NULL;
15485 - return (NULL);
15486 - } else {
15487 - this = hashtab->head[hashtab->line];
15488 - }
15489 - }
15490 - hashtab->cur = this;
15491 - return (this->data);
15492 -}
15493 -
15494 -/*
15495 15313 * Functions for SGPIO LED support
15496 15314 */
15497 15315 static dev_info_t *
15498 15316 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
15499 15317 {
15500 15318 dev_info_t *dip;
15501 15319 int prop;
15502 15320 dip = e_ddi_hold_devi_by_dev(dev, 0);
15503 15321 if (dip == NULL)
15504 15322 return (dip);
15505 15323 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
15506 15324 "phymask", 0);
15507 15325 *phymask = (mptsas_phymask_t)prop;
15508 15326 ddi_release_devi(dip);
15509 15327 return (dip);
15510 15328 }
15511 15329 static mptsas_target_t *
15512 15330 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
15513 15331 {
15514 15332 uint8_t phynum;
15515 15333 uint64_t wwn;
15516 15334 int lun;
15517 15335 mptsas_target_t *ptgt = NULL;
15518 15336
15519 15337 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
15520 15338 return (NULL);
15521 15339 }
15522 15340 if (addr[0] == 'w') {
15523 15341 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
15524 15342 } else {
15525 15343 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
15526 15344 }
15527 15345 return (ptgt);
15528 15346 }
15529 15347
15530 15348 static int
15531 15349 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
15532 15350 {
15533 15351 uint32_t slotstatus = 0;
15534 15352
15535 15353 /* Build an MPI2 Slot Status based on our view of the world */
15536 15354 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
15537 15355 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
15538 15356 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
15539 15357 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
15540 15358 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
15541 15359 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
15542 15360
15543 15361 /* Write it to the controller */
15544 15362 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
15545 15363 slotstatus, ptgt->m_slot_num));
15546 15364 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
15547 15365 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
15548 15366 }
15549 15367
15550 15368 /*
15551 15369 * send sep request, use enclosure/slot addressing
15552 15370 */
15553 15371 static int
15554 15372 mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
15555 15373 uint32_t *status, uint8_t act)
15556 15374 {
15557 15375 Mpi2SepRequest_t req;
15558 15376 Mpi2SepReply_t rep;
15559 15377 int ret;
15560 15378
15561 15379 ASSERT(mutex_owned(&mpt->m_mutex));
15562 15380
15563 15381 /*
15564 15382 * We only support SEP control of directly-attached targets, in which
15565 15383 * case the "SEP" we're talking to is a virtual one contained within
15566 15384 * the HBA itself. This is necessary because DA targets typically have
(62 lines elided)
15567 15385 * no other mechanism for LED control. Targets for which a separate
15568 15386 * enclosure service processor exists should be controlled via ses(7d)
15569 15387 * or sgen(7d). Furthermore, since such requests can time out, they
15570 15388 * should be made in user context rather than in response to
15571 15389 * asynchronous fabric changes.
15572 15390 *
15573 15391 * In addition, we do not support this operation for RAID volumes,
15574 15392 * since there is no slot associated with them.
15575 15393 */
15576 15394 if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
15577 - ptgt->m_phymask == 0) {
15395 + ptgt->m_addr.mta_phymask == 0) {
15578 15396 return (ENOTTY);
15579 15397 }
15580 15398
15581 15399 bzero(&req, sizeof (req));
15582 15400 bzero(&rep, sizeof (rep));
15583 15401
15584 15402 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
15585 15403 req.Action = act;
15586 15404 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
15587 15405 req.EnclosureHandle = LE_16(ptgt->m_enclosure);
15588 15406 req.Slot = LE_16(ptgt->m_slot_num);
15589 15407 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15590 15408 req.SlotStatus = LE_32(*status);
15591 15409 }
15592 15410 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
15593 15411 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
15594 15412 if (ret != 0) {
15595 15413 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
15596 15414 "Processor Request message error %d", ret);
15597 15415 return (ret);
15598 15416 }
15599 15417 /* do passthrough success, check the ioc status */
15600 15418 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
15601 15419 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
15602 15420 "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
15603 15421 LE_32(rep.IOCLogInfo));
15604 15422 switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
15605 15423 case MPI2_IOCSTATUS_INVALID_FUNCTION:
15606 15424 case MPI2_IOCSTATUS_INVALID_VPID:
15607 15425 case MPI2_IOCSTATUS_INVALID_FIELD:
15608 15426 case MPI2_IOCSTATUS_INVALID_STATE:
15609 15427 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
15610 15428 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
15611 15429 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
15612 15430 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
15613 15431 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
15614 15432 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
15615 15433 return (EINVAL);
15616 15434 case MPI2_IOCSTATUS_BUSY:
15617 15435 return (EBUSY);
15618 15436 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
15619 15437 return (EAGAIN);
15620 15438 case MPI2_IOCSTATUS_INVALID_SGL:
15621 15439 case MPI2_IOCSTATUS_INTERNAL_ERROR:
15622 15440 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
15623 15441 default:
15624 15442 return (EIO);
15625 15443 }
15626 15444 }
15627 15445 if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
15628 15446 *status = LE_32(rep.SlotStatus);
15629 15447 }
15630 15448
15631 15449 return (0);
15632 15450 }
15633 15451
15634 15452 int
15635 15453 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15636 15454 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15637 15455 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15638 15456 {
15639 15457 ddi_dma_cookie_t new_cookie;
15640 15458 size_t alloc_len;
15641 15459 uint_t ncookie;
15642 15460
15643 15461 if (cookiep == NULL)
15644 15462 cookiep = &new_cookie;
15645 15463
15646 15464 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15647 15465 NULL, dma_hdp) != DDI_SUCCESS) {
15648 15466 return (FALSE);
15649 15467 }
15650 15468
15651 15469 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15652 15470 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
15653 15471 acc_hdp) != DDI_SUCCESS) {
15654 15472 ddi_dma_free_handle(dma_hdp);
15655 15473 return (FALSE);
15656 15474 }
15657 15475
15658 15476 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
15659 15477 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
15660 15478 cookiep, &ncookie) != DDI_DMA_MAPPED) {
15661 15479 (void) ddi_dma_mem_free(acc_hdp);
15662 15480 ddi_dma_free_handle(dma_hdp);
15663 15481 return (FALSE);
15664 15482 }
15665 15483
15666 15484 return (TRUE);
15667 15485 }
15668 15486
15669 15487 void
15670 15488 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15671 15489 {
15672 15490 if (*dma_hdp == NULL)
15673 15491 return;
15674 15492
15675 15493 (void) ddi_dma_unbind_handle(*dma_hdp);
15676 15494 (void) ddi_dma_mem_free(acc_hdp);
15677 15495 ddi_dma_free_handle(dma_hdp);
15678 15496 }
(91 lines elided)